/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "attribs.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "varasm.h"
#include "output.h"
#include "insn-attr.h"
#include "explow.h"
#include "expr.h"
#include "debug.h"
#include "cfgrtl.h"
#include "common/common-target.h"
#include "gimplify.h"
#include "langhooks.h"
#include "reload.h"
#include "params.h"
#include "tree-pass.h"
#include "context.h"
#include "builtins.h"
#include "tree-vector-builder.h"

/* This file should be included last.  */
#include "target-def.h"
/* Processor costs */

struct processor_costs {
  /* Integer load */
  const int int_load;

  /* Integer signed load */
  const int int_sload;

  /* Integer zeroed load */
  const int int_zload;

  /* Float load */
  const int float_load;

  /* fmov, fneg, fabs */
  const int float_move;

  /* fadd, fsub */
  const int float_plusminus;

  /* fcmp */
  const int float_cmp;

  /* fmov, fmovr */
  const int float_cmove;

  /* fmul */
  const int float_mul;

  /* fdivs */
  const int float_div_sf;

  /* fdivd */
  const int float_div_df;

  /* fsqrts */
  const int float_sqrt_sf;

  /* fsqrtd */
  const int float_sqrt_df;

  /* umul/smul */
  const int int_mul;

  /* mulX */
  const int int_mulX;

  /* integer multiply cost for each bit set past the most
     significant 3, so the formula for multiply cost becomes:

	if (rs1 < 0)
	  highest_bit = highest_clear_bit(rs1);
	else
	  highest_bit = highest_set_bit(rs1);
	if (highest_bit < 3)
	  highest_bit = 3;
	cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);

     A value of zero indicates that the multiply cost is fixed,
     and not variable.  */
  const int int_mul_bit_factor;
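
  /* A worked example of the formula above, added here for illustration
     (not part of the original sources): with the ultrasparc_costs entry
     below, int_mul is COSTS_N_INSNS (4) and int_mul_bit_factor is 2, so
     a multiply whose operand has its highest set bit at position 11 is
     costed at COSTS_N_INSNS (4) + (11 - 3) / 2, i.e. the fixed multiply
     cost plus a variable part of 4 units.  */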
  /* udiv/sdiv */
  const int int_div;

  /* divX */
  const int int_divX;

  /* movcc, movr */
  const int int_cmove;

  /* penalty for shifts, due to scheduling rules etc. */
  const int shift_penalty;
};
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs leon_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (15), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs leon3_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (14), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (22), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (35), /* idiv */
  COSTS_N_INSNS (35), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara3_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  COSTS_N_INSNS (9), /* fadd, fsub */
  COSTS_N_INSNS (9), /* fcmp */
  COSTS_N_INSNS (9), /* fmov, fmovr */
  COSTS_N_INSNS (9), /* fmul */
  COSTS_N_INSNS (23), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (9), /* imul */
  COSTS_N_INSNS (9), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
  COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara4_costs = {
  COSTS_N_INSNS (5), /* int load */
  COSTS_N_INSNS (5), /* int signed load */
  COSTS_N_INSNS (5), /* int zeroed load */
  COSTS_N_INSNS (5), /* float load */
  COSTS_N_INSNS (11), /* fmov, fneg, fabs */
  COSTS_N_INSNS (11), /* fadd, fsub */
  COSTS_N_INSNS (11), /* fcmp */
  COSTS_N_INSNS (11), /* fmov, fmovr */
  COSTS_N_INSNS (11), /* fmul */
  COSTS_N_INSNS (24), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (24), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (12), /* imul */
  COSTS_N_INSNS (12), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (50), /* idiv, average of 41 - 60 cycle range */
  COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara7_costs = {
  COSTS_N_INSNS (5), /* int load */
  COSTS_N_INSNS (5), /* int signed load */
  COSTS_N_INSNS (5), /* int zeroed load */
  COSTS_N_INSNS (5), /* float load */
  COSTS_N_INSNS (11), /* fmov, fneg, fabs */
  COSTS_N_INSNS (11), /* fadd, fsub */
  COSTS_N_INSNS (11), /* fcmp */
  COSTS_N_INSNS (11), /* fmov, fmovr */
  COSTS_N_INSNS (11), /* fmul */
  COSTS_N_INSNS (24), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (24), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (12), /* imul */
  COSTS_N_INSNS (12), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (51), /* idiv, average of 42 - 61 cycle range */
  COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs m8_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  COSTS_N_INSNS (9), /* fadd, fsub */
  COSTS_N_INSNS (9), /* fcmp */
  COSTS_N_INSNS (9), /* fmov, fmovr */
  COSTS_N_INSNS (9), /* fmul */
  COSTS_N_INSNS (26), /* fdivs */
  COSTS_N_INSNS (30), /* fdivd */
  COSTS_N_INSNS (33), /* fsqrts */
  COSTS_N_INSNS (41), /* fsqrtd */
  COSTS_N_INSNS (12), /* imul */
  COSTS_N_INSNS (10), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (57), /* udiv/sdiv */
  COSTS_N_INSNS (30), /* udivx/sdivx */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const struct processor_costs *sparc_costs = &cypress_costs;

#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out if
   somebody does not branch between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100, 101, 102};
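
/* For illustration (a note added here, not in the original sources): a leaf
   function does not allocate a register window, so its incoming arguments
   actually live in the caller's out registers.  Hence the table above maps
   %i0-%i5 (hard regs 24-29) to %o0-%o5 (hard regs 8-13) and %i7 (31) to
   %o7 (15), while registers that cannot be remapped are marked with -1.  */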
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1};
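
/* Reading the table above (a summary added for clarity, not in the original
   sources): the global registers, %sp (14), the incoming registers %i0-%i5
   and %i7, and all FP and condition-code registers may appear in a leaf
   function candidate; the %o and %l registers and %fp may not, since a leaf
   function runs in its caller's register window.  */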
struct GTY(()) machine_function
{
  /* Size of the frame of the function.  */
  HOST_WIDE_INT frame_size;

  /* Size of the frame of the function minus the register window save area
     and the outgoing argument area.  */
  HOST_WIDE_INT apparent_frame_size;

  /* Register we pretend the frame pointer is allocated to.  Normally, this
     is %fp, but if we are in a leaf procedure, this is (%sp + offset).  We
     record "offset" separately as it may be too big for (reg + disp).  */
  rtx frame_base_reg;
  HOST_WIDE_INT frame_base_offset;

  /* Number of global or FP registers to be saved (as 4-byte quantities).  */
  int n_global_fp_regs;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of crtl->uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the prologue saves local or in registers.  */
  bool save_local_in_regs_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_frame_size		cfun->machine->frame_size
#define sparc_apparent_frame_size	cfun->machine->apparent_frame_size
#define sparc_frame_base_reg		cfun->machine->frame_base_reg
#define sparc_frame_base_offset		cfun->machine->frame_base_offset
#define sparc_n_global_fp_regs		cfun->machine->n_global_fp_regs
#define sparc_leaf_function_p		cfun->machine->leaf_function_p
#define sparc_save_local_in_regs_p	cfun->machine->save_local_in_regs_p
#define sparc_prologue_data_valid_p	cfun->machine->prologue_data_valid_p

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static void sparc_option_override (void);
static void sparc_init_modes (void);
static int function_arg_slotno (const CUMULATIVE_ARGS *, machine_mode,
				const_tree, bool, bool, int *, int *);

static int supersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);
static int hypersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (machine_mode, rtx, bool);
static bool sparc_legitimate_constant_p (machine_mode, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx_insn *);
static void sparc_asm_function_prologue (FILE *);
static void sparc_asm_function_epilogue (FILE *);
#ifdef TARGET_SOLARIS
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
						 tree) ATTRIBUTE_UNUSED;
#endif
static int sparc_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_fpu_init_builtins (void);
static void sparc_vis_init_builtins (void);
static tree sparc_builtin_decl (unsigned, bool);
static rtx sparc_expand_builtin (tree, rtx, rtx, machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (machine_mode, rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static int sparc_register_move_cost (machine_mode,
				     reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static machine_mode sparc_promote_function_mode (const_tree, machine_mode,
						 int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (cumulative_args_t);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx, addr_space_t);
static bool sparc_pass_by_reference (cumulative_args_t,
				     machine_mode, const_tree, bool);
static void sparc_function_arg_advance (cumulative_args_t,
					machine_mode, const_tree, bool);
static rtx sparc_function_arg_1 (cumulative_args_t,
				 machine_mode, const_tree, bool, bool);
static rtx sparc_function_arg (cumulative_args_t,
			       machine_mode, const_tree, bool);
static rtx sparc_function_incoming_arg (cumulative_args_t,
					machine_mode, const_tree, bool);
static pad_direction sparc_function_arg_padding (machine_mode, const_tree);
static unsigned int sparc_function_arg_boundary (machine_mode,
						 const_tree);
static int sparc_arg_partial_bytes (cumulative_args_t,
				    machine_mode, tree, bool);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static rtx sparc_builtin_setjmp_frame_value (void);
static void sparc_conditional_register_usage (void);
static bool sparc_use_pseudo_pic_reg (void);
static void sparc_init_pic_reg (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
static machine_mode sparc_preferred_simd_mode (scalar_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
static bool sparc_lra_p (void);
static bool sparc_print_operand_punct_valid_p (unsigned char);
static void sparc_print_operand (FILE *, rtx, int);
static void sparc_print_operand_address (FILE *, machine_mode, rtx);
static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
					   machine_mode,
					   secondary_reload_info *);
static bool sparc_secondary_memory_needed (machine_mode, reg_class_t,
					   reg_class_t);
static machine_mode sparc_secondary_memory_needed_mode (machine_mode);
static scalar_int_mode sparc_cstore_mode (enum insn_code icode);
static void sparc_atomic_assign_expand_fenv (tree *, tree *, tree *);
static bool sparc_fixed_condition_code_regs (unsigned int *, unsigned int *);
static unsigned int sparc_min_arithmetic_precision (void);
static unsigned int sparc_hard_regno_nregs (unsigned int, machine_mode);
static bool sparc_hard_regno_mode_ok (unsigned int, machine_mode);
static bool sparc_modes_tieable_p (machine_mode, machine_mode);
static bool sparc_can_change_mode_class (machine_mode, machine_mode,
					 reg_class_t);
static HOST_WIDE_INT sparc_constant_alignment (const_tree, HOST_WIDE_INT);
static bool sparc_vectorize_vec_perm_const (machine_mode, rtx, rtx, rtx,
					    const vec_perm_indices &);

#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       do_diagnostic, handler, exclude } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
#endif
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL sparc_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sparc_register_move_cost

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sparc_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING sparc_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sparc_option_override

#ifdef TARGET_THREAD_SSP_OFFSET
#undef TARGET_STACK_PROTECT_GUARD
#define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
#endif

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sparc_secondary_reload
#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED sparc_secondary_memory_needed
#undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
#define TARGET_SECONDARY_MEMORY_NEEDED_MODE sparc_secondary_memory_needed_mode

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage

#undef TARGET_INIT_PIC_REG
#define TARGET_INIT_PIC_REG sparc_init_pic_reg

#undef TARGET_USE_PSEUDO_PIC_REG
#define TARGET_USE_PSEUDO_PIC_REG sparc_use_pseudo_pic_reg

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

#undef TARGET_LRA_P
#define TARGET_LRA_P sparc_lra_p

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sparc_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address

/* The value stored by LDSTUB.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0xff

#undef TARGET_CSTORE_MODE
#define TARGET_CSTORE_MODE sparc_cstore_mode

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sparc_atomic_assign_expand_fenv

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS sparc_fixed_condition_code_regs

#undef TARGET_MIN_ARITHMETIC_PRECISION
#define TARGET_MIN_ARITHMETIC_PRECISION sparc_min_arithmetic_precision

#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS sparc_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK sparc_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P sparc_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS sparc_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT sparc_constant_alignment

#undef TARGET_VECTORIZE_VEC_PERM_CONST
#define TARGET_VECTORIZE_VEC_PERM_CONST sparc_vectorize_vec_perm_const
struct gcc_target targetm = TARGET_INITIALIZER;

/* Return the memory reference contained in X if any, zero otherwise.  */

static rtx
mem_ref (rtx x)
{
  if (GET_CODE (x) == SIGN_EXTEND || GET_CODE (x) == ZERO_EXTEND)
    x = XEXP (x, 0);

  if (MEM_P (x))
    return x;

  return NULL_RTX;
}

/* True if any of INSN's source register(s) is REG.  */

static bool
insn_uses_reg_p (rtx_insn *insn, unsigned int reg)
{
  extract_insn (insn);
  return ((REG_P (recog_data.operand[1])
	   && REGNO (recog_data.operand[1]) == reg)
	  || (recog_data.n_operands == 3
	      && REG_P (recog_data.operand[2])
	      && REGNO (recog_data.operand[2]) == reg));
}

/* True if INSN is a floating-point division or square-root.  */

static bool
div_sqrt_insn_p (rtx_insn *insn)
{
  if (GET_CODE (PATTERN (insn)) != SET)
    return false;

  switch (get_attr_type (insn))
    {
    case TYPE_FPDIVS:
    case TYPE_FPSQRTS:
    case TYPE_FPDIVD:
    case TYPE_FPSQRTD:
      return true;
    default:
      return false;
    }
}

/* True if INSN is a floating-point instruction.  */

static bool
fpop_insn_p (rtx_insn *insn)
{
  if (GET_CODE (PATTERN (insn)) != SET)
    return false;

  switch (get_attr_type (insn))
    {
    case TYPE_FPMOVE:
    case TYPE_FPCMOVE:
    case TYPE_FP:
    case TYPE_FPCMP:
    case TYPE_FPMUL:
    case TYPE_FPDIVS:
    case TYPE_FPSQRTS:
    case TYPE_FPDIVD:
    case TYPE_FPSQRTD:
      return true;
    default:
      return false;
    }
}

/* True if INSN is an atomic instruction.  */

static bool
atomic_insn_for_leon3_p (rtx_insn *insn)
{
  switch (INSN_CODE (insn))
    {
    case CODE_FOR_swapsi:
    case CODE_FOR_ldstub:
    case CODE_FOR_atomic_compare_and_swap_leon3_1:
      return true;
    default:
      return false;
    }
}

/* We use a machine specific pass to enable workarounds for errata.

   We need to have the (essentially) final form of the insn stream in order
   to properly detect the various hazards.  Therefore, this machine specific
   pass runs as late as possible.  */

/* True if INSN is a md pattern or asm statement.  */
#define USEFUL_INSN_P(INSN) \
  (NONDEBUG_INSN_P (INSN) \
   && GET_CODE (PATTERN (INSN)) != USE \
   && GET_CODE (PATTERN (INSN)) != CLOBBER)
static unsigned int
sparc_do_work_around_errata (void)
{
  rtx_insn *insn, *next;

  /* Force all instructions to be split into their final form.  */
  split_all_insns_noflow ();

  /* Now look for specific patterns in the insn stream.  */
  for (insn = get_insns (); insn; insn = next)
    {
      bool insert_nop = false;
      rtx set;
      rtx_insn *jump;
      rtx_sequence *seq;

      /* Look into the instruction in a delay slot.  */
      if (NONJUMP_INSN_P (insn)
	  && (seq = dyn_cast <rtx_sequence *> (PATTERN (insn))))
	{
	  jump = seq->insn (0);
	  insn = seq->insn (1);
	}
      else if (JUMP_P (insn))
	jump = insn;
      else
	jump = NULL;

      /* Place a NOP at the branch target of an integer branch if it is a
	 floating-point operation or a floating-point branch.  */
      if (sparc_fix_gr712rc
	  && jump
	  && jump_to_label_p (jump)
	  && get_attr_branch_type (jump) == BRANCH_TYPE_ICC)
	{
	  rtx_insn *target = next_active_insn (JUMP_LABEL_AS_INSN (jump));
	  if (target
	      && (fpop_insn_p (target)
		  || (JUMP_P (target)
		      && get_attr_branch_type (target) == BRANCH_TYPE_FCC)))
	    emit_insn_before (gen_nop (), target);
	}

      /* Insert a NOP between load instruction and atomic instruction.  Insert
	 a NOP at branch target if there is a load in delay slot and an atomic
	 instruction at branch target.  */
      if (sparc_fix_ut700
	  && NONJUMP_INSN_P (insn)
	  && (set = single_set (insn)) != NULL_RTX
	  && mem_ref (SET_SRC (set))
	  && REG_P (SET_DEST (set)))
	{
	  if (jump && jump_to_label_p (jump))
	    {
	      rtx_insn *target = next_active_insn (JUMP_LABEL_AS_INSN (jump));
	      if (target && atomic_insn_for_leon3_p (target))
		emit_insn_before (gen_nop (), target);
	    }

	  next = next_active_insn (insn);
	  if (!next)
	    break;

	  if (atomic_insn_for_leon3_p (next))
	    insert_nop = true;
	}

      /* Look for a sequence that starts with a fdiv or fsqrt instruction and
	 ends with another fdiv or fsqrt instruction with no dependencies on
	 the former, along with an appropriate pattern in between.  */
      if (sparc_fix_lost_divsqrt
	  && NONJUMP_INSN_P (insn)
	  && div_sqrt_insn_p (insn))
	{
	  int i;
	  int fp_found = 0;
	  rtx_insn *after;

	  const unsigned int dest_reg = REGNO (SET_DEST (single_set (insn)));

	  next = next_active_insn (insn);
	  if (!next)
	    break;

	  for (after = next, i = 0; i < 4; i++)
	    {
	      /* Count floating-point operations.  */
	      if (i != 3 && fpop_insn_p (after))
		{
		  /* If the insn uses the destination register of
		     the div/sqrt, then it cannot be problematic.  */
		  if (insn_uses_reg_p (after, dest_reg))
		    break;
		  fp_found++;
		}

	      /* Count floating-point loads.  */
	      if (i != 3
		  && (set = single_set (after)) != NULL_RTX
		  && REG_P (SET_DEST (set))
		  && REGNO (SET_DEST (set)) > 31)
		{
		  /* If the insn uses the destination register of
		     the div/sqrt, then it cannot be problematic.  */
		  if (REGNO (SET_DEST (set)) == dest_reg)
		    break;
		  fp_found++;
		}

	      /* Check if this is a problematic sequence.  */
	      if (i > 1
		  && fp_found >= 2
		  && div_sqrt_insn_p (after))
		{
		  /* If this is the short version of the problematic
		     sequence we add two NOPs in a row to also prevent
		     the long version.  */
		  if (i == 2)
		    emit_insn_before (gen_nop (), next);
		  insert_nop = true;
		  break;
		}

	      /* No need to scan past a second div/sqrt.  */
	      if (div_sqrt_insn_p (after))
		break;

	      /* Insert NOP before branch.  */
	      if (i < 3
		  && (!NONJUMP_INSN_P (after)
		      || GET_CODE (PATTERN (after)) == SEQUENCE))
		{
		  insert_nop = true;
		  break;
		}

	      after = next_active_insn (after);
	      if (!after)
		break;
	    }
	}

      /* Look for either of these two sequences:

	 Sequence A:
	 1. store of word size or less (e.g. st / stb / sth / stf)
	 2. any single instruction that is not a load or store
	 3. any store instruction (e.g. st / stb / sth / stf / std / stdf)

	 Sequence B:
	 1. store of double word size (e.g. std / stdf)
	 2. any store instruction (e.g. st / stb / sth / stf / std / stdf)  */
      if (sparc_fix_b2bst
	  && NONJUMP_INSN_P (insn)
	  && (set = single_set (insn)) != NULL_RTX
	  && MEM_P (SET_DEST (set)))
	{
	  /* Sequence B begins with a double-word store.  */
	  bool seq_b = GET_MODE_SIZE (GET_MODE (SET_DEST (set))) == 8;
	  rtx_insn *after;
	  int i;

	  next = next_active_insn (insn);
	  if (!next)
	    break;

	  for (after = next, i = 0; i < 2; i++)
	    {
	      /* Skip empty assembly statements.  */
	      if ((GET_CODE (PATTERN (after)) == UNSPEC_VOLATILE)
		  || (USEFUL_INSN_P (after)
		      && (asm_noperands (PATTERN (after)) >= 0)
		      && !strcmp (decode_asm_operands (PATTERN (after),
						       NULL, NULL, NULL,
						       NULL, NULL), "")))
		after = next_active_insn (after);
	      if (!after)
		break;

	      /* If the insn is a branch, then it cannot be problematic.  */
	      if (!NONJUMP_INSN_P (after)
		  || GET_CODE (PATTERN (after)) == SEQUENCE)
		break;

	      /* Sequence B is only two instructions long.  */
	      if (seq_b)
		{
		  /* Add NOP if followed by a store.  */
		  if ((set = single_set (after)) != NULL_RTX
		      && MEM_P (SET_DEST (set)))
		    insert_nop = true;

		  /* Otherwise it is ok.  */
		  break;
		}

	      /* If the second instruction is a load or a store,
		 then the sequence cannot be problematic.  */
	      if (i == 0)
		{
		  if ((set = single_set (after)) != NULL_RTX
		      && (MEM_P (SET_DEST (set)) || mem_ref (SET_SRC (set))))
		    break;

		  after = next_active_insn (after);
		  if (!after)
		    break;
		}
	    }

	  /* Add NOP if third instruction is a store.  */
	  if (i == 1
	      && (set = single_set (after)) != NULL_RTX
	      && MEM_P (SET_DEST (set)))
	    insert_nop = true;
	}

      /* Look for a single-word load into an odd-numbered FP register.  */
      else if (sparc_fix_at697f
	       && NONJUMP_INSN_P (insn)
	       && (set = single_set (insn)) != NULL_RTX
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
	       && mem_ref (SET_SRC (set))
	       && REG_P (SET_DEST (set))
	       && REGNO (SET_DEST (set)) > 31
	       && REGNO (SET_DEST (set)) % 2 != 0)
	{
	  /* The wrong dependency is on the enclosing double register.  */
	  const unsigned int x = REGNO (SET_DEST (set)) - 1;
	  unsigned int src1, src2, dest;
	  int code;

	  next = next_active_insn (insn);
	  if (!next)
	    break;
	  /* If the insn is a branch, then it cannot be problematic.  */
	  if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
	    continue;

	  extract_insn (next);
	  code = INSN_CODE (next);

	  switch (code)
	    {
	    case CODE_FOR_adddf3:
	    case CODE_FOR_subdf3:
	    case CODE_FOR_muldf3:
	    case CODE_FOR_divdf3:
	      dest = REGNO (recog_data.operand[0]);
	      src1 = REGNO (recog_data.operand[1]);
	      src2 = REGNO (recog_data.operand[2]);
	      if (src1 != src2)
		{
		  /* Case [1-4]:
			ld [address], %fx+1
			FPOPd %f{x,y}, %f{y,x}, %f{x,y}  */
		  if ((src1 == x || src2 == x)
		      && (dest == src1 || dest == src2))
		    insert_nop = true;
		}
	      else
		{
		  /* Case 5:
			ld [address], %fx+1
			FPOPd %fx, %fx, %fx  */
		  if (src1 == x
		      && dest == src1
		      && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
		    insert_nop = true;
		}
	      break;

	    case CODE_FOR_sqrtdf2:
	      dest = REGNO (recog_data.operand[0]);
	      src1 = REGNO (recog_data.operand[1]);
	      /* Case 6:
		    ld [address], %fx+1
		    fsqrtd %fx, %fx  */
	      if (src1 == x && dest == src1)
		insert_nop = true;
	      break;

	    default:
	      break;
	    }
	}

      /* Look for a single-word load into an integer register.  */
      else if (sparc_fix_ut699
	       && NONJUMP_INSN_P (insn)
	       && (set = single_set (insn)) != NULL_RTX
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) <= 4
	       && (mem_ref (SET_SRC (set)) != NULL_RTX
		   || INSN_CODE (insn) == CODE_FOR_movsi_pic_gotdata_op)
	       && REG_P (SET_DEST (set))
	       && REGNO (SET_DEST (set)) < 32)
	{
	  /* There is no problem if the second memory access has a data
	     dependency on the first single-cycle load.  */
	  rtx x = SET_DEST (set);

	  next = next_active_insn (insn);
	  if (!next)
	    break;
	  /* If the insn is a branch, then it cannot be problematic.  */
	  if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
	    continue;

	  /* Look for a second memory access to/from an integer register.  */
	  if ((set = single_set (next)) != NULL_RTX)
	    {
	      rtx src = SET_SRC (set);
	      rtx dest = SET_DEST (set);
	      rtx mem;

	      /* LDD is affected.  */
	      if ((mem = mem_ref (src)) != NULL_RTX
		  && REG_P (dest)
		  && REGNO (dest) < 32
		  && !reg_mentioned_p (x, XEXP (mem, 0)))
		insert_nop = true;

	      /* STD is *not* affected.  */
	      else if (MEM_P (dest)
		       && GET_MODE_SIZE (GET_MODE (dest)) <= 4
		       && (src == CONST0_RTX (GET_MODE (dest))
			   || (REG_P (src)
			       && REGNO (src) < 32
			       && REGNO (src) != REGNO (x)))
		       && !reg_mentioned_p (x, XEXP (dest, 0)))
		insert_nop = true;

	      /* GOT accesses use LD.  */
	      else if (INSN_CODE (next) == CODE_FOR_movsi_pic_gotdata_op
		       && !reg_mentioned_p (x, XEXP (XEXP (src, 0), 1)))
		insert_nop = true;
	    }
	}

      /* Look for a single-word load/operation into an FP register.  */
      else if (sparc_fix_ut699
	       && NONJUMP_INSN_P (insn)
	       && (set = single_set (insn)) != NULL_RTX
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
	       && REG_P (SET_DEST (set))
	       && REGNO (SET_DEST (set)) > 31)
	{
	  /* Number of instructions in the problematic window.  */
	  const int n_insns = 4;
	  /* The problematic combination is with the sibling FP register.  */
	  const unsigned int x = REGNO (SET_DEST (set));
	  const unsigned int y = x ^ 1;
	  rtx_insn *after;
	  int i;

	  next = next_active_insn (insn);
	  if (!next)
	    break;
	  /* If the insn is a branch, then it cannot be problematic.  */
	  if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
	    continue;

	  /* Look for a second load/operation into the sibling FP register.  */
	  if (!((set = single_set (next)) != NULL_RTX
		&& GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
		&& REG_P (SET_DEST (set))
		&& REGNO (SET_DEST (set)) == y))
	    continue;

	  /* Look for a (possible) store from the FP register in the next N
	     instructions, but bail out if it is again modified or if there
	     is a store from the sibling FP register before this store.  */
	  for (after = next, i = 0; i < n_insns; i++)
	    {
	      bool branch_p;

	      after = next_active_insn (after);
	      if (!after)
		break;

	      /* This is a branch with an empty delay slot.  */
	      if (!NONJUMP_INSN_P (after))
		{
		  if (++i == n_insns)
		    break;
		  branch_p = true;
		  after = NULL;
		}
	      /* This is a branch with a filled delay slot.  */
	      else if (rtx_sequence *seq =
			 dyn_cast <rtx_sequence *> (PATTERN (after)))
		{
		  if (++i == n_insns)
		    break;
		  branch_p = true;
		  after = seq->insn (1);
		}
	      /* This is a regular instruction.  */
	      else
		branch_p = false;

	      if (after && (set = single_set (after)) != NULL_RTX)
		{
		  const rtx src = SET_SRC (set);
		  const rtx dest = SET_DEST (set);
		  const unsigned int size = GET_MODE_SIZE (GET_MODE (dest));

		  /* If the FP register is again modified before the store,
		     then the store isn't affected.  */
		  if (REG_P (dest)
		      && (REGNO (dest) == x
			  || (REGNO (dest) == y && size == 8)))
		    break;

		  if (MEM_P (dest) && REG_P (src))
		    {
		      /* If there is a store from the sibling FP register
			 before the store, then the store is not affected.  */
		      if (REGNO (src) == y || (REGNO (src) == x && size == 8))
			break;

		      /* Otherwise, the store is affected.  */
		      if (REGNO (src) == x && size == 4)
			{
			  insert_nop = true;
			  break;
			}
		    }
		}

	      /* If we have a branch in the first M instructions, then we
		 cannot see the (M+2)th instruction so we play safe.  */
	      if (branch_p && i <= (n_insns - 2))
		{
		  insert_nop = true;
		  break;
		}
	    }
	}

      else
	next = NEXT_INSN (insn);

      if (insert_nop)
	emit_insn_before (gen_nop (), next);
    }

  return 0;
}
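
/* For illustration (an assumed example, not part of the original file):
   with -mfix-gr712rc, an integer branch whose target begins with a
   floating-point operation gets a NOP placed at the target by the pass
   above, turning

	be	.L1		! icc branch
	...
   .L1:	faddd	%f0, %f2, %f4

   into

	be	.L1
	...
   .L1:	nop			! inserted workaround
	faddd	%f0, %f2, %f4  */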
namespace {

const pass_data pass_data_work_around_errata =
{
  RTL_PASS, /* type */
  "errata", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MACH_DEP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_work_around_errata : public rtl_opt_pass
{
public:
  pass_work_around_errata(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_work_around_errata, ctxt)
    {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return sparc_fix_at697f || sparc_fix_ut699 || sparc_fix_b2bst
	  || sparc_fix_gr712rc || sparc_fix_ut700 || sparc_fix_lost_divsqrt;
    }

  virtual unsigned int execute (function *)
    {
      return sparc_do_work_around_errata ();
    }

}; // class pass_work_around_errata

} // anon namespace

rtl_opt_pass *
make_pass_work_around_errata (gcc::context *ctxt)
{
  return new pass_work_around_errata (ctxt);
}
/* Helpers for TARGET_DEBUG_OPTIONS.  */
static void
dump_target_flag_bits (const int flags)
{
  if (flags & MASK_64BIT)
    fprintf (stderr, "64BIT ");
  if (flags & MASK_APP_REGS)
    fprintf (stderr, "APP_REGS ");
  if (flags & MASK_FASTER_STRUCTS)
    fprintf (stderr, "FASTER_STRUCTS ");
  if (flags & MASK_FLAT)
    fprintf (stderr, "FLAT ");
  if (flags & MASK_FMAF)
    fprintf (stderr, "FMAF ");
  if (flags & MASK_FSMULD)
    fprintf (stderr, "FSMULD ");
  if (flags & MASK_FPU)
    fprintf (stderr, "FPU ");
  if (flags & MASK_HARD_QUAD)
    fprintf (stderr, "HARD_QUAD ");
  if (flags & MASK_POPC)
    fprintf (stderr, "POPC ");
  if (flags & MASK_PTR64)
    fprintf (stderr, "PTR64 ");
  if (flags & MASK_STACK_BIAS)
    fprintf (stderr, "STACK_BIAS ");
  if (flags & MASK_UNALIGNED_DOUBLES)
    fprintf (stderr, "UNALIGNED_DOUBLES ");
  if (flags & MASK_V8PLUS)
    fprintf (stderr, "V8PLUS ");
  if (flags & MASK_VIS)
    fprintf (stderr, "VIS ");
  if (flags & MASK_VIS2)
    fprintf (stderr, "VIS2 ");
  if (flags & MASK_VIS3)
    fprintf (stderr, "VIS3 ");
  if (flags & MASK_VIS4)
    fprintf (stderr, "VIS4 ");
  if (flags & MASK_VIS4B)
    fprintf (stderr, "VIS4B ");
  if (flags & MASK_CBCOND)
    fprintf (stderr, "CBCOND ");
  if (flags & MASK_DEPRECATED_V8_INSNS)
    fprintf (stderr, "DEPRECATED_V8_INSNS ");
  if (flags & MASK_SPARCLET)
    fprintf (stderr, "SPARCLET ");
  if (flags & MASK_SPARCLITE)
    fprintf (stderr, "SPARCLITE ");
  if (flags & MASK_V8)
    fprintf (stderr, "V8 ");
  if (flags & MASK_V9)
    fprintf (stderr, "V9 ");
}

static void
dump_target_flags (const char *prefix, const int flags)
{
  fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
  dump_target_flag_bits (flags);
  fprintf (stderr, "]\n");
}
1607 /* Validate and override various options, and do some machine dependent
1608 initialization. */
1610 static void
1611 sparc_option_override (void)
1613 static struct code_model {
1614 const char *const name;
1615 const enum cmodel value;
1616 } const cmodels[] = {
1617 { "32", CM_32 },
1618 { "medlow", CM_MEDLOW },
1619 { "medmid", CM_MEDMID },
1620 { "medany", CM_MEDANY },
1621 { "embmedany", CM_EMBMEDANY },
1622 { NULL, (enum cmodel) 0 }
1624 const struct code_model *cmodel;
1625 /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=. */
1626 static struct cpu_default {
1627 const int cpu;
1628 const enum processor_type processor;
1629 } const cpu_default[] = {
1630 /* There must be one entry here for each TARGET_CPU value. */
1631 { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
1632 { TARGET_CPU_v8, PROCESSOR_V8 },
1633 { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
1634 { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
1635 { TARGET_CPU_leon, PROCESSOR_LEON },
1636 { TARGET_CPU_leon3, PROCESSOR_LEON3 },
1637 { TARGET_CPU_leon3v7, PROCESSOR_LEON3V7 },
1638 { TARGET_CPU_sparclite, PROCESSOR_F930 },
1639 { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
1640 { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
1641 { TARGET_CPU_v9, PROCESSOR_V9 },
1642 { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
1643 { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
1644 { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
1645 { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
1646 { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
1647 { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
1648 { TARGET_CPU_niagara7, PROCESSOR_NIAGARA7 },
1649 { TARGET_CPU_m8, PROCESSOR_M8 },
1650 { -1, PROCESSOR_V7 }
1652 const struct cpu_default *def;
1653 /* Table of values for -m{cpu,tune}=. This must match the order of
1654 the enum processor_type in sparc-opts.h. */
1655 static struct cpu_table {
1656 const char *const name;
1657 const int disable;
1658 const int enable;
1659 } const cpu_table[] = {
1660 { "v7", MASK_ISA|MASK_FSMULD, 0 },
1661 { "cypress", MASK_ISA|MASK_FSMULD, 0 },
1662 { "v8", MASK_ISA, MASK_V8 },
1663 /* TI TMS390Z55 supersparc */
1664 { "supersparc", MASK_ISA, MASK_V8 },
1665 { "hypersparc", MASK_ISA, MASK_V8 },
1666 { "leon", MASK_ISA|MASK_FSMULD, MASK_V8|MASK_LEON },
1667 { "leon3", MASK_ISA, MASK_V8|MASK_LEON3 },
1668 { "leon3v7", MASK_ISA|MASK_FSMULD, MASK_LEON3 },
1669 { "sparclite", MASK_ISA|MASK_FSMULD, MASK_SPARCLITE },
1670 /* The Fujitsu MB86930 is the original sparclite chip, with no FPU. */
1671 { "f930", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
1672 /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU. */
1673 { "f934", MASK_ISA|MASK_FSMULD, MASK_SPARCLITE },
1674 { "sparclite86x", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
1675 { "sparclet", MASK_ISA|MASK_FSMULD, MASK_SPARCLET },
1676 /* TEMIC sparclet */
1677 { "tsc701", MASK_ISA|MASK_FSMULD, MASK_SPARCLET },
1678 { "v9", MASK_ISA, MASK_V9 },
1679 /* UltraSPARC I, II, IIi */
1680 { "ultrasparc", MASK_ISA,
1681 /* Although insns using %y are deprecated, it is a clear win. */
1682 MASK_V9|MASK_DEPRECATED_V8_INSNS },
1683 /* UltraSPARC III */
1684 /* ??? Check if %y issue still holds true. */
1685 { "ultrasparc3", MASK_ISA,
1686 MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2 },
1687 /* UltraSPARC T1 */
1688 { "niagara", MASK_ISA,
1689 MASK_V9|MASK_DEPRECATED_V8_INSNS },
1690 /* UltraSPARC T2 */
1691 { "niagara2", MASK_ISA,
1692 MASK_V9|MASK_POPC|MASK_VIS2 },
1693 /* UltraSPARC T3 */
1694 { "niagara3", MASK_ISA,
1695 MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF },
1696 /* UltraSPARC T4 */
1697 { "niagara4", MASK_ISA,
1698 MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF|MASK_CBCOND },
1699 /* UltraSPARC M7 */
1700 { "niagara7", MASK_ISA,
1701 MASK_V9|MASK_POPC|MASK_VIS4|MASK_FMAF|MASK_CBCOND|MASK_SUBXC },
1702 /* UltraSPARC M8 */
1703 { "m8", MASK_ISA,
1704 MASK_V9|MASK_POPC|MASK_VIS4|MASK_FMAF|MASK_CBCOND|MASK_SUBXC|MASK_VIS4B }
1706 const struct cpu_table *cpu;
1707 unsigned int i;
1709 if (sparc_debug_string != NULL)
1711 const char *q;
1712 char *p;
1714 p = ASTRDUP (sparc_debug_string);
1715 while ((q = strtok (p, ",")) != NULL)
1717 bool invert;
1718 int mask;
1720 p = NULL;
1721 if (*q == '!')
1723 invert = true;
1724 q++;
1726 else
1727 invert = false;
1729 if (! strcmp (q, "all"))
1730 mask = MASK_DEBUG_ALL;
1731 else if (! strcmp (q, "options"))
1732 mask = MASK_DEBUG_OPTIONS;
1733 else
1734 error ("unknown -mdebug-%s switch", q);
1736 if (invert)
1737 sparc_debug &= ~mask;
1738 else
1739 sparc_debug |= mask;
1743 /* Enable the FsMULd instruction by default if not explicitly specified by
1744 the user. It may be later disabled by the CPU (explicitly or not). */
1745 if (TARGET_FPU && !(target_flags_explicit & MASK_FSMULD))
1746 target_flags |= MASK_FSMULD;
1748 if (TARGET_DEBUG_OPTIONS)
1750 dump_target_flags("Initial target_flags", target_flags);
1751 dump_target_flags("target_flags_explicit", target_flags_explicit);
1754 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1755 SUBTARGET_OVERRIDE_OPTIONS;
1756 #endif
1758 #ifndef SPARC_BI_ARCH
1759 /* Check for unsupported architecture size. */
1760 if (!TARGET_64BIT != DEFAULT_ARCH32_P)
1761 error ("%s is not supported by this configuration",
1762 DEFAULT_ARCH32_P ? "-m64" : "-m32");
1763 #endif
1765 /* We force all 64-bit archs to use a 128-bit long double. */
1766 if (TARGET_ARCH64 && !TARGET_LONG_DOUBLE_128)
1768 error ("-mlong-double-64 not allowed with -m64");
1769 target_flags |= MASK_LONG_DOUBLE_128;
1772 /* Code model selection. */
1773 sparc_cmodel = SPARC_DEFAULT_CMODEL;
1775 #ifdef SPARC_BI_ARCH
1776 if (TARGET_ARCH32)
1777 sparc_cmodel = CM_32;
1778 #endif
1780 if (sparc_cmodel_string != NULL)
1782 if (TARGET_ARCH64)
1784 for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
1785 if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
1786 break;
1787 if (cmodel->name == NULL)
1788 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
1789 else
1790 sparc_cmodel = cmodel->value;
1792 else
1793 error ("-mcmodel= is not supported on 32-bit systems");
1796 /* Check that -fcall-saved-REG wasn't specified for out registers. */
1797 for (i = 8; i < 16; i++)
1798 if (!call_used_regs [i])
1800 error ("-fcall-saved-REG is not supported for out registers");
1801 call_used_regs [i] = 1;
1804 /* Set the default CPU if no -mcpu option was specified. */
1805 if (!global_options_set.x_sparc_cpu_and_features)
1807 for (def = &cpu_default[0]; def->cpu != -1; ++def)
1808 if (def->cpu == TARGET_CPU_DEFAULT)
1809 break;
1810 gcc_assert (def->cpu != -1);
1811 sparc_cpu_and_features = def->processor;
1814 /* Set the default CPU if no -mtune option was specified. */
1815 if (!global_options_set.x_sparc_cpu)
1816 sparc_cpu = sparc_cpu_and_features;
1818 cpu = &cpu_table[(int) sparc_cpu_and_features];
1820 if (TARGET_DEBUG_OPTIONS)
1822 fprintf (stderr, "sparc_cpu_and_features: %s\n", cpu->name);
1823 dump_target_flags ("cpu->disable", cpu->disable);
1824 dump_target_flags ("cpu->enable", cpu->enable);
1827 target_flags &= ~cpu->disable;
1828 target_flags |= (cpu->enable
1829 #ifndef HAVE_AS_FMAF_HPC_VIS3
1830 & ~(MASK_FMAF | MASK_VIS3)
1831 #endif
1832 #ifndef HAVE_AS_SPARC4
1833 & ~MASK_CBCOND
1834 #endif
1835 #ifndef HAVE_AS_SPARC5_VIS4
1836 & ~(MASK_VIS4 | MASK_SUBXC)
1837 #endif
1838 #ifndef HAVE_AS_SPARC6
1839 & ~(MASK_VIS4B)
1840 #endif
1841 #ifndef HAVE_AS_LEON
1842 & ~(MASK_LEON | MASK_LEON3)
1843 #endif
1844 & ~(target_flags_explicit & MASK_FEATURES)
1847 /* -mvis2 implies -mvis. */
1848 if (TARGET_VIS2)
1849 target_flags |= MASK_VIS;
1851 /* -mvis3 implies -mvis2 and -mvis. */
1852 if (TARGET_VIS3)
1853 target_flags |= MASK_VIS2 | MASK_VIS;
1855 /* -mvis4 implies -mvis3, -mvis2 and -mvis. */
1856 if (TARGET_VIS4)
1857 target_flags |= MASK_VIS3 | MASK_VIS2 | MASK_VIS;
1859 /* -mvis4b implies -mvis4, -mvis3, -mvis2 and -mvis. */
1860 if (TARGET_VIS4B)
1861 target_flags |= MASK_VIS4 | MASK_VIS3 | MASK_VIS2 | MASK_VIS;
1863 /* Don't allow -mvis, -mvis2, -mvis3, -mvis4, -mvis4b, -mfmaf and -mfsmuld if
1864 FPU is disabled. */
1865 if (!TARGET_FPU)
1866 target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_VIS4
1867 | MASK_VIS4B | MASK_FMAF | MASK_FSMULD);
1869 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
1870 are available; -m64 also implies v9. */
1871 if (TARGET_VIS || TARGET_ARCH64)
1873 target_flags |= MASK_V9;
1874 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
1877 /* -mvis also implies -mv8plus on 32-bit. */
1878 if (TARGET_VIS && !TARGET_ARCH64)
1879 target_flags |= MASK_V8PLUS;
1881 /* Use the deprecated v8 insns for sparc64 in 32-bit mode. */
1882 if (TARGET_V9 && TARGET_ARCH32)
1883 target_flags |= MASK_DEPRECATED_V8_INSNS;
1885 /* V8PLUS requires V9 and makes no sense in 64-bit mode. */
1886 if (!TARGET_V9 || TARGET_ARCH64)
1887 target_flags &= ~MASK_V8PLUS;
1889 /* Don't use stack biasing in 32-bit mode. */
1890 if (TARGET_ARCH32)
1891 target_flags &= ~MASK_STACK_BIAS;
1893 /* Use LRA instead of reload, unless otherwise instructed. */
1894 if (!(target_flags_explicit & MASK_LRA))
1895 target_flags |= MASK_LRA;
1897 /* Enable applicable errata workarounds for LEON3FT. */
1898 if (sparc_fix_ut699 || sparc_fix_ut700 || sparc_fix_gr712rc)
1900 sparc_fix_b2bst = 1;
1901 sparc_fix_lost_divsqrt = 1;
1904 /* Disable FsMULd for the UT699 since it doesn't work correctly. */
1905 if (sparc_fix_ut699)
1906 target_flags &= ~MASK_FSMULD;
1908 /* Supply a default value for align_functions. */
1909 if (align_functions == 0)
1911 if (sparc_cpu == PROCESSOR_ULTRASPARC
1912 || sparc_cpu == PROCESSOR_ULTRASPARC3
1913 || sparc_cpu == PROCESSOR_NIAGARA
1914 || sparc_cpu == PROCESSOR_NIAGARA2
1915 || sparc_cpu == PROCESSOR_NIAGARA3
1916 || sparc_cpu == PROCESSOR_NIAGARA4)
1917 align_functions = 32;
1918 else if (sparc_cpu == PROCESSOR_NIAGARA7
1919 || sparc_cpu == PROCESSOR_M8)
1920 align_functions = 64;
1923 /* Validate PCC_STRUCT_RETURN. */
1924 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
1925 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
1927 /* Only use .uaxword when compiling for a 64-bit target. */
1928 if (!TARGET_ARCH64)
1929 targetm.asm_out.unaligned_op.di = NULL;
1931 /* Do various machine dependent initializations. */
1932 sparc_init_modes ();
1934 /* Set up function hooks. */
1935 init_machine_status = sparc_init_machine_status;
1937 switch (sparc_cpu)
1939 case PROCESSOR_V7:
1940 case PROCESSOR_CYPRESS:
1941 sparc_costs = &cypress_costs;
1942 break;
1943 case PROCESSOR_V8:
1944 case PROCESSOR_SPARCLITE:
1945 case PROCESSOR_SUPERSPARC:
1946 sparc_costs = &supersparc_costs;
1947 break;
1948 case PROCESSOR_F930:
1949 case PROCESSOR_F934:
1950 case PROCESSOR_HYPERSPARC:
1951 case PROCESSOR_SPARCLITE86X:
1952 sparc_costs = &hypersparc_costs;
1953 break;
1954 case PROCESSOR_LEON:
1955 sparc_costs = &leon_costs;
1956 break;
1957 case PROCESSOR_LEON3:
1958 case PROCESSOR_LEON3V7:
1959 sparc_costs = &leon3_costs;
1960 break;
1961 case PROCESSOR_SPARCLET:
1962 case PROCESSOR_TSC701:
1963 sparc_costs = &sparclet_costs;
1964 break;
1965 case PROCESSOR_V9:
1966 case PROCESSOR_ULTRASPARC:
1967 sparc_costs = &ultrasparc_costs;
1968 break;
1969 case PROCESSOR_ULTRASPARC3:
1970 sparc_costs = &ultrasparc3_costs;
1971 break;
1972 case PROCESSOR_NIAGARA:
1973 sparc_costs = &niagara_costs;
1974 break;
1975 case PROCESSOR_NIAGARA2:
1976 sparc_costs = &niagara2_costs;
1977 break;
1978 case PROCESSOR_NIAGARA3:
1979 sparc_costs = &niagara3_costs;
1980 break;
1981 case PROCESSOR_NIAGARA4:
1982 sparc_costs = &niagara4_costs;
1983 break;
1984 case PROCESSOR_NIAGARA7:
1985 sparc_costs = &niagara7_costs;
1986 break;
1987 case PROCESSOR_M8:
1988 sparc_costs = &m8_costs;
1989 break;
1990 case PROCESSOR_NATIVE:
1991 gcc_unreachable ();
1994 if (sparc_memory_model == SMM_DEFAULT)
1996 /* Choose the memory model for the operating system. */
1997 enum sparc_memory_model_type os_default = SUBTARGET_DEFAULT_MEMORY_MODEL;
1998 if (os_default != SMM_DEFAULT)
1999 sparc_memory_model = os_default;
2000 /* Choose the most relaxed model for the processor. */
2001 else if (TARGET_V9)
2002 sparc_memory_model = SMM_RMO;
2003 else if (TARGET_LEON3)
2004 sparc_memory_model = SMM_TSO;
2005 else if (TARGET_LEON)
2006 sparc_memory_model = SMM_SC;
2007 else if (TARGET_V8)
2008 sparc_memory_model = SMM_PSO;
2009 else
2010 sparc_memory_model = SMM_SC;
2013 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
2014 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
2015 target_flags |= MASK_LONG_DOUBLE_128;
2016 #endif
2018 if (TARGET_DEBUG_OPTIONS)
2019 dump_target_flags ("Final target_flags", target_flags);
2021 /* PARAM_SIMULTANEOUS_PREFETCHES is the number of prefetches that
2022 can run at the same time.  More importantly, it is the threshold
2023 defining when additional prefetches will be dropped by the
2024 hardware.
2026 The UltraSPARC-III features a documented prefetch queue with a
2027 size of 8. Additional prefetches issued in the cpu are
2028 dropped.
2030 Niagara processors are different. In these processors prefetches
2031 are handled much like regular loads. The L1 miss buffer is 32
2032 entries, but prefetches start getting affected when 30 entries
2033 become occupied. That occupation could be a mix of regular loads
2034 and prefetches though. And that buffer is shared by all threads.
2035 Once the threshold is reached, if the core is running a single
2036 thread the prefetch will retry. If more than one thread is
2037 running, the prefetch will be dropped.
2039 All this makes it very difficult to determine how many
2040 prefetches can be issued simultaneously, even in a
2041 single-threaded program. Experimental results show that setting
2042 this parameter to 32 works well when the number of threads is not
2043 high. */
2044 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
2045 ((sparc_cpu == PROCESSOR_ULTRASPARC
2046 || sparc_cpu == PROCESSOR_NIAGARA
2047 || sparc_cpu == PROCESSOR_NIAGARA2
2048 || sparc_cpu == PROCESSOR_NIAGARA3
2049 || sparc_cpu == PROCESSOR_NIAGARA4)
2051 : (sparc_cpu == PROCESSOR_ULTRASPARC3
2052 ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
2053 || sparc_cpu == PROCESSOR_M8)
2054 ? 32 : 3))),
2055 global_options.x_param_values,
2056 global_options_set.x_param_values);
2058 /* PARAM_L1_CACHE_LINE_SIZE is the size of the L1 cache line, in
2059 bytes.
2061 The Oracle SPARC Architecture (previously the UltraSPARC
2062 Architecture) specification states that when a PREFETCH[A]
2063 instruction is executed an implementation-specific amount of data
2064 is prefetched, and that it is at least 64 bytes long (aligned to
2065 at least 64 bytes).
2067 However, this is not correct. The M7 (and implementations prior
2068 to that) does not guarantee a 64B prefetch into a cache if the
2069 line size is smaller. A single cache line is all that is ever
2070 prefetched. So for the M7, where the L1D$ has 32B lines and the
2071 L2D$ and L3 have 64B lines, a prefetch will prefetch 64B into the
2072 L2 and L3, but only 32B are brought into the L1D$. (Assuming it
2073 is a read_n prefetch, which is the only type which allocates to
2074 the L1.) */
2075 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
2076 (sparc_cpu == PROCESSOR_M8
2077 ? 64 : 32),
2078 global_options.x_param_values,
2079 global_options_set.x_param_values);
2081 /* PARAM_L1_CACHE_SIZE is the size of the L1D$ (most SPARC chips use
2082 Harvard level-1 caches) in kilobytes.  Both UltraSPARC and
2083 Niagara processors feature an L1D$ of 16KB. */
2084 maybe_set_param_value (PARAM_L1_CACHE_SIZE,
2085 ((sparc_cpu == PROCESSOR_ULTRASPARC
2086 || sparc_cpu == PROCESSOR_ULTRASPARC3
2087 || sparc_cpu == PROCESSOR_NIAGARA
2088 || sparc_cpu == PROCESSOR_NIAGARA2
2089 || sparc_cpu == PROCESSOR_NIAGARA3
2090 || sparc_cpu == PROCESSOR_NIAGARA4
2091 || sparc_cpu == PROCESSOR_NIAGARA7
2092 || sparc_cpu == PROCESSOR_M8)
2093 ? 16 : 64),
2094 global_options.x_param_values,
2095 global_options_set.x_param_values);
2098 /* PARAM_L2_CACHE_SIZE is the size of the L2 in kilobytes.  Note
2099 that 512 is the default in params.def. */
2100 maybe_set_param_value (PARAM_L2_CACHE_SIZE,
2101 ((sparc_cpu == PROCESSOR_NIAGARA4
2102 || sparc_cpu == PROCESSOR_M8)
2103 ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
2104 ? 256 : 512)),
2105 global_options.x_param_values,
2106 global_options_set.x_param_values);
2109 /* Disable save slot sharing for call-clobbered registers by default.
2110 The IRA sharing algorithm works on single registers only and this
2111 pessimizes for double floating-point registers. */
2112 if (!global_options_set.x_flag_ira_share_save_slots)
2113 flag_ira_share_save_slots = 0;
2115 /* Only enable REE by default in 64-bit mode where it helps to eliminate
2116 redundant 32-to-64-bit extensions. */
2117 if (!global_options_set.x_flag_ree && TARGET_ARCH32)
2118 flag_ree = 0;
2121 /* Miscellaneous utilities. */
2123 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
2124 or branch on register contents instructions. */
2127 v9_regcmp_p (enum rtx_code code)
2129 return (code == EQ || code == NE || code == GE || code == LT
2130 || code == LE || code == GT);
2133 /* Nonzero if OP is a floating point constant which can
2134 be loaded into an integer register using a single
2135 sethi instruction. */
2138 fp_sethi_p (rtx op)
2140 if (GET_CODE (op) == CONST_DOUBLE)
2142 long i;
2144 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2145 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
2148 return 0;
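/* For example, the SFmode constant 1.0f has the bit pattern
   0x3f800000.  Its low 10 bits are clear, so SPARC_SETHI_P holds
   while SPARC_SIMM13_P does not, and a single
   "sethi %hi(0x3f800000), %reg" loads it.  */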
2151 /* Nonzero if OP is a floating point constant which can
2152 be loaded into an integer register using a single
2153 mov instruction. */
2156 fp_mov_p (rtx op)
2158 if (GET_CODE (op) == CONST_DOUBLE)
2160 long i;
2162 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2163 return SPARC_SIMM13_P (i);
2166 return 0;
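/* For example, only SFmode constants whose bit pattern fits in a
   signed 13-bit immediate qualify here; the tiny denormal with bit
   pattern 0x00000ccc is loaded by the single instruction
   "mov 0xccc, %reg".  */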
2169 /* Nonzero if OP is a floating point constant which can
2170 be loaded into an integer register using a high/losum
2171 instruction sequence. */
2174 fp_high_losum_p (rtx op)
2176 /* The constraints calling this should only be in
2177 SFmode move insns, so any constant which cannot
2178 be moved using a single insn will do. */
2179 if (GET_CODE (op) == CONST_DOUBLE)
2181 long i;
2183 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2184 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
2187 return 0;
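/* For example, the SFmode constant 0.1f has the bit pattern
   0x3dcccccd, which is neither a 13-bit immediate nor sethi-only
   material (its low 10 bits are 0xcd), so it takes the two-insn
   sequence "sethi %hi(0x3dcccccd), %reg; or %reg, 0xcd, %reg".  */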
2190 /* Return true if the address of LABEL can be loaded by means of the
2191 mov{si,di}_pic_label_ref patterns in PIC mode. */
2193 static bool
2194 can_use_mov_pic_label_ref (rtx label)
2196 /* VxWorks does not impose a fixed gap between segments; the run-time
2197 gap can be different from the object-file gap. We therefore can't
2198 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
2199 are absolutely sure that X is in the same segment as the GOT.
2200 Unfortunately, the flexibility of linker scripts means that we
2201 can't be sure of that in general, so assume that GOT-relative
2202 accesses are never valid on VxWorks. */
2203 if (TARGET_VXWORKS_RTP)
2204 return false;
2206 /* Similarly, if the label is non-local, it might end up being placed
2207 in a different section than the current one; now mov_pic_label_ref
2208 requires the label and the code to be in the same section. */
2209 if (LABEL_REF_NONLOCAL_P (label))
2210 return false;
2212 /* Finally, if we are reordering basic blocks and partition into hot
2213 and cold sections, this might happen for any label. */
2214 if (flag_reorder_blocks_and_partition)
2215 return false;
2217 return true;
2220 /* Expand a move instruction. Return true if all work is done. */
2222 bool
2223 sparc_expand_move (machine_mode mode, rtx *operands)
2225 /* Handle sets of MEM first. */
2226 if (GET_CODE (operands[0]) == MEM)
2228 /* 0 is a register (or a pair of registers) on SPARC. */
2229 if (register_or_zero_operand (operands[1], mode))
2230 return false;
2232 if (!reload_in_progress)
2234 operands[0] = validize_mem (operands[0]);
2235 operands[1] = force_reg (mode, operands[1]);
2239 /* Fix up TLS cases. */
2240 if (TARGET_HAVE_TLS
2241 && CONSTANT_P (operands[1])
2242 && sparc_tls_referenced_p (operands [1]))
2244 operands[1] = sparc_legitimize_tls_address (operands[1]);
2245 return false;
2248 /* Fix up PIC cases. */
2249 if (flag_pic && CONSTANT_P (operands[1]))
2251 if (pic_address_needs_scratch (operands[1]))
2252 operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);
2254 /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases. */
2255 if ((GET_CODE (operands[1]) == LABEL_REF
2256 && can_use_mov_pic_label_ref (operands[1]))
2257 || (GET_CODE (operands[1]) == CONST
2258 && GET_CODE (XEXP (operands[1], 0)) == PLUS
2259 && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
2260 && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT
2261 && can_use_mov_pic_label_ref (XEXP (XEXP (operands[1], 0), 0))))
2263 if (mode == SImode)
2265 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
2266 return true;
2269 if (mode == DImode)
2271 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
2272 return true;
2276 if (symbolic_operand (operands[1], mode))
2278 operands[1]
2279 = sparc_legitimize_pic_address (operands[1],
2280 reload_in_progress
2281 ? operands[0] : NULL_RTX);
2282 return false;
2286 /* If we are trying to toss an integer constant into FP registers,
2287 or loading an FP or vector constant, force it into memory. */
2288 if (CONSTANT_P (operands[1])
2289 && REG_P (operands[0])
2290 && (SPARC_FP_REG_P (REGNO (operands[0]))
2291 || SCALAR_FLOAT_MODE_P (mode)
2292 || VECTOR_MODE_P (mode)))
2294 /* emit_group_store will send such bogosity to us when it is
2295 not storing directly into memory. So fix this up to avoid
2296 crashes in output_constant_pool. */
2297 if (operands [1] == const0_rtx)
2298 operands[1] = CONST0_RTX (mode);
2300 /* We can clear or set to all-ones FP registers if TARGET_VIS, and
2301 always other regs. */
2302 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
2303 && (const_zero_operand (operands[1], mode)
2304 || const_all_ones_operand (operands[1], mode)))
2305 return false;
2307 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
2308 /* We are able to build any SF constant in integer registers
2309 with at most 2 instructions. */
2310 && (mode == SFmode
2311 /* And any DF constant in integer registers if needed. */
2312 || (mode == DFmode && !can_create_pseudo_p ())))
2313 return false;
2315 operands[1] = force_const_mem (mode, operands[1]);
2316 if (!reload_in_progress)
2317 operands[1] = validize_mem (operands[1]);
2318 return false;
2321 /* Accept non-constants and valid constants unmodified. */
2322 if (!CONSTANT_P (operands[1])
2323 || GET_CODE (operands[1]) == HIGH
2324 || input_operand (operands[1], mode))
2325 return false;
2327 switch (mode)
2329 case E_QImode:
2330 /* All QImode constants require only one insn, so proceed. */
2331 break;
2333 case E_HImode:
2334 case E_SImode:
2335 sparc_emit_set_const32 (operands[0], operands[1]);
2336 return true;
2338 case E_DImode:
2339 /* input_operand should have filtered out 32-bit mode. */
2340 sparc_emit_set_const64 (operands[0], operands[1]);
2341 return true;
2343 case E_TImode:
2345 rtx high, low;
2346 /* TImode isn't available in 32-bit mode. */
2347 split_double (operands[1], &high, &low);
2348 emit_insn (gen_movdi (operand_subword (operands[0], 0, 0, TImode),
2349 high));
2350 emit_insn (gen_movdi (operand_subword (operands[0], 1, 0, TImode),
2351 low));
2353 return true;
2355 default:
2356 gcc_unreachable ();
2359 return false;
2362 /* Load OP1, a 32-bit constant, into OP0, a register.
2363 We know it can't be done in one insn when we get
2364 here, the move expander guarantees this. */
2366 static void
2367 sparc_emit_set_const32 (rtx op0, rtx op1)
2369 machine_mode mode = GET_MODE (op0);
2370 rtx temp = op0;
2372 if (can_create_pseudo_p ())
2373 temp = gen_reg_rtx (mode);
2375 if (GET_CODE (op1) == CONST_INT)
2377 gcc_assert (!small_int_operand (op1, mode)
2378 && !const_high_operand (op1, mode));
2380 /* Emit them as real moves instead of a HIGH/LO_SUM,
2381 this way CSE can see everything and reuse intermediate
2382 values if it wants. */
2383 emit_insn (gen_rtx_SET (temp, GEN_INT (INTVAL (op1)
2384 & ~(HOST_WIDE_INT) 0x3ff)));
2386 emit_insn (gen_rtx_SET (op0,
2387 gen_rtx_IOR (mode, temp,
2388 GEN_INT (INTVAL (op1) & 0x3ff))));
2390 else
2392 /* A symbol, emit in the traditional way. */
2393 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, op1)));
2394 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (mode, temp, op1)));
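/* For example, for op1 = 0x12345678 the CONST_INT path above emits
   "set 0x12345400, %temp" (the constant with its low 10 bits masked
   off) followed by "or %temp, 0x278, %op0", since
   0x12345678 & 0x3ff == 0x278.  */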
2398 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
2399 If TEMP is nonzero, we are forbidden to use any other scratch
2400 registers. Otherwise, we are allowed to generate them as needed.
2402 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
2403 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
2405 void
2406 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
2408 rtx cst, temp1, temp2, temp3, temp4, temp5;
2409 rtx ti_temp = 0;
2411 /* Deal with too large offsets. */
2412 if (GET_CODE (op1) == CONST
2413 && GET_CODE (XEXP (op1, 0)) == PLUS
2414 && CONST_INT_P (cst = XEXP (XEXP (op1, 0), 1))
2415 && trunc_int_for_mode (INTVAL (cst), SImode) != INTVAL (cst))
2417 gcc_assert (!temp);
2418 temp1 = gen_reg_rtx (DImode);
2419 temp2 = gen_reg_rtx (DImode);
2420 sparc_emit_set_const64 (temp2, cst);
2421 sparc_emit_set_symbolic_const64 (temp1, XEXP (XEXP (op1, 0), 0),
2422 NULL_RTX);
2423 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp1, temp2)));
2424 return;
2427 if (temp && GET_MODE (temp) == TImode)
2429 ti_temp = temp;
2430 temp = gen_rtx_REG (DImode, REGNO (temp));
2433 /* SPARC-V9 code-model support. */
2434 switch (sparc_cmodel)
2436 case CM_MEDLOW:
2437 /* The range spanned by all instructions in the object is less
2438 than 2^31 bytes (2GB) and the distance from any instruction
2439 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2440 than 2^31 bytes (2GB).
2442 The executable must be in the low 4TB of the virtual address
2443 space.
2445 sethi %hi(symbol), %temp1
2446 or %temp1, %lo(symbol), %reg */
2447 if (temp)
2448 temp1 = temp; /* op0 is allowed. */
2449 else
2450 temp1 = gen_reg_rtx (DImode);
2452 emit_insn (gen_rtx_SET (temp1, gen_rtx_HIGH (DImode, op1)));
2453 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
2454 break;
2456 case CM_MEDMID:
2457 /* The range spanned by all instructions in the object is less
2458 than 2^31 bytes (2GB) and the distance from any instruction
2459 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2460 than 2^31 bytes (2GB).
2462 The executable must be in the low 16TB of the virtual address
2463 space.
2465 sethi %h44(symbol), %temp1
2466 or %temp1, %m44(symbol), %temp2
2467 sllx %temp2, 12, %temp3
2468 or %temp3, %l44(symbol), %reg */
2469 if (temp)
2471 temp1 = op0;
2472 temp2 = op0;
2473 temp3 = temp; /* op0 is allowed. */
2475 else
2477 temp1 = gen_reg_rtx (DImode);
2478 temp2 = gen_reg_rtx (DImode);
2479 temp3 = gen_reg_rtx (DImode);
2482 emit_insn (gen_seth44 (temp1, op1));
2483 emit_insn (gen_setm44 (temp2, temp1, op1));
2484 emit_insn (gen_rtx_SET (temp3,
2485 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
2486 emit_insn (gen_setl44 (op0, temp3, op1));
2487 break;
2489 case CM_MEDANY:
2490 /* The range spanned by all instructions in the object is less
2491 than 2^31 bytes (2GB) and the distance from any instruction
2492 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2493 than 2^31 bytes (2GB).
2495 The executable can be placed anywhere in the virtual address
2496 space.
2498 sethi %hh(symbol), %temp1
2499 sethi %lm(symbol), %temp2
2500 or %temp1, %hm(symbol), %temp3
2501 sllx %temp3, 32, %temp4
2502 or %temp4, %temp2, %temp5
2503 or %temp5, %lo(symbol), %reg */
2504 if (temp)
2506 /* It is possible that one of the registers we got for operands[2]
2507 might coincide with that of operands[0] (which is why we made
2508 it TImode). Pick the other one to use as our scratch. */
2509 if (rtx_equal_p (temp, op0))
2511 gcc_assert (ti_temp);
2512 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2514 temp1 = op0;
2515 temp2 = temp; /* op0 is _not_ allowed, see above. */
2516 temp3 = op0;
2517 temp4 = op0;
2518 temp5 = op0;
2520 else
2522 temp1 = gen_reg_rtx (DImode);
2523 temp2 = gen_reg_rtx (DImode);
2524 temp3 = gen_reg_rtx (DImode);
2525 temp4 = gen_reg_rtx (DImode);
2526 temp5 = gen_reg_rtx (DImode);
2529 emit_insn (gen_sethh (temp1, op1));
2530 emit_insn (gen_setlm (temp2, op1));
2531 emit_insn (gen_sethm (temp3, temp1, op1));
2532 emit_insn (gen_rtx_SET (temp4,
2533 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2534 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2535 emit_insn (gen_setlo (op0, temp5, op1));
2536 break;
2538 case CM_EMBMEDANY:
2539 /* Old old old backwards compatibility cruft here.
2540 Essentially it is MEDLOW with a fixed 64-bit
2541 virtual base added to all data segment addresses.
2542 Text-segment stuff is computed like MEDANY, we can't
2543 reuse the code above because the relocation knobs
2544 look different.
2546 Data segment: sethi %hi(symbol), %temp1
2547 add %temp1, EMBMEDANY_BASE_REG, %temp2
2548 or %temp2, %lo(symbol), %reg */
2549 if (data_segment_operand (op1, GET_MODE (op1)))
2551 if (temp)
2553 temp1 = temp; /* op0 is allowed. */
2554 temp2 = op0;
2556 else
2558 temp1 = gen_reg_rtx (DImode);
2559 temp2 = gen_reg_rtx (DImode);
2562 emit_insn (gen_embmedany_sethi (temp1, op1));
2563 emit_insn (gen_embmedany_brsum (temp2, temp1));
2564 emit_insn (gen_embmedany_losum (op0, temp2, op1));
2567 /* Text segment: sethi %uhi(symbol), %temp1
2568 sethi %hi(symbol), %temp2
2569 or %temp1, %ulo(symbol), %temp3
2570 sllx %temp3, 32, %temp4
2571 or %temp4, %temp2, %temp5
2572 or %temp5, %lo(symbol), %reg */
2573 else
2575 if (temp)
2577 /* It is possible that one of the registers we got for operands[2]
2578 might coincide with that of operands[0] (which is why we made
2579 it TImode). Pick the other one to use as our scratch. */
2580 if (rtx_equal_p (temp, op0))
2582 gcc_assert (ti_temp);
2583 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2585 temp1 = op0;
2586 temp2 = temp; /* op0 is _not_ allowed, see above. */
2587 temp3 = op0;
2588 temp4 = op0;
2589 temp5 = op0;
2591 else
2593 temp1 = gen_reg_rtx (DImode);
2594 temp2 = gen_reg_rtx (DImode);
2595 temp3 = gen_reg_rtx (DImode);
2596 temp4 = gen_reg_rtx (DImode);
2597 temp5 = gen_reg_rtx (DImode);
2600 emit_insn (gen_embmedany_textuhi (temp1, op1));
2601 emit_insn (gen_embmedany_texthi (temp2, op1));
2602 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
2603 emit_insn (gen_rtx_SET (temp4,
2604 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2605 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2606 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
2608 break;
2610 default:
2611 gcc_unreachable ();
2615 /* These avoid problems when cross compiling. If we do not
2616 go through all this hair then the optimizer will see
2617 invalid REG_EQUAL notes or in some cases none at all. */
2618 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
2619 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
2620 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
2621 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
2623 /* The optimizer is not to assume anything about exactly
2624 which bits are set for a HIGH; they are unspecified.
2625 Unfortunately, this leads to many missed optimizations
2626 during CSE.  We mask out the non-HIGH bits so the result
2627 matches a plain movdi, to alleviate this problem. */
2628 static rtx
2629 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
2631 return gen_rtx_SET (dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
2634 static rtx
2635 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
2637 return gen_rtx_SET (dest, GEN_INT (val));
2640 static rtx
2641 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
2643 return gen_rtx_IOR (DImode, src, GEN_INT (val));
2646 static rtx
2647 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
2649 return gen_rtx_XOR (DImode, src, GEN_INT (val));
2652 /* Worker routines for 64-bit constant formation on arch64.
2653 One of the key things to do in these emissions is
2654 to create as many temp REGs as possible.  This makes it
2655 possible for half-built constants to be used later when
2656 such values are similar to something required later on.
2657 Without doing this, the optimizer cannot see such
2658 opportunities. */
2660 static void sparc_emit_set_const64_quick1 (rtx, rtx,
2661 unsigned HOST_WIDE_INT, int);
2663 static void
2664 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
2665 unsigned HOST_WIDE_INT low_bits, int is_neg)
2667 unsigned HOST_WIDE_INT high_bits;
2669 if (is_neg)
2670 high_bits = (~low_bits) & 0xffffffff;
2671 else
2672 high_bits = low_bits;
2674 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2675 if (!is_neg)
2677 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2679 else
2681 /* If we are XOR'ing with -1, then we should emit a one's complement
2682 instead. This way the combiner will notice logical operations
2683 such as ANDN later on and substitute. */
2684 if ((low_bits & 0x3ff) == 0x3ff)
2686 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
2688 else
2690 emit_insn (gen_rtx_SET (op0,
2691 gen_safe_XOR64 (temp,
2692 (-(HOST_WIDE_INT)0x400
2693 | (low_bits & 0x3ff)))));
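/* For example, for the constant 0xffffffff80000000 (is_neg with
   low_bits = 0x80000000) the code above emits the HIGH of ~low_bits,
   i.e. temp = 0x7ffffc00, and then XORs in -0x400:
   0x7ffffc00 ^ 0xfffffffffffffc00 == 0xffffffff80000000.  */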
2698 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
2699 unsigned HOST_WIDE_INT, int);
2701 static void
2702 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
2703 unsigned HOST_WIDE_INT high_bits,
2704 unsigned HOST_WIDE_INT low_immediate,
2705 int shift_count)
2707 rtx temp2 = op0;
2709 if ((high_bits & 0xfffffc00) != 0)
2711 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2712 if ((high_bits & ~0xfffffc00) != 0)
2713 emit_insn (gen_rtx_SET (op0,
2714 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2715 else
2716 temp2 = temp;
2718 else
2720 emit_insn (gen_safe_SET64 (temp, high_bits));
2721 temp2 = temp;
2724 /* Now shift it up into place. */
2725 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp2,
2726 GEN_INT (shift_count))));
2728 /* If there is a low immediate part piece, finish up by
2729 putting that in as well. */
2730 if (low_immediate != 0)
2731 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (op0, low_immediate)));
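/* For example, quick2 with high_bits = 0x12345678, low_immediate =
   0x9ab and shift_count = 32 emits "sethi %hi(0x12345678), %temp;
   or %temp, 0x278, %op0; sllx %op0, 32, %op0; or %op0, 0x9ab, %op0",
   producing the constant 0x12345678000009ab.  */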
2734 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
2735 unsigned HOST_WIDE_INT);
2737 /* Full 64-bit constant decomposition. Even though this is the
2738 'worst' case, we still optimize a few things away. */
2739 static void
2740 sparc_emit_set_const64_longway (rtx op0, rtx temp,
2741 unsigned HOST_WIDE_INT high_bits,
2742 unsigned HOST_WIDE_INT low_bits)
2744 rtx sub_temp = op0;
2746 if (can_create_pseudo_p ())
2747 sub_temp = gen_reg_rtx (DImode);
2749 if ((high_bits & 0xfffffc00) != 0)
2751 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2752 if ((high_bits & ~0xfffffc00) != 0)
2753 emit_insn (gen_rtx_SET (sub_temp,
2754 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2755 else
2756 sub_temp = temp;
2758 else
2760 emit_insn (gen_safe_SET64 (temp, high_bits));
2761 sub_temp = temp;
2764 if (can_create_pseudo_p ())
2766 rtx temp2 = gen_reg_rtx (DImode);
2767 rtx temp3 = gen_reg_rtx (DImode);
2768 rtx temp4 = gen_reg_rtx (DImode);
2770 emit_insn (gen_rtx_SET (temp4, gen_rtx_ASHIFT (DImode, sub_temp,
2771 GEN_INT (32))));
2773 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
2774 if ((low_bits & ~0xfffffc00) != 0)
2776 emit_insn (gen_rtx_SET (temp3,
2777 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
2778 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp3)));
2780 else
2782 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp2)));
2785 else
2787 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
2788 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
2789 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
2790 int to_shift = 12;
2792 /* We are in the middle of reload, so this is really
2793 painful.  However, we still make an attempt to
2794 avoid emitting truly stupid code. */
2795 if (low1 != const0_rtx)
2797 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2798 GEN_INT (to_shift))));
2799 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low1)));
2800 sub_temp = op0;
2801 to_shift = 12;
2803 else
2805 to_shift += 12;
2807 if (low2 != const0_rtx)
2809 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2810 GEN_INT (to_shift))));
2811 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low2)));
2812 sub_temp = op0;
2813 to_shift = 8;
2815 else
2817 to_shift += 8;
2819 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2820 GEN_INT (to_shift))));
2821 if (low3 != const0_rtx)
2822 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low3)));
2823 /* phew... */
2827 /* Analyze a 64-bit constant for certain properties. */
2828 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
2829 unsigned HOST_WIDE_INT,
2830 int *, int *, int *);
2832 static void
2833 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
2834 unsigned HOST_WIDE_INT low_bits,
2835 int *hbsp, int *lbsp, int *abbasp)
2837 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
2838 int i;
2840 lowest_bit_set = highest_bit_set = -1;
2841 i = 0;
2844 if ((lowest_bit_set == -1)
2845 && ((low_bits >> i) & 1))
2846 lowest_bit_set = i;
2847 if ((highest_bit_set == -1)
2848 && ((high_bits >> (32 - i - 1)) & 1))
2849 highest_bit_set = (64 - i - 1);
2851 while (++i < 32
2852 && ((highest_bit_set == -1)
2853 || (lowest_bit_set == -1)));
2854 if (i == 32)
2856 i = 0;
2859 if ((lowest_bit_set == -1)
2860 && ((high_bits >> i) & 1))
2861 lowest_bit_set = i + 32;
2862 if ((highest_bit_set == -1)
2863 && ((low_bits >> (32 - i - 1)) & 1))
2864 highest_bit_set = 32 - i - 1;
2866 while (++i < 32
2867 && ((highest_bit_set == -1)
2868 || (lowest_bit_set == -1)));
2870 /* If there are no bits set this should have gone out
2871 as one instruction! */
2872 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
2873 all_bits_between_are_set = 1;
2874 for (i = lowest_bit_set; i <= highest_bit_set; i++)
2876 if (i < 32)
2878 if ((low_bits & (1 << i)) != 0)
2879 continue;
2881 else
2883 if ((high_bits & (1 << (i - 32))) != 0)
2884 continue;
2886 all_bits_between_are_set = 0;
2887 break;
2889 *hbsp = highest_bit_set;
2890 *lbsp = lowest_bit_set;
2891 *abbasp = all_bits_between_are_set;
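/* For example, for high_bits = 0 and low_bits = 0x0000ff00 the loops
   above find lowest_bit_set = 8 and highest_bit_set = 15, and since
   bits 8..15 are all set, all_bits_between_are_set = 1.  */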
2894 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
2896 static int
2897 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
2898 unsigned HOST_WIDE_INT low_bits)
2900 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
2902 if (high_bits == 0
2903 || high_bits == 0xffffffff)
2904 return 1;
2906 analyze_64bit_constant (high_bits, low_bits,
2907 &highest_bit_set, &lowest_bit_set,
2908 &all_bits_between_are_set);
2910 if ((highest_bit_set == 63
2911 || lowest_bit_set == 0)
2912 && all_bits_between_are_set != 0)
2913 return 1;
2915 if ((highest_bit_set - lowest_bit_set) < 21)
2916 return 1;
2918 return 0;
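/* For example, 0x000001fff0000000 (bits 28..40 set) spans fewer than
   21 bit positions and is built by a sethi of the focused bits plus a
   shift, while anything with high_bits of 0 or 0xffffffff is 2-insn
   material via the quick1 routine above.  */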
2921 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2922 unsigned HOST_WIDE_INT,
2923 int, int);
2925 static unsigned HOST_WIDE_INT
2926 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
2927 unsigned HOST_WIDE_INT low_bits,
2928 int lowest_bit_set, int shift)
2930 HOST_WIDE_INT hi, lo;
2932 if (lowest_bit_set < 32)
2934 lo = (low_bits >> lowest_bit_set) << shift;
2935 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
2937 else
2939 lo = 0;
2940 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2942 gcc_assert (! (hi & lo));
2943 return (hi | lo);
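/* For example, for the constant 0x00000003c0000000 (high_bits = 0x3,
   low_bits = 0xc0000000, lowest_bit_set = 30, shift = 0) this returns
   (0x3 << 2) | (0xc0000000 >> 30) == 0xf, i.e. the set bits re-based
   at bit 0.  */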
2946 /* Here we are sure to be arch64 and this is an integer constant
2947 being loaded into a register. Emit the most efficient
2948 insn sequence possible. Detection of all the 1-insn cases
2949 has been done already. */
2950 static void
2951 sparc_emit_set_const64 (rtx op0, rtx op1)
2953 unsigned HOST_WIDE_INT high_bits, low_bits;
2954 int lowest_bit_set, highest_bit_set;
2955 int all_bits_between_are_set;
2956 rtx temp = 0;
2958 /* Sanity check that we know what we are working with. */
2959 gcc_assert (TARGET_ARCH64
2960 && (GET_CODE (op0) == SUBREG
2961 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
2963 if (! can_create_pseudo_p ())
2964 temp = op0;
2966 if (GET_CODE (op1) != CONST_INT)
2968 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2969 return;
2972 if (! temp)
2973 temp = gen_reg_rtx (DImode);
2975 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
2976 low_bits = (INTVAL (op1) & 0xffffffff);
2978 /* low_bits bits 0 --> 31
2979 high_bits bits 32 --> 63 */
2981 analyze_64bit_constant (high_bits, low_bits,
2982 &highest_bit_set, &lowest_bit_set,
2983 &all_bits_between_are_set);
2985 /* First try for a 2-insn sequence. */
2987 /* These situations are preferred because the optimizer can
2988 * do more things with them:
2989 * 1) mov -1, %reg
2990 * sllx %reg, shift, %reg
2991 * 2) mov -1, %reg
2992 * srlx %reg, shift, %reg
2993 * 3) mov some_small_const, %reg
2994 * sllx %reg, shift, %reg
2996 if (((highest_bit_set == 63
2997 || lowest_bit_set == 0)
2998 && all_bits_between_are_set != 0)
2999 || ((highest_bit_set - lowest_bit_set) < 12))
3001 HOST_WIDE_INT the_const = -1;
3002 int shift = lowest_bit_set;
3004 if ((highest_bit_set != 63
3005 && lowest_bit_set != 0)
3006 || all_bits_between_are_set == 0)
3008 the_const =
3009 create_simple_focus_bits (high_bits, low_bits,
3010 lowest_bit_set, 0);
3012 else if (lowest_bit_set == 0)
3013 shift = -(63 - highest_bit_set);
3015 gcc_assert (SPARC_SIMM13_P (the_const));
3016 gcc_assert (shift != 0);
3018 emit_insn (gen_safe_SET64 (temp, the_const));
3019 if (shift > 0)
3020 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp,
3021 GEN_INT (shift))));
3022 else if (shift < 0)
3023 emit_insn (gen_rtx_SET (op0, gen_rtx_LSHIFTRT (DImode, temp,
3024 GEN_INT (-shift))));
3025 return;
3028 /* Now a range of 22 or fewer bits set somewhere.
3029 * 1) sethi %hi(focus_bits), %reg
3030 * sllx %reg, shift, %reg
3031 * 2) sethi %hi(focus_bits), %reg
3032 * srlx %reg, shift, %reg
3034 if ((highest_bit_set - lowest_bit_set) < 21)
3036 unsigned HOST_WIDE_INT focus_bits =
3037 create_simple_focus_bits (high_bits, low_bits,
3038 lowest_bit_set, 10);
3040 gcc_assert (SPARC_SETHI_P (focus_bits));
3041 gcc_assert (lowest_bit_set != 10);
3043 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
3045 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
3046 if (lowest_bit_set < 10)
3047 emit_insn (gen_rtx_SET (op0,
3048 gen_rtx_LSHIFTRT (DImode, temp,
3049 GEN_INT (10 - lowest_bit_set))));
3050 else if (lowest_bit_set > 10)
3051 emit_insn (gen_rtx_SET (op0,
3052 gen_rtx_ASHIFT (DImode, temp,
3053 GEN_INT (lowest_bit_set - 10))));
3054 return;
3057 /* 1) sethi %hi(low_bits), %reg
3058 * or %reg, %lo(low_bits), %reg
3059 * 2) sethi %hi(~low_bits), %reg
3060 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
3062 if (high_bits == 0
3063 || high_bits == 0xffffffff)
3065 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
3066 (high_bits == 0xffffffff));
3067 return;
3070 /* Now, try 3-insn sequences. */
3072 /* 1) sethi %hi(high_bits), %reg
3073 * or %reg, %lo(high_bits), %reg
3074 * sllx %reg, 32, %reg
3076 if (low_bits == 0)
3078 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
3079 return;
3082 /* We may be able to do something quick
3083 when the constant is negated, so try that. */
3084 if (const64_is_2insns ((~high_bits) & 0xffffffff,
3085 (~low_bits) & 0xfffffc00))
3087 /* NOTE: The trailing bits get XOR'd so we need the
3088 non-negated bits, not the negated ones. */
3089 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
3091 if ((((~high_bits) & 0xffffffff) == 0
3092 && ((~low_bits) & 0x80000000) == 0)
3093 || (((~high_bits) & 0xffffffff) == 0xffffffff
3094 && ((~low_bits) & 0x80000000) != 0))
3096 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
3098 if ((SPARC_SETHI_P (fast_int)
3099 && (~high_bits & 0xffffffff) == 0)
3100 || SPARC_SIMM13_P (fast_int))
3101 emit_insn (gen_safe_SET64 (temp, fast_int));
3102 else
3103 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
3105 else
3107 rtx negated_const;
3108 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
3109 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
3110 sparc_emit_set_const64 (temp, negated_const);
3113 /* If we are XOR'ing with -1, then we should emit a one's complement
3114 instead. This way the combiner will notice logical operations
3115 such as ANDN later on and substitute. */
3116 if (trailing_bits == 0x3ff)
3118 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
3120 else
3122 emit_insn (gen_rtx_SET (op0,
3123 gen_safe_XOR64 (temp,
3124 (-0x400 | trailing_bits))));
3126 return;
3129 /* 1) sethi %hi(xxx), %reg
3130 * or %reg, %lo(xxx), %reg
3131 * sllx %reg, yyy, %reg
3133 * ??? This is just a generalized version of the low_bits==0
3134 * thing above, FIXME...
3136 if ((highest_bit_set - lowest_bit_set) < 32)
3138 unsigned HOST_WIDE_INT focus_bits =
3139 create_simple_focus_bits (high_bits, low_bits,
3140 lowest_bit_set, 0);
3142 /* We can't get here in this state. */
3143 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
3145 /* So what we know is that the set bits straddle the
3146 middle of the 64-bit word. */
3147 sparc_emit_set_const64_quick2 (op0, temp,
3148 focus_bits, 0,
3149 lowest_bit_set);
3150 return;
3153 /* 1) sethi %hi(high_bits), %reg
3154 * or %reg, %lo(high_bits), %reg
3155 * sllx %reg, 32, %reg
3156 * or %reg, low_bits, %reg
3158 if (SPARC_SIMM13_P (low_bits) && ((int)low_bits > 0))
3160 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
3161 return;
3164 /* The easiest way when all else fails, is full decomposition. */
3165 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
3168 /* Implement TARGET_FIXED_CONDITION_CODE_REGS. */
3170 static bool
3171 sparc_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
3173 *p1 = SPARC_ICC_REG;
3174 *p2 = SPARC_FCC_REG;
3175 return true;
3178 /* Implement TARGET_MIN_ARITHMETIC_PRECISION. */
3180 static unsigned int
3181 sparc_min_arithmetic_precision (void)
3183 return 32;
3186 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
3187 return the mode to be used for the comparison. For floating-point,
3188 CCFP[E]mode is used. CCNZmode should be used when the first operand
3189 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
3190 processing is needed. */
3192 machine_mode
3193 select_cc_mode (enum rtx_code op, rtx x, rtx y)
3195 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3197 switch (op)
3199 case EQ:
3200 case NE:
3201 case UNORDERED:
3202 case ORDERED:
3203 case UNLT:
3204 case UNLE:
3205 case UNGT:
3206 case UNGE:
3207 case UNEQ:
3208 case LTGT:
3209 return CCFPmode;
3211 case LT:
3212 case LE:
3213 case GT:
3214 case GE:
3215 return CCFPEmode;
3217 default:
3218 gcc_unreachable ();
3221 else if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
3222 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
3223 && y == const0_rtx)
3225 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3226 return CCXNZmode;
3227 else
3228 return CCNZmode;
3230 else
3232 /* This is for the cmp<mode>_sne pattern. */
3233 if (GET_CODE (x) == NOT && y == constm1_rtx)
3235 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3236 return CCXCmode;
3237 else
3238 return CCCmode;
3241 /* This is for the [u]addvdi4_sp32 and [u]subvdi4_sp32 patterns. */
3242 if (!TARGET_ARCH64 && GET_MODE (x) == DImode)
3244 if (GET_CODE (y) == UNSPEC
3245 && (XINT (y, 1) == UNSPEC_ADDV
3246 || XINT (y, 1) == UNSPEC_SUBV
3247 || XINT (y, 1) == UNSPEC_NEGV))
3248 return CCVmode;
3249 else
3250 return CCCmode;
3253 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3254 return CCXmode;
3255 else
3256 return CCmode;
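/* For example, comparing (minus:DI a b) against zero on -m64 yields
   CCXNZmode, so the subtraction itself can set the condition codes,
   while an ordinary SImode register comparison falls through to plain
   CCmode.  */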
3260 /* Emit the compare insn and return the CC reg for a CODE comparison
3261 with operands X and Y. */
3263 static rtx
3264 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
3266 machine_mode mode;
3267 rtx cc_reg;
3269 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
3270 return x;
3272 mode = SELECT_CC_MODE (code, x, y);
3274 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
3275 fcc regs (cse can't tell they're really call clobbered regs and will
3276 remove a duplicate comparison even if there is an intervening function
3277 call - it will then try to reload the cc reg via an int reg which is why
3278 we need the movcc patterns). It is possible to provide the movcc
3279 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
3280 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
3281 to tell cse that CCFPE mode registers (even pseudos) are call
3282 clobbered. */
3284 /* ??? This is an experiment. Rather than making changes to cse which may
3285 or may not be easy/clean, we do our own cse. This is possible because
3286 we will generate hard registers. Cse knows they're call clobbered (it
3287 doesn't know the same thing about pseudos). If we guess wrong, no big
3288 deal, but if we win, great! */
3290 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3291 #if 1 /* experiment */
3293 int reg;
3294 /* We cycle through the registers to ensure they're all exercised. */
3295 static int next_fcc_reg = 0;
3296 /* Previous x,y for each fcc reg. */
3297 static rtx prev_args[4][2];
3299 /* Scan prev_args for x,y. */
3300 for (reg = 0; reg < 4; reg++)
3301 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
3302 break;
3303 if (reg == 4)
3305 reg = next_fcc_reg;
3306 prev_args[reg][0] = x;
3307 prev_args[reg][1] = y;
3308 next_fcc_reg = (next_fcc_reg + 1) & 3;
3310 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
3312 #else
3313 cc_reg = gen_reg_rtx (mode);
3314 #endif /* ! experiment */
3315 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3316 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
3317 else
3318 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
3320 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD.  If we do, this
3321 will only result in an unrecognizable insn, so there is no point in asserting. */
3322 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_COMPARE (mode, x, y)));
3324 return cc_reg;
3328 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
3331 gen_compare_reg (rtx cmp)
3333 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
3336 /* This function is used for v9 only.
3337 DEST is the target of the Scc insn.
3338 CODE is the code for an Scc's comparison.
3339 X and Y are the values we compare.
3341 This function is needed to turn
3343 (set (reg:SI 110)
3344 (gt (reg:CCX 100 %icc)
3345 (const_int 0)))
3346 into
3347 (set (reg:SI 110)
3348 (gt:DI (reg:CCX 100 %icc)
3349 (const_int 0)))
3351 IE: The instruction recognizer needs to see the mode of the comparison to
3352 find the right instruction. We could use "gt:DI" right in the
3353 define_expand, but leaving it out allows us to handle DI, SI, etc. */
3355 static int
3356 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
3358 if (! TARGET_ARCH64
3359 && (GET_MODE (x) == DImode
3360 || GET_MODE (dest) == DImode))
3361 return 0;
3363 /* Try to use the movrCC insns. */
3364 if (TARGET_ARCH64
3365 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
3366 && y == const0_rtx
3367 && v9_regcmp_p (compare_code))
3369 rtx op0 = x;
3370 rtx temp;
3372 /* Special case for op0 != 0. This can be done with one instruction if
3373 dest == x. */
3375 if (compare_code == NE
3376 && GET_MODE (dest) == DImode
3377 && rtx_equal_p (op0, dest))
3379 emit_insn (gen_rtx_SET (dest,
3380 gen_rtx_IF_THEN_ELSE (DImode,
3381 gen_rtx_fmt_ee (compare_code, DImode,
3382 op0, const0_rtx),
3383 const1_rtx,
3384 dest)));
3385 return 1;
3388 if (reg_overlap_mentioned_p (dest, op0))
3390 /* Handle the case where dest == x.
3391 We "early clobber" the result. */
3392 op0 = gen_reg_rtx (GET_MODE (x));
3393 emit_move_insn (op0, x);
3396 emit_insn (gen_rtx_SET (dest, const0_rtx));
3397 if (GET_MODE (op0) != DImode)
3399 temp = gen_reg_rtx (DImode);
3400 convert_move (temp, op0, 0);
3402 else
3403 temp = op0;
3404 emit_insn (gen_rtx_SET (dest,
3405 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3406 gen_rtx_fmt_ee (compare_code, DImode,
3407 temp, const0_rtx),
3408 const1_rtx,
3409 dest)));
3410 return 1;
3412 else
3414 x = gen_compare_reg_1 (compare_code, x, y);
3415 y = const0_rtx;
3417 emit_insn (gen_rtx_SET (dest, const0_rtx));
3418 emit_insn (gen_rtx_SET (dest,
3419 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3420 gen_rtx_fmt_ee (compare_code,
3421 GET_MODE (x), x, y),
3422 const1_rtx, dest)));
3423 return 1;
3428 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
3429 without jumps using the addx/subx instructions. */
3431 bool
3432 emit_scc_insn (rtx operands[])
3434 rtx tem, x, y;
3435 enum rtx_code code;
3436 machine_mode mode;
3438 /* The quad-word fp compare library routines all return nonzero to indicate
3439 true, which is different from the equivalent libgcc routines, so we must
3440 handle them specially here. */
3441 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
3443 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
3444 GET_CODE (operands[1]));
3445 operands[2] = XEXP (operands[1], 0);
3446 operands[3] = XEXP (operands[1], 1);
3449 code = GET_CODE (operands[1]);
3450 x = operands[2];
3451 y = operands[3];
3452 mode = GET_MODE (x);
3454 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
3455 more applications). The exception to this is "reg != 0" which can
3456 be done in one instruction on v9 (so we do it). */
3457 if ((code == EQ || code == NE) && (mode == SImode || mode == DImode))
3459 if (y != const0_rtx)
3460 x = force_reg (mode, gen_rtx_XOR (mode, x, y));
3462 rtx pat = gen_rtx_SET (operands[0],
3463 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3464 x, const0_rtx));
3466 /* If we can use addx/subx or addxc, add a clobber for CC. */
3467 if (mode == SImode || (code == NE && TARGET_VIS3))
3469 rtx clobber
3470 = gen_rtx_CLOBBER (VOIDmode,
3471 gen_rtx_REG (mode == SImode ? CCmode : CCXmode,
3472 SPARC_ICC_REG));
3473 pat = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, pat, clobber));
3476 emit_insn (pat);
3477 return true;
3480 /* We can do LTU in DImode using the addxc instruction with VIS3. */
3481 if (TARGET_ARCH64
3482 && mode == DImode
3483 && !((code == LTU || code == GTU) && TARGET_VIS3)
3484 && gen_v9_scc (operands[0], code, x, y))
3485 return true;
3487 /* We can do LTU and GEU using the addx/subx instructions too. And
3488 for GTU/LEU, if both operands are registers, swap them and fall
3489 back to the easy case. */
3490 if (code == GTU || code == LEU)
3492 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
3493 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
3495 tem = x;
3496 x = y;
3497 y = tem;
3498 code = swap_condition (code);
3502 if (code == LTU || code == GEU)
3504 emit_insn (gen_rtx_SET (operands[0],
3505 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3506 gen_compare_reg_1 (code, x, y),
3507 const0_rtx)));
3508 return true;
3511 /* All the possibilities to use addx/subx-based sequences have been
3512 exhausted, so try for a 3-instruction sequence using v9 conditional
3513 moves. */
3514 if (TARGET_V9 && gen_v9_scc (operands[0], code, x, y))
3515 return true;
3517 /* Nope, do branches. */
3518 return false;
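/* For example, "dest = (x != 0)" in SImode expands above to a pattern
   with a CC clobber that is presumably matched later to the classic
   branch-free carry idiom "subcc %g0, %x, %g0; addx %g0, 0, %dest",
   which sets dest to 1 exactly when x is nonzero.  */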
3521 /* Emit a conditional jump insn for the v9 architecture using comparison code
3522 CODE and jump target LABEL.
3523 This function exists to take advantage of the v9 brxx insns. */
3525 static void
3526 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
3528 emit_jump_insn (gen_rtx_SET (pc_rtx,
3529 gen_rtx_IF_THEN_ELSE (VOIDmode,
3530 gen_rtx_fmt_ee (code, GET_MODE (op0),
3531 op0, const0_rtx),
3532 gen_rtx_LABEL_REF (VOIDmode, label),
3533 pc_rtx)));
3536 /* Emit a conditional jump insn for the UA2011 architecture using
3537 comparison code CODE and jump target LABEL. This function exists
3538 to take advantage of the UA2011 Compare and Branch insns. */
3540 static void
3541 emit_cbcond_insn (enum rtx_code code, rtx op0, rtx op1, rtx label)
3543 rtx if_then_else;
3545 if_then_else = gen_rtx_IF_THEN_ELSE (VOIDmode,
3546 gen_rtx_fmt_ee(code, GET_MODE(op0),
3547 op0, op1),
3548 gen_rtx_LABEL_REF (VOIDmode, label),
3549 pc_rtx);
3551 emit_jump_insn (gen_rtx_SET (pc_rtx, if_then_else));
3554 void
3555 emit_conditional_branch_insn (rtx operands[])
3557 /* The quad-word fp compare library routines all return nonzero to indicate
3558 true, which is different from the equivalent libgcc routines, so we must
3559 handle them specially here. */
3560 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
3562 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
3563 GET_CODE (operands[0]));
3564 operands[1] = XEXP (operands[0], 0);
3565 operands[2] = XEXP (operands[0], 1);
3568 /* If we can tell early on that the comparison is against a constant
3569 that won't fit in the 5-bit signed immediate field of a cbcond,
3570 use one of the other v9 conditional branch sequences. */
3571 if (TARGET_CBCOND
3572 && GET_CODE (operands[1]) == REG
3573 && (GET_MODE (operands[1]) == SImode
3574 || (TARGET_ARCH64 && GET_MODE (operands[1]) == DImode))
3575 && (GET_CODE (operands[2]) != CONST_INT
3576 || SPARC_SIMM5_P (INTVAL (operands[2]))))
3578 emit_cbcond_insn (GET_CODE (operands[0]), operands[1], operands[2], operands[3]);
3579 return;
3582 if (TARGET_ARCH64 && operands[2] == const0_rtx
3583 && GET_CODE (operands[1]) == REG
3584 && GET_MODE (operands[1]) == DImode)
3586 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
3587 return;
3590 operands[1] = gen_compare_reg (operands[0]);
3591 operands[2] = const0_rtx;
3592 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
3593 operands[1], operands[2]);
3594 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
3595 operands[3]));
3599 /* Generate a DFmode part of a hard TFmode register.
3600 REG is the TFmode hard register, LOW is 1 for the
3601 low 64 bits of the register and 0 otherwise.
3604 gen_df_reg (rtx reg, int low)
3606 int regno = REGNO (reg);
3608 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
3609 regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;
3610 return gen_rtx_REG (DFmode, regno);
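/* For example, for a TFmode value living in %f0 (which occupies
   %f0-%f3), this returns %f0 for the high part (LOW == 0) and %f2 for
   the low part (LOW == 1), since SPARC words are big-endian and a
   DFmode value spans two FP registers.  */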
3613 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
3614 Unlike normal calls, TFmode operands are passed by reference. It is
3615 assumed that no more than 3 operands are required. */
3617 static void
3618 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
3620 rtx ret_slot = NULL, arg[3], func_sym;
3621 int i;
3623 /* We only expect to be called for conversions, unary, and binary ops. */
3624 gcc_assert (nargs == 2 || nargs == 3);
3626 for (i = 0; i < nargs; ++i)
3628 rtx this_arg = operands[i];
3629 rtx this_slot;
3631 /* TFmode arguments and return values are passed by reference. */
3632 if (GET_MODE (this_arg) == TFmode)
3634 int force_stack_temp;
3636 force_stack_temp = 0;
3637 if (TARGET_BUGGY_QP_LIB && i == 0)
3638 force_stack_temp = 1;
3640 if (GET_CODE (this_arg) == MEM
3641 && ! force_stack_temp)
3643 tree expr = MEM_EXPR (this_arg);
3644 if (expr)
3645 mark_addressable (expr);
3646 this_arg = XEXP (this_arg, 0);
3648 else if (CONSTANT_P (this_arg)
3649 && ! force_stack_temp)
3651 this_slot = force_const_mem (TFmode, this_arg);
3652 this_arg = XEXP (this_slot, 0);
3654 else
3656 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
3658 /* Operand 0 is the return value. We'll copy it out later. */
3659 if (i > 0)
3660 emit_move_insn (this_slot, this_arg);
3661 else
3662 ret_slot = this_slot;
3664 this_arg = XEXP (this_slot, 0);
3668 arg[i] = this_arg;
3671 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
3673 if (GET_MODE (operands[0]) == TFmode)
3675 if (nargs == 2)
3676 emit_library_call (func_sym, LCT_NORMAL, VOIDmode,
3677 arg[0], GET_MODE (arg[0]),
3678 arg[1], GET_MODE (arg[1]));
3679 else
3680 emit_library_call (func_sym, LCT_NORMAL, VOIDmode,
3681 arg[0], GET_MODE (arg[0]),
3682 arg[1], GET_MODE (arg[1]),
3683 arg[2], GET_MODE (arg[2]));
3685 if (ret_slot)
3686 emit_move_insn (operands[0], ret_slot);
3688 else
3690 rtx ret;
3692 gcc_assert (nargs == 2);
3694 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
3695 GET_MODE (operands[0]),
3696 arg[1], GET_MODE (arg[1]));
3698 if (ret != operands[0])
3699 emit_move_insn (operands[0], ret);
3703 /* Expand soft-float TFmode calls to sparc abi routines. */
3705 static void
3706 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
3708 const char *func;
3710 switch (code)
3712 case PLUS:
3713 func = "_Qp_add";
3714 break;
3715 case MINUS:
3716 func = "_Qp_sub";
3717 break;
3718 case MULT:
3719 func = "_Qp_mul";
3720 break;
3721 case DIV:
3722 func = "_Qp_div";
3723 break;
3724 default:
3725 gcc_unreachable ();
3728 emit_soft_tfmode_libcall (func, 3, operands);
3731 static void
3732 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
3734 const char *func;
3736 gcc_assert (code == SQRT);
3737 func = "_Qp_sqrt";
3739 emit_soft_tfmode_libcall (func, 2, operands);
3742 static void
3743 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
3745 const char *func;
3747 switch (code)
3749 case FLOAT_EXTEND:
3750 switch (GET_MODE (operands[1]))
3752 case E_SFmode:
3753 func = "_Qp_stoq";
3754 break;
3755 case E_DFmode:
3756 func = "_Qp_dtoq";
3757 break;
3758 default:
3759 gcc_unreachable ();
3761 break;
3763 case FLOAT_TRUNCATE:
3764 switch (GET_MODE (operands[0]))
3766 case E_SFmode:
3767 func = "_Qp_qtos";
3768 break;
3769 case E_DFmode:
3770 func = "_Qp_qtod";
3771 break;
3772 default:
3773 gcc_unreachable ();
3775 break;
3777 case FLOAT:
3778 switch (GET_MODE (operands[1]))
3780 case E_SImode:
3781 func = "_Qp_itoq";
3782 if (TARGET_ARCH64)
3783 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
3784 break;
3785 case E_DImode:
3786 func = "_Qp_xtoq";
3787 break;
3788 default:
3789 gcc_unreachable ();
3791 break;
3793 case UNSIGNED_FLOAT:
3794 switch (GET_MODE (operands[1]))
3796 case E_SImode:
3797 func = "_Qp_uitoq";
3798 if (TARGET_ARCH64)
3799 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
3800 break;
3801 case E_DImode:
3802 func = "_Qp_uxtoq";
3803 break;
3804 default:
3805 gcc_unreachable ();
3807 break;
3809 case FIX:
3810 switch (GET_MODE (operands[0]))
3812 case E_SImode:
3813 func = "_Qp_qtoi";
3814 break;
3815 case E_DImode:
3816 func = "_Qp_qtox";
3817 break;
3818 default:
3819 gcc_unreachable ();
3821 break;
3823 case UNSIGNED_FIX:
3824 switch (GET_MODE (operands[0]))
3826 case E_SImode:
3827 func = "_Qp_qtoui";
3828 break;
3829 case E_DImode:
3830 func = "_Qp_qtoux";
3831 break;
3832 default:
3833 gcc_unreachable ();
3835 break;
3837 default:
3838 gcc_unreachable ();
3841 emit_soft_tfmode_libcall (func, 2, operands);
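/* The _Qp_* names used above follow a simple scheme: one mnemonic per
   mode on each side of "to" -- s/d for SFmode/DFmode, i/x for 32/64-bit
   signed integers, ui/ux for their unsigned variants and q for TFmode.
   For example, DImode -> TFmode is _Qp_xtoq and the reverse direction
   is _Qp_qtox.  */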
3844 /* Expand a hard-float TFmode operation. All arguments must be in
3845 registers. */
3847 static void
3848 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
3850 rtx op, dest;
3852 if (GET_RTX_CLASS (code) == RTX_UNARY)
3854 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3855 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
3857 else
3859 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3860 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
3861 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3862 operands[1], operands[2]);
3865 if (register_operand (operands[0], VOIDmode))
3866 dest = operands[0];
3867 else
3868 dest = gen_reg_rtx (GET_MODE (operands[0]));
3870 emit_insn (gen_rtx_SET (dest, op));
3872 if (dest != operands[0])
3873 emit_move_insn (operands[0], dest);
3876 void
3877 emit_tfmode_binop (enum rtx_code code, rtx *operands)
3879 if (TARGET_HARD_QUAD)
3880 emit_hard_tfmode_operation (code, operands);
3881 else
3882 emit_soft_tfmode_binop (code, operands);
3885 void
3886 emit_tfmode_unop (enum rtx_code code, rtx *operands)
3888 if (TARGET_HARD_QUAD)
3889 emit_hard_tfmode_operation (code, operands);
3890 else
3891 emit_soft_tfmode_unop (code, operands);
3894 void
3895 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
3897 if (TARGET_HARD_QUAD)
3898 emit_hard_tfmode_operation (code, operands);
3899 else
3900 emit_soft_tfmode_cvt (code, operands);
3903 /* Return nonzero if a branch/jump/call instruction will be emitting
3904 a nop into its delay slot. */
3906 int
3907 empty_delay_slot (rtx_insn *insn)
3909 rtx seq;
3911 /* If no previous instruction (should not happen), return true. */
3912 if (PREV_INSN (insn) == NULL)
3913 return 1;
3915 seq = NEXT_INSN (PREV_INSN (insn));
3916 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
3917 return 0;
3919 return 1;
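/* Illustration (not from the sources): after delayed-branch scheduling,
   a branch whose slot was filled is wrapped together with the slot insn
   in a SEQUENCE, roughly

     (insn (sequence [(jump_insn ...) (insn <slot filler>)]))

   while an unfilled branch stays a bare jump_insn, which is what the
   predicate above detects so that the extra nop can be accounted for.  */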
3922 /* Return nonzero if we should emit a nop after a cbcond instruction.
3923 The cbcond instruction does not have a delay slot; however, there is
3924 a severe performance penalty if a control transfer appears right
3925 after a cbcond. Therefore we emit a nop when we detect this
3926 situation. */
3928 int
3929 emit_cbcond_nop (rtx_insn *insn)
3931 rtx next = next_active_insn (insn);
3933 if (!next)
3934 return 1;
3936 if (NONJUMP_INSN_P (next)
3937 && GET_CODE (PATTERN (next)) == SEQUENCE)
3938 next = XVECEXP (PATTERN (next), 0, 0);
3939 else if (CALL_P (next)
3940 && GET_CODE (PATTERN (next)) == PARALLEL)
3942 rtx delay = XVECEXP (PATTERN (next), 0, 1);
3944 if (GET_CODE (delay) == RETURN)
3946 /* It's a sibling call. Do not emit the nop if we're going
3947 to emit something other than the jump itself as the first
3948 instruction of the sibcall sequence. */
3949 if (sparc_leaf_function_p || TARGET_FLAT)
3950 return 0;
3954 if (NONJUMP_INSN_P (next))
3955 return 0;
3957 return 1;
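/* Illustrative asm (not from the sources): without the extra nop,

     cwbne  %o0, %o1, .L2
     ba,pt  %xcc, .L3

   would place a control transfer right after the cbcond; emitting

     cwbne  %o0, %o1, .L2
     nop
     ba,pt  %xcc, .L3

   avoids the penalty described above.  */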
3960 /* Return nonzero if TRIAL can go into the call delay slot. */
3962 int
3963 eligible_for_call_delay (rtx_insn *trial)
3965 rtx pat;
3967 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
3968 return 0;
3970 /* Binutils allows
3971 call __tls_get_addr, %tgd_call (foo)
3972 add %l7, %o0, %o0, %tgd_add (foo)
3973 while Sun as/ld does not. */
3974 if (TARGET_GNU_TLS || !TARGET_TLS)
3975 return 1;
3977 pat = PATTERN (trial);
3979 /* We must reject tgd_add{32|64}, i.e.
3980 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
3981 and tldm_add{32|64}, i.e.
3982 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
3983 for Sun as/ld. */
3984 if (GET_CODE (pat) == SET
3985 && GET_CODE (SET_SRC (pat)) == PLUS)
3987 rtx unspec = XEXP (SET_SRC (pat), 1);
3989 if (GET_CODE (unspec) == UNSPEC
3990 && (XINT (unspec, 1) == UNSPEC_TLSGD
3991 || XINT (unspec, 1) == UNSPEC_TLSLDM))
3992 return 0;
3995 return 1;
3998 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
3999 instruction. RETURN_P is true if the v9 variant 'return' is to be
4000 considered in the test too.
4002 TRIAL must be a SET whose destination is a REG appropriate for the
4003 'restore' instruction or, if RETURN_P is true, for the 'return'
4004 instruction. */
4006 static int
4007 eligible_for_restore_insn (rtx trial, bool return_p)
4009 rtx pat = PATTERN (trial);
4010 rtx src = SET_SRC (pat);
4011 bool src_is_freg = false;
4012 rtx src_reg;
4014 /* Since we now can do moves between float and integer registers when
4015 VIS3 is enabled, we have to catch this case. We can allow such
4016 moves when doing a 'return' however. */
4017 src_reg = src;
4018 if (GET_CODE (src_reg) == SUBREG)
4019 src_reg = SUBREG_REG (src_reg);
4020 if (GET_CODE (src_reg) == REG
4021 && SPARC_FP_REG_P (REGNO (src_reg)))
4022 src_is_freg = true;
4024 /* The 'restore src,%g0,dest' pattern for word mode and below. */
4025 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
4026 && arith_operand (src, GET_MODE (src))
4027 && ! src_is_freg)
4029 if (TARGET_ARCH64)
4030 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
4031 else
4032 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
4035 /* The 'restore src,%g0,dest' pattern for double-word mode. */
4036 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
4037 && arith_double_operand (src, GET_MODE (src))
4038 && ! src_is_freg)
4039 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
4041 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
4042 else if (! TARGET_FPU && register_operand (src, SFmode))
4043 return 1;
4045 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
4046 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
4047 return 1;
4049 /* If we have the 'return' instruction, anything that does not use
4050 local or output registers and can go into a delay slot wins. */
4051 else if (return_p && TARGET_V9 && !epilogue_renumber (&pat, 1))
4052 return 1;
4054 /* The 'restore src1,src2,dest' pattern for SImode. */
4055 else if (GET_CODE (src) == PLUS
4056 && register_operand (XEXP (src, 0), SImode)
4057 && arith_operand (XEXP (src, 1), SImode))
4058 return 1;
4060 /* The 'restore src1,src2,dest' pattern for DImode. */
4061 else if (GET_CODE (src) == PLUS
4062 && register_operand (XEXP (src, 0), DImode)
4063 && arith_double_operand (XEXP (src, 1), DImode))
4064 return 1;
4066 /* The 'restore src1,%lo(src2),dest' pattern. */
4067 else if (GET_CODE (src) == LO_SUM
4068 && ! TARGET_CM_MEDMID
4069 && ((register_operand (XEXP (src, 0), SImode)
4070 && immediate_operand (XEXP (src, 1), SImode))
4071 || (TARGET_ARCH64
4072 && register_operand (XEXP (src, 0), DImode)
4073 && immediate_operand (XEXP (src, 1), DImode))))
4074 return 1;
4076 /* The 'restore src,src,dest' pattern. */
4077 else if (GET_CODE (src) == ASHIFT
4078 && (register_operand (XEXP (src, 0), SImode)
4079 || register_operand (XEXP (src, 0), DImode))
4080 && XEXP (src, 1) == const1_rtx)
4081 return 1;
4083 return 0;
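/* As an example (illustration only), a function returning a + b can
   fold the final addition into the epilogue:

     ret
      restore %i0, %i1, %o0   ! the 'restore src1,src2,dest' shape above

   the sources are read in the callee's window and the sum lands in the
   caller's %o0 as the window is restored.  */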
4086 /* Return nonzero if TRIAL can go into the function return's delay slot. */
4088 int
4089 eligible_for_return_delay (rtx_insn *trial)
4091 int regno;
4092 rtx pat;
4094 /* If the function uses __builtin_eh_return, the eh_return machinery
4095 occupies the delay slot. */
4096 if (crtl->calls_eh_return)
4097 return 0;
4099 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
4100 return 0;
4102 /* In the case of a leaf or flat function, anything can go into the slot. */
4103 if (sparc_leaf_function_p || TARGET_FLAT)
4104 return 1;
4106 if (!NONJUMP_INSN_P (trial))
4107 return 0;
4109 pat = PATTERN (trial);
4110 if (GET_CODE (pat) == PARALLEL)
4112 int i;
4114 if (! TARGET_V9)
4115 return 0;
4116 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
4118 rtx expr = XVECEXP (pat, 0, i);
4119 if (GET_CODE (expr) != SET)
4120 return 0;
4121 if (GET_CODE (SET_DEST (expr)) != REG)
4122 return 0;
4123 regno = REGNO (SET_DEST (expr));
4124 if (regno >= 8 && regno < 24)
4125 return 0;
4127 return !epilogue_renumber (&pat, 1);
4130 if (GET_CODE (pat) != SET)
4131 return 0;
4133 if (GET_CODE (SET_DEST (pat)) != REG)
4134 return 0;
4136 regno = REGNO (SET_DEST (pat));
4138 /* Otherwise, only operations which can be done in tandem with
4139 a `restore' or `return' insn can go into the delay slot. */
4140 if (regno >= 8 && regno < 24)
4141 return 0;
4143 /* If this instruction sets up a floating-point register and we have a return
4144 instruction, it can probably go in. But restore will not work
4145 with FP_REGS. */
4146 if (! SPARC_INT_REG_P (regno))
4147 return TARGET_V9 && !epilogue_renumber (&pat, 1);
4149 return eligible_for_restore_insn (trial, true);
4152 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
4154 int
4155 eligible_for_sibcall_delay (rtx_insn *trial)
4157 rtx pat;
4159 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
4160 return 0;
4162 if (!NONJUMP_INSN_P (trial))
4163 return 0;
4165 pat = PATTERN (trial);
4167 if (sparc_leaf_function_p || TARGET_FLAT)
4169 /* If the tail call is done using the call instruction,
4170 we have to restore %o7 in the delay slot. */
4171 if (LEAF_SIBCALL_SLOT_RESERVED_P)
4172 return 0;
4174 /* %g1 is used to build the function address. */
4175 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
4176 return 0;
4178 return 1;
4181 if (GET_CODE (pat) != SET)
4182 return 0;
4184 /* Otherwise, only operations which can be done in tandem with
4185 a `restore' insn can go into the delay slot. */
4186 if (GET_CODE (SET_DEST (pat)) != REG
4187 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
4188 || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat))))
4189 return 0;
4191 /* If it mentions %o7, it can't go in, because sibcall will clobber it
4192 in most cases. */
4193 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
4194 return 0;
4196 return eligible_for_restore_insn (trial, false);
4199 /* Determine if it's legal to put X into the constant pool. This
4200 is not possible if X contains the address of a symbol that is
4201 not constant (TLS) or not known at final link time (PIC). */
4203 static bool
4204 sparc_cannot_force_const_mem (machine_mode mode, rtx x)
4206 switch (GET_CODE (x))
4208 case CONST_INT:
4209 case CONST_WIDE_INT:
4210 case CONST_DOUBLE:
4211 case CONST_VECTOR:
4212 /* Accept all non-symbolic constants. */
4213 return false;
4215 case LABEL_REF:
4216 /* Labels are OK iff we are non-PIC. */
4217 return flag_pic != 0;
4219 case SYMBOL_REF:
4220 /* 'Naked' TLS symbol references are never OK;
4221 non-TLS symbols are OK iff we are non-PIC. */
4222 if (SYMBOL_REF_TLS_MODEL (x))
4223 return true;
4224 else
4225 return flag_pic != 0;
4227 case CONST:
4228 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
4229 case PLUS:
4230 case MINUS:
4231 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
4232 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
4233 case UNSPEC:
4234 return true;
4235 default:
4236 gcc_unreachable ();
4240 /* Global Offset Table support. */
4241 static GTY(()) rtx got_helper_rtx = NULL_RTX;
4242 static GTY(()) rtx global_offset_table_rtx = NULL_RTX;
4244 /* Return the SYMBOL_REF for the Global Offset Table. */
4246 static GTY(()) rtx sparc_got_symbol = NULL_RTX;
4248 static rtx
4249 sparc_got (void)
4251 if (!sparc_got_symbol)
4252 sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
4254 return sparc_got_symbol;
4257 /* Ensure that we are not using patterns that are not OK with PIC. */
4259 int
4260 check_pic (int i)
4262 rtx op;
4264 switch (flag_pic)
4266 case 1:
4267 op = recog_data.operand[i];
4268 gcc_assert (GET_CODE (op) != SYMBOL_REF
4269 && (GET_CODE (op) != CONST
4270 || (GET_CODE (XEXP (op, 0)) == MINUS
4271 && XEXP (XEXP (op, 0), 0) == sparc_got ()
4272 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
4273 /* fallthrough */
4274 case 2:
4275 default:
4276 return 1;
4280 /* Return true if X is an address which needs a temporary register when
4281 reloaded while generating PIC code. */
4283 int
4284 pic_address_needs_scratch (rtx x)
4286 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
4287 if (GET_CODE (x) == CONST
4288 && GET_CODE (XEXP (x, 0)) == PLUS
4289 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
4290 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4291 && !SMALL_INT (XEXP (XEXP (x, 0), 1)))
4292 return 1;
4294 return 0;
4297 /* Determine if a given RTX is a valid constant. We already know this
4298 satisfies CONSTANT_P. */
4300 static bool
4301 sparc_legitimate_constant_p (machine_mode mode, rtx x)
4303 switch (GET_CODE (x))
4305 case CONST:
4306 case SYMBOL_REF:
4307 if (sparc_tls_referenced_p (x))
4308 return false;
4309 break;
4311 case CONST_DOUBLE:
4312 /* Floating-point constants are generally not OK.
4313 The only exceptions are 0.0 and all-ones in VIS. */
4314 if (TARGET_VIS
4315 && SCALAR_FLOAT_MODE_P (mode)
4316 && (const_zero_operand (x, mode)
4317 || const_all_ones_operand (x, mode)))
4318 return true;
4320 return false;
4322 case CONST_VECTOR:
4323 /* Vector constants are generally not OK.
4324 The only exceptions are 0 and -1 in VIS. */
4325 if (TARGET_VIS
4326 && (const_zero_operand (x, mode)
4327 || const_all_ones_operand (x, mode)))
4328 return true;
4330 return false;
4332 default:
4333 break;
4336 return true;
4339 /* Determine if a given RTX is a valid constant address. */
4341 bool
4342 constant_address_p (rtx x)
4344 switch (GET_CODE (x))
4346 case LABEL_REF:
4347 case CONST_INT:
4348 case HIGH:
4349 return true;
4351 case CONST:
4352 if (flag_pic && pic_address_needs_scratch (x))
4353 return false;
4354 return sparc_legitimate_constant_p (Pmode, x);
4356 case SYMBOL_REF:
4357 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
4359 default:
4360 return false;
4364 /* Nonzero if the constant value X is a legitimate general operand
4365 when generating PIC code. It is given that flag_pic is on and
4366 that X satisfies CONSTANT_P. */
4368 bool
4369 legitimate_pic_operand_p (rtx x)
4371 if (pic_address_needs_scratch (x))
4372 return false;
4373 if (sparc_tls_referenced_p (x))
4374 return false;
4375 return true;
4378 /* Return true if X is a representation of the PIC register. */
4380 static bool
4381 sparc_pic_register_p (rtx x)
4383 if (!REG_P (x) || !pic_offset_table_rtx)
4384 return false;
4386 if (x == pic_offset_table_rtx)
4387 return true;
4389 if (!HARD_REGISTER_P (pic_offset_table_rtx)
4390 && (HARD_REGISTER_P (x) || lra_in_progress)
4391 && ORIGINAL_REGNO (x) == REGNO (pic_offset_table_rtx))
4392 return true;
4394 return false;
4397 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
4398 (CONST_INT_P (X) \
4399 && INTVAL (X) >= -0x1000 \
4400 && INTVAL (X) <= (0x1000 - GET_MODE_SIZE (MODE)))
4402 #define RTX_OK_FOR_OLO10_P(X, MODE) \
4403 (CONST_INT_P (X) \
4404 && INTVAL (X) >= -0x1000 \
4405 && INTVAL (X) <= (0xc00 - GET_MODE_SIZE (MODE)))
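/* Both macros describe the 13-bit signed immediate field of loads and
   stores, shrunk so that the last byte of the access still fits.  For
   example, a DFmode access (8 bytes) satisfies RTX_OK_FOR_OFFSET_P for
   offsets in [-4096, 4088], while RTX_OK_FOR_OLO10_P only accepts
   [-4096, 3064] because up to 0x3ff may still be added by %lo().  */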
4407 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
4409 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
4410 ordinarily. This changes a bit when generating PIC. */
4412 static bool
4413 sparc_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4415 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
4417 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
4418 rs1 = addr;
4419 else if (GET_CODE (addr) == PLUS)
4421 rs1 = XEXP (addr, 0);
4422 rs2 = XEXP (addr, 1);
4424 /* Canonicalize. REG comes first; if there are no regs,
4425 LO_SUM comes first. */
4426 if (!REG_P (rs1)
4427 && GET_CODE (rs1) != SUBREG
4428 && (REG_P (rs2)
4429 || GET_CODE (rs2) == SUBREG
4430 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
4432 rs1 = XEXP (addr, 1);
4433 rs2 = XEXP (addr, 0);
4436 if ((flag_pic == 1
4437 && sparc_pic_register_p (rs1)
4438 && !REG_P (rs2)
4439 && GET_CODE (rs2) != SUBREG
4440 && GET_CODE (rs2) != LO_SUM
4441 && GET_CODE (rs2) != MEM
4442 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
4443 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
4444 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
4445 || ((REG_P (rs1)
4446 || GET_CODE (rs1) == SUBREG)
4447 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
4449 imm1 = rs2;
4450 rs2 = NULL;
4452 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
4453 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
4455 /* We prohibit REG + REG for TFmode when there are no quad move insns
4456 and we consequently need to split. We do this because REG+REG
4457 is not an offsettable address. If we get the situation in reload
4458 where source and destination of a movtf pattern are both MEMs with
4459 REG+REG address, then only one of them gets converted to an
4460 offsettable address. */
4461 if (mode == TFmode
4462 && ! (TARGET_ARCH64 && TARGET_HARD_QUAD))
4463 return 0;
4465 /* Likewise for TImode, but in all cases. */
4466 if (mode == TImode)
4467 return 0;
4469 /* We prohibit REG + REG on ARCH32 if not optimizing for
4470 DFmode/DImode because then mem_min_alignment is likely to be zero
4471 after reload and the forced split would lack a matching splitter
4472 pattern. */
4473 if (TARGET_ARCH32 && !optimize
4474 && (mode == DFmode || mode == DImode))
4475 return 0;
4477 else if (USE_AS_OFFSETABLE_LO10
4478 && GET_CODE (rs1) == LO_SUM
4479 && TARGET_ARCH64
4480 && ! TARGET_CM_MEDMID
4481 && RTX_OK_FOR_OLO10_P (rs2, mode))
4483 rs2 = NULL;
4484 imm1 = XEXP (rs1, 1);
4485 rs1 = XEXP (rs1, 0);
4486 if (!CONSTANT_P (imm1)
4487 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
4488 return 0;
4491 else if (GET_CODE (addr) == LO_SUM)
4493 rs1 = XEXP (addr, 0);
4494 imm1 = XEXP (addr, 1);
4496 if (!CONSTANT_P (imm1)
4497 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
4498 return 0;
4500 /* We can't allow TFmode in 32-bit mode, because an offset greater
4501 than the alignment (8) may cause the LO_SUM to overflow. */
4502 if (mode == TFmode && TARGET_ARCH32)
4503 return 0;
4505 /* During reload, accept the HIGH+LO_SUM construct generated by
4506 sparc_legitimize_reload_address. */
4507 if (reload_in_progress
4508 && GET_CODE (rs1) == HIGH
4509 && XEXP (rs1, 0) == imm1)
4510 return 1;
4512 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
4513 return 1;
4514 else
4515 return 0;
4517 if (GET_CODE (rs1) == SUBREG)
4518 rs1 = SUBREG_REG (rs1);
4519 if (!REG_P (rs1))
4520 return 0;
4522 if (rs2)
4524 if (GET_CODE (rs2) == SUBREG)
4525 rs2 = SUBREG_REG (rs2);
4526 if (!REG_P (rs2))
4527 return 0;
4530 if (strict)
4532 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
4533 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
4534 return 0;
4536 else
4538 if ((! SPARC_INT_REG_P (REGNO (rs1))
4539 && REGNO (rs1) != FRAME_POINTER_REGNUM
4540 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
4541 || (rs2
4542 && (! SPARC_INT_REG_P (REGNO (rs2))
4543 && REGNO (rs2) != FRAME_POINTER_REGNUM
4544 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
4545 return 0;
4547 return 1;
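/* Examples (illustration only): [%g1+%g2] and [%fp-8] are accepted by
   the code above for SImode, whereas REG+REG is rejected for TFmode
   without hard quad support and for TImode always, and LO_SUM addresses
   of TLS symbols are rejected in all cases.  */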
4550 /* Return the SYMBOL_REF for the tls_get_addr function. */
4552 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
4554 static rtx
4555 sparc_tls_get_addr (void)
4557 if (!sparc_tls_symbol)
4558 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
4560 return sparc_tls_symbol;
4563 /* Return the Global Offset Table to be used in TLS mode. */
4565 static rtx
4566 sparc_tls_got (void)
4568 /* In PIC mode, this is just the PIC offset table. */
4569 if (flag_pic)
4571 crtl->uses_pic_offset_table = 1;
4572 return pic_offset_table_rtx;
4575 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
4576 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
4577 if (TARGET_SUN_TLS && TARGET_ARCH32)
4579 load_got_register ();
4580 return global_offset_table_rtx;
4583 /* In all other cases, we load a new pseudo with the GOT symbol. */
4584 return copy_to_reg (sparc_got ());
4587 /* Return true if X contains a thread-local symbol. */
4589 static bool
4590 sparc_tls_referenced_p (rtx x)
4592 if (!TARGET_HAVE_TLS)
4593 return false;
4595 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
4596 x = XEXP (XEXP (x, 0), 0);
4598 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
4599 return true;
4601 /* That's all we handle in sparc_legitimize_tls_address for now. */
4602 return false;
4605 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4606 this (thread-local) address. */
4608 static rtx
4609 sparc_legitimize_tls_address (rtx addr)
4611 rtx temp1, temp2, temp3, ret, o0, got;
4612 rtx_insn *insn;
4614 gcc_assert (can_create_pseudo_p ());
4616 if (GET_CODE (addr) == SYMBOL_REF)
4617 switch (SYMBOL_REF_TLS_MODEL (addr))
4619 case TLS_MODEL_GLOBAL_DYNAMIC:
4620 start_sequence ();
4621 temp1 = gen_reg_rtx (SImode);
4622 temp2 = gen_reg_rtx (SImode);
4623 ret = gen_reg_rtx (Pmode);
4624 o0 = gen_rtx_REG (Pmode, 8);
4625 got = sparc_tls_got ();
4626 emit_insn (gen_tgd_hi22 (temp1, addr));
4627 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
4628 if (TARGET_ARCH32)
4630 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
4631 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
4632 addr, const1_rtx));
4634 else
4636 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
4637 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
4638 addr, const1_rtx));
4640 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4641 insn = get_insns ();
4642 end_sequence ();
4643 emit_libcall_block (insn, ret, o0, addr);
4644 break;
4646 case TLS_MODEL_LOCAL_DYNAMIC:
4647 start_sequence ();
4648 temp1 = gen_reg_rtx (SImode);
4649 temp2 = gen_reg_rtx (SImode);
4650 temp3 = gen_reg_rtx (Pmode);
4651 ret = gen_reg_rtx (Pmode);
4652 o0 = gen_rtx_REG (Pmode, 8);
4653 got = sparc_tls_got ();
4654 emit_insn (gen_tldm_hi22 (temp1));
4655 emit_insn (gen_tldm_lo10 (temp2, temp1));
4656 if (TARGET_ARCH32)
4658 emit_insn (gen_tldm_add32 (o0, got, temp2));
4659 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
4660 const1_rtx));
4662 else
4664 emit_insn (gen_tldm_add64 (o0, got, temp2));
4665 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
4666 const1_rtx));
4668 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4669 insn = get_insns ();
4670 end_sequence ();
4671 emit_libcall_block (insn, temp3, o0,
4672 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
4673 UNSPEC_TLSLD_BASE));
4674 temp1 = gen_reg_rtx (SImode);
4675 temp2 = gen_reg_rtx (SImode);
4676 emit_insn (gen_tldo_hix22 (temp1, addr));
4677 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
4678 if (TARGET_ARCH32)
4679 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
4680 else
4681 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
4682 break;
4684 case TLS_MODEL_INITIAL_EXEC:
4685 temp1 = gen_reg_rtx (SImode);
4686 temp2 = gen_reg_rtx (SImode);
4687 temp3 = gen_reg_rtx (Pmode);
4688 got = sparc_tls_got ();
4689 emit_insn (gen_tie_hi22 (temp1, addr));
4690 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
4691 if (TARGET_ARCH32)
4692 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
4693 else
4694 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
4695 if (TARGET_SUN_TLS)
4697 ret = gen_reg_rtx (Pmode);
4698 if (TARGET_ARCH32)
4699 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
4700 temp3, addr));
4701 else
4702 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
4703 temp3, addr));
4705 else
4706 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
4707 break;
4709 case TLS_MODEL_LOCAL_EXEC:
4710 temp1 = gen_reg_rtx (Pmode);
4711 temp2 = gen_reg_rtx (Pmode);
4712 if (TARGET_ARCH32)
4714 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
4715 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
4717 else
4719 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
4720 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
4722 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
4723 break;
4725 default:
4726 gcc_unreachable ();
4729 else if (GET_CODE (addr) == CONST)
4731 rtx base, offset;
4733 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
4735 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
4736 offset = XEXP (XEXP (addr, 0), 1);
4738 base = force_operand (base, NULL_RTX);
4739 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
4740 offset = force_reg (Pmode, offset);
4741 ret = gen_rtx_PLUS (Pmode, base, offset);
4744 else
4745 gcc_unreachable (); /* for now ... */
4747 return ret;
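/* For reference (illustration, not from the sources), the 32-bit
   global-dynamic sequence built above assembles to something like

     sethi  %tgd_hi22(sym), %g1
     add    %g1, %tgd_lo10(sym), %g1
     add    %l7, %g1, %o0, %tgd_add(sym)
     call   __tls_get_addr, %tgd_call(sym)
      nop

   leaving the address of the thread-local variable in %o0.  */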
4750 /* Legitimize PIC addresses. If the address is already position-independent,
4751 we return ORIG. Newly generated position-independent addresses go into a
4752 reg. This is REG if nonzero, otherwise we allocate register(s) as
4753 necessary. */
4755 static rtx
4756 sparc_legitimize_pic_address (rtx orig, rtx reg)
4758 if (GET_CODE (orig) == SYMBOL_REF
4759 /* See the comment in sparc_expand_move. */
4760 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
4762 bool gotdata_op = false;
4763 rtx pic_ref, address;
4764 rtx_insn *insn;
4766 if (!reg)
4768 gcc_assert (can_create_pseudo_p ());
4769 reg = gen_reg_rtx (Pmode);
4772 if (flag_pic == 2)
4774 /* If not during reload, allocate another temp reg here for loading
4775 in the address, so that these instructions can be optimized
4776 properly. */
4777 rtx temp_reg = can_create_pseudo_p () ? gen_reg_rtx (Pmode) : reg;
4779 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
4780 won't get confused into thinking that these two instructions
4781 are loading in the true address of the symbol. If in the
4782 future a PIC rtx exists, that should be used instead. */
4783 if (TARGET_ARCH64)
4785 emit_insn (gen_movdi_high_pic (temp_reg, orig));
4786 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
4788 else
4790 emit_insn (gen_movsi_high_pic (temp_reg, orig));
4791 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
4794 address = temp_reg;
4795 gotdata_op = true;
4797 else
4798 address = orig;
4800 crtl->uses_pic_offset_table = 1;
4801 if (gotdata_op)
4803 if (TARGET_ARCH64)
4804 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
4805 pic_offset_table_rtx,
4806 address, orig));
4807 else
4808 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
4809 pic_offset_table_rtx,
4810 address, orig));
4812 else
4814 pic_ref
4815 = gen_const_mem (Pmode,
4816 gen_rtx_PLUS (Pmode,
4817 pic_offset_table_rtx, address));
4818 insn = emit_move_insn (reg, pic_ref);
4821 /* Put a REG_EQUAL note on this insn, so that it can be optimized
4822 by the loop optimizer. */
4823 set_unique_reg_note (insn, REG_EQUAL, orig);
4824 return reg;
4826 else if (GET_CODE (orig) == CONST)
4828 rtx base, offset;
4830 if (GET_CODE (XEXP (orig, 0)) == PLUS
4831 && sparc_pic_register_p (XEXP (XEXP (orig, 0), 0)))
4832 return orig;
4834 if (!reg)
4836 gcc_assert (can_create_pseudo_p ());
4837 reg = gen_reg_rtx (Pmode);
4840 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
4841 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
4842 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
4843 base == reg ? NULL_RTX : reg);
4845 if (GET_CODE (offset) == CONST_INT)
4847 if (SMALL_INT (offset))
4848 return plus_constant (Pmode, base, INTVAL (offset));
4849 else if (can_create_pseudo_p ())
4850 offset = force_reg (Pmode, offset);
4851 else
4852 /* If we reach here, then something is seriously wrong. */
4853 gcc_unreachable ();
4855 return gen_rtx_PLUS (Pmode, base, offset);
4857 else if (GET_CODE (orig) == LABEL_REF)
4858 /* ??? We ought to be checking that the register is live instead, in case
4859 it is eliminated. */
4860 crtl->uses_pic_offset_table = 1;
4862 return orig;
4865 /* Try machine-dependent ways of modifying an illegitimate address X
4866 to be legitimate. If we find one, return the new, valid address.
4868 OLDX is the address as it was before break_out_memory_refs was called.
4869 In some cases it is useful to look at this to decide what needs to be done.
4871 MODE is the mode of the operand pointed to by X.
4873 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
4875 static rtx
4876 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
4877 machine_mode mode)
4879 rtx orig_x = x;
4881 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
4882 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
4883 force_operand (XEXP (x, 0), NULL_RTX));
4884 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
4885 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4886 force_operand (XEXP (x, 1), NULL_RTX));
4887 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
4888 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
4889 XEXP (x, 1));
4890 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
4891 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4892 force_operand (XEXP (x, 1), NULL_RTX));
4894 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
4895 return x;
4897 if (sparc_tls_referenced_p (x))
4898 x = sparc_legitimize_tls_address (x);
4899 else if (flag_pic)
4900 x = sparc_legitimize_pic_address (x, NULL_RTX);
4901 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
4902 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4903 copy_to_mode_reg (Pmode, XEXP (x, 1)));
4904 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
4905 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
4906 copy_to_mode_reg (Pmode, XEXP (x, 0)));
4907 else if (GET_CODE (x) == SYMBOL_REF
4908 || GET_CODE (x) == CONST
4909 || GET_CODE (x) == LABEL_REF)
4910 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
4912 return x;
4915 /* Delegitimize an address that was legitimized by the above function. */
4917 static rtx
4918 sparc_delegitimize_address (rtx x)
4920 x = delegitimize_mem_from_attrs (x);
4922 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
4923 switch (XINT (XEXP (x, 1), 1))
4925 case UNSPEC_MOVE_PIC:
4926 case UNSPEC_TLSLE:
4927 x = XVECEXP (XEXP (x, 1), 0, 0);
4928 gcc_assert (GET_CODE (x) == SYMBOL_REF);
4929 break;
4930 default:
4931 break;
4934 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
4935 if (GET_CODE (x) == MINUS
4936 && sparc_pic_register_p (XEXP (x, 0))
4937 && GET_CODE (XEXP (x, 1)) == LO_SUM
4938 && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
4939 && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
4941 x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
4942 gcc_assert (GET_CODE (x) == LABEL_REF
4943 || (GET_CODE (x) == CONST
4944 && GET_CODE (XEXP (x, 0)) == PLUS
4945 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
4946 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT));
4949 return x;
4952 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
4953 replace the input X, or the original X if no replacement is called for.
4954 The output parameter *WIN is 1 if the calling macro should goto WIN,
4955 0 if it should not.
4957 For SPARC, we wish to handle addresses by splitting them into
4958 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
4959 This cuts the number of extra insns by one.
4961 Do nothing when generating PIC code and the address is a symbolic
4962 operand or requires a scratch register. */
4964 rtx
4965 sparc_legitimize_reload_address (rtx x, machine_mode mode,
4966 int opnum, int type,
4967 int ind_levels ATTRIBUTE_UNUSED, int *win)
4969 /* Decompose SImode constants into HIGH+LO_SUM. */
4970 if (CONSTANT_P (x)
4971 && (mode != TFmode || TARGET_ARCH64)
4972 && GET_MODE (x) == SImode
4973 && GET_CODE (x) != LO_SUM
4974 && GET_CODE (x) != HIGH
4975 && sparc_cmodel <= CM_MEDLOW
4976 && !(flag_pic
4977 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
4979 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
4980 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4981 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4982 opnum, (enum reload_type)type);
4983 *win = 1;
4984 return x;
4987 /* We have to recognize what we have already generated above. */
4988 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
4990 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4991 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4992 opnum, (enum reload_type)type);
4993 *win = 1;
4994 return x;
4997 *win = 0;
4998 return x;
5001 /* Return true if ADDR (a legitimate address expression)
5002 has an effect that depends on the machine mode it is used for.
5004 In PIC mode,
5006 (mem:HI [%l7+a])
5008 is not equivalent to
5010 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
5012 because [%l7+a+1] is interpreted as the address of (a+1). */
5015 static bool
5016 sparc_mode_dependent_address_p (const_rtx addr,
5017 addr_space_t as ATTRIBUTE_UNUSED)
5019 if (GET_CODE (addr) == PLUS
5020 && sparc_pic_register_p (XEXP (addr, 0))
5021 && symbolic_operand (XEXP (addr, 1), VOIDmode))
5022 return true;
5024 return false;
5027 #ifdef HAVE_GAS_HIDDEN
5028 # define USE_HIDDEN_LINKONCE 1
5029 #else
5030 # define USE_HIDDEN_LINKONCE 0
5031 #endif
5033 static void
5034 get_pc_thunk_name (char name[32], unsigned int regno)
5036 const char *reg_name = reg_names[regno];
5038 /* Skip the leading '%' as that cannot be used in a
5039 symbol name. */
5040 reg_name += 1;
5042 if (USE_HIDDEN_LINKONCE)
5043 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
5044 else
5045 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
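/* E.g. for %l7 this produces "__sparc_get_pc_thunk.l7" in the hidden
   linkonce case, and an internal LADDPC label otherwise (illustration
   only).  */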
5048 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
5050 static rtx
5051 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2)
5053 int orig_flag_pic = flag_pic;
5054 rtx insn;
5056 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
5057 flag_pic = 0;
5058 if (TARGET_ARCH64)
5059 insn = gen_load_pcrel_symdi (op0, op1, op2, GEN_INT (REGNO (op0)));
5060 else
5061 insn = gen_load_pcrel_symsi (op0, op1, op2, GEN_INT (REGNO (op0)));
5062 flag_pic = orig_flag_pic;
5064 return insn;
5067 /* Emit code to load the GOT register. */
5069 void
5070 load_got_register (void)
5072 if (!global_offset_table_rtx)
5073 global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
5075 if (TARGET_VXWORKS_RTP)
5076 emit_insn (gen_vxworks_load_got ());
5077 else
5079 /* The GOT symbol is subject to a PC-relative relocation so we need a
5080 helper function to add the PC value and thus get the final value. */
5081 if (!got_helper_rtx)
5083 char name[32];
5084 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
5085 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5088 emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
5089 got_helper_rtx));
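/* Illustrative asm for the thunk-based path (not from the sources):

     sethi  %hi(_GLOBAL_OFFSET_TABLE_-4), %l7
     call   __sparc_get_pc_thunk.l7
      add   %l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   where the thunk itself is roughly "jmp %o7+8; add %o7, %l7, %l7",
   leaving the GOT address in %l7.  */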
5093 /* Emit a call instruction with the pattern given by PAT. ADDR is the
5094 address of the call target. */
5096 void
5097 sparc_emit_call_insn (rtx pat, rtx addr)
5099 rtx_insn *insn;
5101 insn = emit_call_insn (pat);
5103 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
5104 if (TARGET_VXWORKS_RTP
5105 && flag_pic
5106 && GET_CODE (addr) == SYMBOL_REF
5107 && (SYMBOL_REF_DECL (addr)
5108 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
5109 : !SYMBOL_REF_LOCAL_P (addr)))
5111 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
5112 crtl->uses_pic_offset_table = 1;
5116 /* Return 1 if RTX is a MEM which is known to be aligned to at
5117 least a DESIRED byte boundary. */
5119 int
5120 mem_min_alignment (rtx mem, int desired)
5122 rtx addr, base, offset;
5124 /* If it's not a MEM we can't accept it. */
5125 if (GET_CODE (mem) != MEM)
5126 return 0;
5128 /* Obviously... */
5129 if (!TARGET_UNALIGNED_DOUBLES
5130 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
5131 return 1;
5133 /* ??? The rest of the function predates MEM_ALIGN so
5134 there is probably a bit of redundancy. */
5135 addr = XEXP (mem, 0);
5136 base = offset = NULL_RTX;
5137 if (GET_CODE (addr) == PLUS)
5139 if (GET_CODE (XEXP (addr, 0)) == REG)
5141 base = XEXP (addr, 0);
5143 /* What we are saying here is that if the base
5144 REG is aligned properly, the compiler will make
5145 sure any REG-based index upon it will be
5146 properly aligned as well. */
5147 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
5148 offset = XEXP (addr, 1);
5149 else
5150 offset = const0_rtx;
5153 else if (GET_CODE (addr) == REG)
5155 base = addr;
5156 offset = const0_rtx;
5159 if (base != NULL_RTX)
5161 int regno = REGNO (base);
5163 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
5165 /* Check if the compiler has recorded some information
5166 about the alignment of the base REG. If reload has
5167 completed, we already matched with proper alignments.
5168 If not running global_alloc, reload might give us
5169 an unaligned pointer to the local stack, though. */
5170 if (((cfun != 0
5171 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
5172 || (optimize && reload_completed))
5173 && (INTVAL (offset) & (desired - 1)) == 0)
5174 return 1;
5176 else
5178 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
5179 return 1;
5182 else if (! TARGET_UNALIGNED_DOUBLES
5183 || CONSTANT_P (addr)
5184 || GET_CODE (addr) == LO_SUM)
5186 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
5187 is true, in which case we can only assume that an access is aligned if
5188 it is to a constant address, or the address involves a LO_SUM. */
5189 return 1;
5192 /* An obviously unaligned address. */
5193 return 0;
5197 /* Vectors to keep interesting information about registers where it can easily
5198 be looked up. We used to use the actual mode value as the bit number, but there
5199 are more than 32 modes now. Instead we use two tables: one indexed by
5200 hard register number, and one indexed by mode. */
5202 /* The purpose of sparc_mode_class is to shrink the range of modes so that
5203 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
5204 mapped into one sparc_mode_class mode. */
5206 enum sparc_mode_class {
5207 H_MODE, S_MODE, D_MODE, T_MODE, O_MODE,
5208 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
5209 CC_MODE, CCFP_MODE
5212 /* Modes for single-word and smaller quantities. */
5213 #define S_MODES \
5214 ((1 << (int) H_MODE) | (1 << (int) S_MODE) | (1 << (int) SF_MODE))
5216 /* Modes for double-word and smaller quantities. */
5217 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
5219 /* Modes for quad-word and smaller quantities. */
5220 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
5222 /* Modes for 8-word and smaller quantities. */
5223 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
5225 /* Modes for single-float quantities. */
5226 #define SF_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
5228 /* Modes for double-float and smaller quantities. */
5229 #define DF_MODES (SF_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
5231 /* Modes for quad-float and smaller quantities. */
5232 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
5234 /* Modes for quad-float pairs and smaller quantities. */
5235 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
5237 /* Modes for double-float only quantities. */
5238 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
5240 /* Modes for quad-float and double-float only quantities. */
5241 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
5243 /* Modes for quad-float pairs and double-float only quantities. */
5244 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
5246 /* Modes for condition codes. */
5247 #define CC_MODES (1 << (int) CC_MODE)
5248 #define CCFP_MODES (1 << (int) CCFP_MODE)
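/* With these masks, checking whether a mode is OK for a hard register
   reduces to a single AND; sketch (the real test lives in
   sparc_hard_regno_mode_ok further below):

     (hard_regno_mode_classes[regno] & sparc_mode_class[mode]) != 0  */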
5250 /* Value is 1 if the register/mode pair is acceptable on SPARC.
5252 The funny mixture of D and T modes is because integer operations
5253 do not specially operate on tetra quantities, so non-quad-aligned
5254 registers can hold quadword quantities (except %o4 and %i4 because
5255 they cross fixed registers).
5257 ??? Note that, despite the settings, non-double-aligned parameter
5258 registers can hold double-word quantities in 32-bit mode. */
5260 /* This points to either the 32-bit or the 64-bit version. */
5261 static const int *hard_regno_mode_classes;
5263 static const int hard_32bit_mode_classes[] = {
5264 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
5265 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
5266 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
5267 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
5269 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5270 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5271 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5272 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
5274 /* FP regs f32 to f63. Only the even numbered registers actually exist,
5275 and none can hold SFmode/SImode values. */
5276 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5277 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5278 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5279 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5281 /* %fcc[0123] */
5282 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
5284 /* %icc, %sfp, %gsr */
5285 CC_MODES, 0, D_MODES
5288 static const int hard_64bit_mode_classes[] = {
5289 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5290 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5291 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5292 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5294 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5295 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5296 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5297 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
5299 /* FP regs f32 to f63. Only the even numbered registers actually exist,
5300 and none can hold SFmode/SImode values. */
5301 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5302 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5303 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5304 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5306 /* %fcc[0123] */
5307 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
5309 /* %icc, %sfp, %gsr */
5310 CC_MODES, 0, D_MODES
5313 static int sparc_mode_class [NUM_MACHINE_MODES];
5315 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
5317 static void
5318 sparc_init_modes (void)
5320 int i;
5322 for (i = 0; i < NUM_MACHINE_MODES; i++)
5324 machine_mode m = (machine_mode) i;
5325 unsigned int size = GET_MODE_SIZE (m);
5327 switch (GET_MODE_CLASS (m))
5329 case MODE_INT:
5330 case MODE_PARTIAL_INT:
5331 case MODE_COMPLEX_INT:
5332 if (size < 4)
5333 sparc_mode_class[i] = 1 << (int) H_MODE;
5334 else if (size == 4)
5335 sparc_mode_class[i] = 1 << (int) S_MODE;
5336 else if (size == 8)
5337 sparc_mode_class[i] = 1 << (int) D_MODE;
5338 else if (size == 16)
5339 sparc_mode_class[i] = 1 << (int) T_MODE;
5340 else if (size == 32)
5341 sparc_mode_class[i] = 1 << (int) O_MODE;
5342 else
5343 sparc_mode_class[i] = 0;
5344 break;
5345 case MODE_VECTOR_INT:
5346 if (size == 4)
5347 sparc_mode_class[i] = 1 << (int) SF_MODE;
5348 else if (size == 8)
5349 sparc_mode_class[i] = 1 << (int) DF_MODE;
5350 else
5351 sparc_mode_class[i] = 0;
5352 break;
5353 case MODE_FLOAT:
5354 case MODE_COMPLEX_FLOAT:
5355 if (size == 4)
5356 sparc_mode_class[i] = 1 << (int) SF_MODE;
5357 else if (size == 8)
5358 sparc_mode_class[i] = 1 << (int) DF_MODE;
5359 else if (size == 16)
5360 sparc_mode_class[i] = 1 << (int) TF_MODE;
5361 else if (size == 32)
5362 sparc_mode_class[i] = 1 << (int) OF_MODE;
5363 else
5364 sparc_mode_class[i] = 0;
5365 break;
5366 case MODE_CC:
5367 if (m == CCFPmode || m == CCFPEmode)
5368 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
5369 else
5370 sparc_mode_class[i] = 1 << (int) CC_MODE;
5371 break;
5372 default:
5373 sparc_mode_class[i] = 0;
5374 break;
5378 if (TARGET_ARCH64)
5379 hard_regno_mode_classes = hard_64bit_mode_classes;
5380 else
5381 hard_regno_mode_classes = hard_32bit_mode_classes;
5383 /* Initialize the array used by REGNO_REG_CLASS. */
5384 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5386 if (i < 16 && TARGET_V8PLUS)
5387 sparc_regno_reg_class[i] = I64_REGS;
5388 else if (i < 32 || i == FRAME_POINTER_REGNUM)
5389 sparc_regno_reg_class[i] = GENERAL_REGS;
5390 else if (i < 64)
5391 sparc_regno_reg_class[i] = FP_REGS;
5392 else if (i < 96)
5393 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
5394 else if (i < 100)
5395 sparc_regno_reg_class[i] = FPCC_REGS;
5396 else
5397 sparc_regno_reg_class[i] = NO_REGS;
5401 /* Return whether REGNO, a global or FP register, must be saved/restored. */
5403 static inline bool
5404 save_global_or_fp_reg_p (unsigned int regno,
5405 int leaf_function ATTRIBUTE_UNUSED)
5407 return !call_used_regs[regno] && df_regs_ever_live_p (regno);
5410 /* Return whether the return address register (%i7) is needed. */
5412 static inline bool
5413 return_addr_reg_needed_p (int leaf_function)
5415 /* If it is live, for example because of __builtin_return_address (0). */
5416 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
5417 return true;
5419 /* Otherwise, it is needed as save register if %o7 is clobbered. */
5420 if (!leaf_function
5421 /* Loading the GOT register clobbers %o7. */
5422 || crtl->uses_pic_offset_table
5423 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
5424 return true;
5426 return false;
5429 /* Return whether REGNO, a local or in register, must be saved/restored. */
5431 static bool
5432 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
5434 /* General case: call-saved registers live at some point. */
5435 if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
5436 return true;
5438 /* Frame pointer register (%fp) if needed. */
5439 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
5440 return true;
5442 /* Return address register (%i7) if needed. */
5443 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
5444 return true;
5446 /* GOT register (%l7) if needed. */
5447 if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
5448 return true;
5450 /* If the function accesses prior frames, the frame pointer and the return
5451 address of the previous frame must be saved on the stack. */
5452 if (crtl->accesses_prior_frames
5453 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
5454 return true;
5456 return false;
5459 /* Compute the frame size required by the function. This function is called
5460 during the reload pass and also by sparc_expand_prologue. */
5462 HOST_WIDE_INT
5463 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
5465 HOST_WIDE_INT frame_size, apparent_frame_size;
5466 int args_size, n_global_fp_regs = 0;
5467 bool save_local_in_regs_p = false;
5468 unsigned int i;
5470 /* If the function allocates dynamic stack space, the dynamic offset is
5471 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
5472 if (leaf_function && !cfun->calls_alloca)
5473 args_size = 0;
5474 else
5475 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
5477 /* Calculate space needed for global registers. */
5478 if (TARGET_ARCH64)
5480 for (i = 0; i < 8; i++)
5481 if (save_global_or_fp_reg_p (i, 0))
5482 n_global_fp_regs += 2;
5484 else
5486 for (i = 0; i < 8; i += 2)
5487 if (save_global_or_fp_reg_p (i, 0)
5488 || save_global_or_fp_reg_p (i + 1, 0))
5489 n_global_fp_regs += 2;
5492 /* In the flat window model, find out which local and in registers need to
5493 be saved. We don't reserve space in the current frame for them as they
5494 will be spilled into the register window save area of the caller's frame.
5495 However, as soon as we use this register window save area, we must create
5496 that of the current frame to make it the live one. */
5497 if (TARGET_FLAT)
5498 for (i = 16; i < 32; i++)
5499 if (save_local_or_in_reg_p (i, leaf_function))
5501 save_local_in_regs_p = true;
5502 break;
5505 /* Calculate space needed for FP registers. */
5506 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
5507 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
5508 n_global_fp_regs += 2;
5510 if (size == 0
5511 && n_global_fp_regs == 0
5512 && args_size == 0
5513 && !save_local_in_regs_p)
5514 frame_size = apparent_frame_size = 0;
5515 else
5517 /* Start from the apparent frame size. */
5518 apparent_frame_size = ROUND_UP (size, 8) + n_global_fp_regs * 4;
5520 /* We need to add the size of the outgoing argument area. */
5521 frame_size = apparent_frame_size + ROUND_UP (args_size, 8);
5523 /* And that of the register window save area. */
5524 frame_size += FIRST_PARM_OFFSET (cfun->decl);
5526 /* Finally, bump to the appropriate alignment. */
5527 frame_size = SPARC_STACK_ALIGN (frame_size);
5530 /* Set up values for use in prologue and epilogue. */
5531 sparc_frame_size = frame_size;
5532 sparc_apparent_frame_size = apparent_frame_size;
5533 sparc_n_global_fp_regs = n_global_fp_regs;
5534 sparc_save_local_in_regs_p = save_local_in_regs_p;
5536 return frame_size;
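/* Worked example (illustration; the exact constants are ABI-dependent):
   a non-leaf function with 40 bytes of locals, two pairs of call-saved
   FP registers and 24 bytes of outgoing arguments gets

     apparent_frame_size = ROUND_UP (40, 8) + 4 * 4 = 56
     frame_size = SPARC_STACK_ALIGN (56 + ROUND_UP (24, 8)
                                     + FIRST_PARM_OFFSET (cfun->decl))

   i.e. locals plus register save space, then the outgoing argument area
   and the fixed register window save area, rounded to the alignment.  */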
5539 /* Implement the macro INITIAL_ELIMINATION_OFFSET, return the OFFSET. */
5541 int
5542 sparc_initial_elimination_offset (int to)
5544 int offset;
5546 if (to == STACK_POINTER_REGNUM)
5547 offset = sparc_compute_frame_size (get_frame_size (), crtl->is_leaf);
5548 else
5549 offset = 0;
5551 offset += SPARC_STACK_BIAS;
5552 return offset;
5555 /* Output any necessary .register pseudo-ops. */
5557 void
5558 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
5560 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
5561 int i;
5563 if (TARGET_ARCH32)
5564 return;
5566 /* Check if %g[2367] were used without
5567 .register being printed for them already. */
5568 for (i = 2; i < 8; i++)
5570 if (df_regs_ever_live_p (i)
5571 && ! sparc_hard_reg_printed [i])
5573 sparc_hard_reg_printed [i] = 1;
5574 /* %g7 is used as TLS base register, use #ignore
5575 for it instead of #scratch. */
5576 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
5577 i == 7 ? "ignore" : "scratch");
5579 if (i == 3) i = 5;
5581 #endif
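/* E.g. a 64-bit function that clobbers %g2 and %g3 gets (illustration):

     .register %g2, #scratch
     .register %g3, #scratch  */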
5584 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
5586 #if PROBE_INTERVAL > 4096
5587 #error Cannot use indexed addressing mode for stack probing
5588 #endif
5590 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
5591 inclusive. These are offsets from the current stack pointer.
5593 Note that we don't use the REG+REG addressing mode for the probes because
5594 of the stack bias in 64-bit mode. And it doesn't really buy us anything,
5595 so the advantages of having a single code path win here. */
5597 static void
5598 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
5600 rtx g1 = gen_rtx_REG (Pmode, 1);
5602 /* See if we have a constant small number of probes to generate. If so,
5603 that's the easy case. */
5604 if (size <= PROBE_INTERVAL)
5606 emit_move_insn (g1, GEN_INT (first));
5607 emit_insn (gen_rtx_SET (g1,
5608 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5609 emit_stack_probe (plus_constant (Pmode, g1, -size));
5612 /* The run-time loop is made up of 9 insns in the generic case, while the
5613 compile-time loop is made up of 4+2*(n-2) insns for n intervals. */
5614 else if (size <= 4 * PROBE_INTERVAL)
5616 HOST_WIDE_INT i;
5618 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
5619 emit_insn (gen_rtx_SET (g1,
5620 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5621 emit_stack_probe (g1);
5623 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
5624 it exceeds SIZE. If only two probes are needed, this will not
5625 generate any code. Then probe at FIRST + SIZE. */
5626 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
5628 emit_insn (gen_rtx_SET (g1,
5629 plus_constant (Pmode, g1, -PROBE_INTERVAL)));
5630 emit_stack_probe (g1);
5633 emit_stack_probe (plus_constant (Pmode, g1,
5634 (i - PROBE_INTERVAL) - size));
5637 /* Otherwise, do the same as above, but in a loop. Note that we must be
5638 extra careful with variables wrapping around because we might be at
5639 the very top (or the very bottom) of the address space and we have
5640 to be able to handle this case properly; in particular, we use an
5641 equality test for the loop condition. */
5642 else
5644 HOST_WIDE_INT rounded_size;
5645 rtx g4 = gen_rtx_REG (Pmode, 4);
5647 emit_move_insn (g1, GEN_INT (first));
5650 /* Step 1: round SIZE to the previous multiple of the interval. */
5652 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
5653 emit_move_insn (g4, GEN_INT (rounded_size));
5656 /* Step 2: compute initial and final value of the loop counter. */
5658 /* TEST_ADDR = SP + FIRST. */
5659 emit_insn (gen_rtx_SET (g1,
5660 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5662 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
5663 emit_insn (gen_rtx_SET (g4, gen_rtx_MINUS (Pmode, g1, g4)));
5666 /* Step 3: the loop
5668 while (TEST_ADDR != LAST_ADDR)
5670 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
5671 probe at TEST_ADDR
5674 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
5675 until it is equal to ROUNDED_SIZE. */
5677 if (TARGET_ARCH64)
5678 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
5679 else
5680 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
5683 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
5684 that SIZE is equal to ROUNDED_SIZE. */
5686 if (size != rounded_size)
5687 emit_stack_probe (plus_constant (Pmode, g4, rounded_size - size));
5690 /* Make sure nothing is scheduled before we are done. */
5691 emit_insn (gen_blockage ());
5694 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
5695 absolute addresses. */
5697 const char *
5698 output_probe_stack_range (rtx reg1, rtx reg2)
5700 static int labelno = 0;
5701 char loop_lab[32];
5702 rtx xops[2];
5704 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
5706 /* Loop. */
5707 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
5709 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
5710 xops[0] = reg1;
5711 xops[1] = GEN_INT (-PROBE_INTERVAL);
5712 output_asm_insn ("add\t%0, %1, %0", xops);
5714 /* Test if TEST_ADDR == LAST_ADDR. */
5715 xops[1] = reg2;
5716 output_asm_insn ("cmp\t%0, %1", xops);
5718 /* Probe at TEST_ADDR and branch. */
5719 if (TARGET_ARCH64)
5720 fputs ("\tbne,pt\t%xcc,", asm_out_file);
5721 else
5722 fputs ("\tbne\t", asm_out_file);
5723 assemble_name_raw (asm_out_file, loop_lab);
5724 fputc ('\n', asm_out_file);
5725 xops[1] = GEN_INT (SPARC_STACK_BIAS);
5726 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
5728 return "";
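/* The loop printed above looks like this (illustration, 64-bit flavour,
   default 4K probe interval):

   .LPSRL0:
     add    %g1, -4096, %g1
     cmp    %g1, %g4
     bne,pt %xcc, .LPSRL0
      st    %g0, [%g1+2047]

   probing one word per interval until TEST_ADDR equals LAST_ADDR.  */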
5731 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
5732 needed. LOW is supposed to be double-word aligned for 32-bit registers.
5733 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
5734 is the action to be performed if SAVE_P returns true and ACTION_FALSE
5735 the action to be performed if it returns false. Return the new offset. */
5737 typedef bool (*sorr_pred_t) (unsigned int, int);
5738 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
5740 static int
5741 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
5742 int offset, int leaf_function, sorr_pred_t save_p,
5743 sorr_act_t action_true, sorr_act_t action_false)
5745 unsigned int i;
5746 rtx mem;
5747 rtx_insn *insn;
5749 if (TARGET_ARCH64 && high <= 32)
5751 int fp_offset = -1;
5753 for (i = low; i < high; i++)
5755 if (save_p (i, leaf_function))
5757 mem = gen_frame_mem (DImode, plus_constant (Pmode,
5758 base, offset));
5759 if (action_true == SORR_SAVE)
5761 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
5762 RTX_FRAME_RELATED_P (insn) = 1;
5764 else /* action_true == SORR_RESTORE */
5766 /* The frame pointer must be restored last since its old
5767 value may be used as base address for the frame. This
5768 is problematic only in 64-bit mode because of the lack
5769 of a double-word load instruction. */
5770 if (i == HARD_FRAME_POINTER_REGNUM)
5771 fp_offset = offset;
5772 else
5773 emit_move_insn (gen_rtx_REG (DImode, i), mem);
5775 offset += 8;
5777 else if (action_false == SORR_ADVANCE)
5778 offset += 8;
5781 if (fp_offset >= 0)
5783 mem = gen_frame_mem (DImode, plus_constant (Pmode, base, fp_offset));
5784 emit_move_insn (hard_frame_pointer_rtx, mem);
5787 else
5789 for (i = low; i < high; i += 2)
5791 bool reg0 = save_p (i, leaf_function);
5792 bool reg1 = save_p (i + 1, leaf_function);
5793 machine_mode mode;
5794 int regno;
5796 if (reg0 && reg1)
5798 mode = SPARC_INT_REG_P (i) ? E_DImode : E_DFmode;
5799 regno = i;
5801 else if (reg0)
5803 mode = SPARC_INT_REG_P (i) ? E_SImode : E_SFmode;
5804 regno = i;
5806 else if (reg1)
5808 mode = SPARC_INT_REG_P (i) ? E_SImode : E_SFmode;
5809 regno = i + 1;
5810 offset += 4;
5812 else
5814 if (action_false == SORR_ADVANCE)
5815 offset += 8;
5816 continue;
5819 mem = gen_frame_mem (mode, plus_constant (Pmode, base, offset));
5820 if (action_true == SORR_SAVE)
5822 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
5823 RTX_FRAME_RELATED_P (insn) = 1;
5824 if (mode == DImode)
5826 rtx set1, set2;
5827 mem = gen_frame_mem (SImode, plus_constant (Pmode, base,
5828 offset));
5829 set1 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno));
5830 RTX_FRAME_RELATED_P (set1) = 1;
5831 mem
5832 = gen_frame_mem (SImode, plus_constant (Pmode, base,
5833 offset + 4));
5834 set2 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno + 1));
5835 RTX_FRAME_RELATED_P (set2) = 1;
5836 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
5837 gen_rtx_PARALLEL (VOIDmode,
5838 gen_rtvec (2, set1, set2)));
5841 else /* action_true == SORR_RESTORE */
5842 emit_move_insn (gen_rtx_REG (mode, regno), mem);
5844 /* Bump and round down to double word
5845 in case we already bumped by 4. */
5846 offset = ROUND_DOWN (offset + 8, 8);
5850 return offset;
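/* A worked example of the pairing logic above: if, in 32-bit mode, only
   %l1 (regno 17) must be saved, the i = 16 iteration sees reg0 false and
   reg1 true, bumps OFFSET by 4, saves %l1 in SImode at that address, and
   ROUND_DOWN (offset + 8, 8) then realigns OFFSET to the next double
   word (0 -> 4 -> 8).  */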
5853 /* Emit code to adjust BASE to OFFSET. Return the new base. */
5855 static rtx
5856 emit_adjust_base_to_offset (rtx base, int offset)
5858 /* ??? This might be optimized a little as %g1 might already have a
5859 value close enough that a single add insn will do. */
5860 /* ??? Although, all of this is probably only a temporary fix because
5861 if %g1 can hold a function result, then sparc_expand_epilogue will
5862 lose (the result will be clobbered). */
5863 rtx new_base = gen_rtx_REG (Pmode, 1);
5864 emit_move_insn (new_base, GEN_INT (offset));
5865 emit_insn (gen_rtx_SET (new_base, gen_rtx_PLUS (Pmode, base, new_base)));
5866 return new_base;
5869 /* Emit code to save/restore call-saved global and FP registers. */
5871 static void
5872 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
5874 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
5876 base = emit_adjust_base_to_offset (base, offset);
5877 offset = 0;
5880 offset
5881 = emit_save_or_restore_regs (0, 8, base, offset, 0,
5882 save_global_or_fp_reg_p, action, SORR_NONE);
5883 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
5884 save_global_or_fp_reg_p, action, SORR_NONE);
5887 /* Emit code to save/restore call-saved local and in registers. */
5889 static void
5890 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
5892 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
5894 base = emit_adjust_base_to_offset (base, offset);
5895 offset = 0;
5898 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
5899 save_local_or_in_reg_p, action, SORR_ADVANCE);
5902 /* Emit a window_save insn. */
5904 static rtx_insn *
5905 emit_window_save (rtx increment)
5907 rtx_insn *insn = emit_insn (gen_window_save (increment));
5908 RTX_FRAME_RELATED_P (insn) = 1;
5910 /* The incoming return address (%o7) is saved in %i7. */
5911 add_reg_note (insn, REG_CFA_REGISTER,
5912 gen_rtx_SET (gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
5913 gen_rtx_REG (Pmode,
5914 INCOMING_RETURN_ADDR_REGNUM)));
5916 /* The window save event. */
5917 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
5919 /* The CFA is %fp, the hard frame pointer. */
5920 add_reg_note (insn, REG_CFA_DEF_CFA,
5921 plus_constant (Pmode, hard_frame_pointer_rtx,
5922 INCOMING_FRAME_SP_OFFSET));
5924 return insn;
5927 /* Generate an increment for the stack pointer. */
5929 static rtx
5930 gen_stack_pointer_inc (rtx increment)
5932 return gen_rtx_SET (stack_pointer_rtx,
5933 gen_rtx_PLUS (Pmode,
5934 stack_pointer_rtx,
5935 increment));
5938 /* Expand the function prologue. The prologue is responsible for reserving
5939 storage for the frame, saving the call-saved registers and loading the
5940 GOT register if needed. */
5942 void
5943 sparc_expand_prologue (void)
5945 HOST_WIDE_INT size;
5946 rtx_insn *insn;
5948 /* Compute a snapshot of crtl->uses_only_leaf_regs. Relying
5949 on the final value of the flag means deferring the prologue/epilogue
5950 expansion until just before the second scheduling pass, which is too
5951 late to emit multiple epilogues or return insns.
5953 Of course we are making the assumption that the value of the flag
5954 will not change between now and its final value. Of the three parts
5955 of the formula, only the last one can reasonably vary. Let's take a
5956 closer look, after assuming that the first two are true
5957 (otherwise the last one is effectively ignored).
5959 If only_leaf_regs_used returns false, the global predicate will also
5960 be false so the actual frame size calculated below will be positive.
5961 As a consequence, the save_register_window insn will be emitted in
5962 the instruction stream; now this insn explicitly references %fp
5963 which is not a leaf register so only_leaf_regs_used will always
5964 return false subsequently.
5966 If only_leaf_regs_used returns true, we hope that the subsequent
5967 optimization passes won't cause non-leaf registers to pop up. For
5968 example, the regrename pass has special provisions to not rename to
5969 non-leaf registers in a leaf function. */
5970 sparc_leaf_function_p
5971 = optimize > 0 && crtl->is_leaf && only_leaf_regs_used ();
5973 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5975 if (flag_stack_usage_info)
5976 current_function_static_stack_size = size;
5978 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
5979 || flag_stack_clash_protection)
5981 if (crtl->is_leaf && !cfun->calls_alloca)
5983 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
5984 sparc_emit_probe_stack_range (get_stack_check_protect (),
5985 size - get_stack_check_protect ());
5987 else if (size > 0)
5988 sparc_emit_probe_stack_range (get_stack_check_protect (), size);
5991 if (size == 0)
5992 ; /* do nothing. */
5993 else if (sparc_leaf_function_p)
5995 rtx size_int_rtx = GEN_INT (-size);
5997 if (size <= 4096)
5998 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
5999 else if (size <= 8192)
6001 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
6002 RTX_FRAME_RELATED_P (insn) = 1;
6004 /* %sp is still the CFA register. */
6005 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6007 else
6009 rtx size_rtx = gen_rtx_REG (Pmode, 1);
6010 emit_move_insn (size_rtx, size_int_rtx);
6011 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
6012 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
6013 gen_stack_pointer_inc (size_int_rtx));
6016 RTX_FRAME_RELATED_P (insn) = 1;
6018 else
6020 rtx size_int_rtx = GEN_INT (-size);
6022 if (size <= 4096)
6023 emit_window_save (size_int_rtx);
6024 else if (size <= 8192)
6026 emit_window_save (GEN_INT (-4096));
6028 /* %sp is not the CFA register anymore. */
6029 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6031 /* Make sure no %fp-based store is issued until after the frame is
6032 established. The offset between the frame pointer and the stack
6033 pointer is calculated relative to the value of the stack pointer
6034 at the end of the function prologue, and moving instructions that
6035 access the stack via the frame pointer between the instructions
6036 that decrement the stack pointer could result in accessing the
6037 register window save area, which is volatile. */
6038 emit_insn (gen_frame_blockage ());
6040 else
6042 rtx size_rtx = gen_rtx_REG (Pmode, 1);
6043 emit_move_insn (size_rtx, size_int_rtx);
6044 emit_window_save (size_rtx);
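/* The three-way split above reflects the 13-bit signed immediate field
   (-4096..4095) of the SPARC add instruction: a frame of, say, 6000
   bytes cannot be allocated with one decrement, so it is done as -4096
   followed by -(6000 - 4096) = -1904, while frames larger than 8192
   bytes go through the %g1 temporary.  */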
6048 if (sparc_leaf_function_p)
6050 sparc_frame_base_reg = stack_pointer_rtx;
6051 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
6053 else
6055 sparc_frame_base_reg = hard_frame_pointer_rtx;
6056 sparc_frame_base_offset = SPARC_STACK_BIAS;
6059 if (sparc_n_global_fp_regs > 0)
6060 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6061 sparc_frame_base_offset
6062 - sparc_apparent_frame_size,
6063 SORR_SAVE);
6065 /* Advertise that the data calculated just above are now valid. */
6066 sparc_prologue_data_valid_p = true;
6069 /* Expand the function prologue. The prologue is responsible for reserving
6070 storage for the frame, saving the call-saved registers and loading the
6071 GOT register if needed. */
6073 void
6074 sparc_flat_expand_prologue (void)
6076 HOST_WIDE_INT size;
6077 rtx_insn *insn;
6079 sparc_leaf_function_p = optimize > 0 && crtl->is_leaf;
6081 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
6083 if (flag_stack_usage_info)
6084 current_function_static_stack_size = size;
6086 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
6087 || flag_stack_clash_protection)
6089 if (crtl->is_leaf && !cfun->calls_alloca)
6091 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
6092 sparc_emit_probe_stack_range (get_stack_check_protect (),
6093 size - get_stack_check_protect ());
6095 else if (size > 0)
6096 sparc_emit_probe_stack_range (get_stack_check_protect (), size);
6099 if (sparc_save_local_in_regs_p)
6100 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
6101 SORR_SAVE);
6103 if (size == 0)
6104 ; /* do nothing. */
6105 else
6107 rtx size_int_rtx, size_rtx;
6109 size_rtx = size_int_rtx = GEN_INT (-size);
6111 /* We establish the frame (i.e. decrement the stack pointer) first, even
6112 if we use a frame pointer, because we cannot clobber any call-saved
6113 registers, including the frame pointer, if we haven't created a new
6114 register save area, for the sake of compatibility with the ABI. */
6115 if (size <= 4096)
6116 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
6117 else if (size <= 8192 && !frame_pointer_needed)
6119 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
6120 RTX_FRAME_RELATED_P (insn) = 1;
6121 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6123 else
6125 size_rtx = gen_rtx_REG (Pmode, 1);
6126 emit_move_insn (size_rtx, size_int_rtx);
6127 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
6128 add_reg_note (insn, REG_CFA_ADJUST_CFA,
6129 gen_stack_pointer_inc (size_int_rtx));
6131 RTX_FRAME_RELATED_P (insn) = 1;
6133 /* Ensure nothing is scheduled until after the frame is established. */
6134 emit_insn (gen_blockage ());
6136 if (frame_pointer_needed)
6138 insn = emit_insn (gen_rtx_SET (hard_frame_pointer_rtx,
6139 gen_rtx_MINUS (Pmode,
6140 stack_pointer_rtx,
6141 size_rtx)));
6142 RTX_FRAME_RELATED_P (insn) = 1;
6144 add_reg_note (insn, REG_CFA_ADJUST_CFA,
6145 gen_rtx_SET (hard_frame_pointer_rtx,
6146 plus_constant (Pmode, stack_pointer_rtx,
6147 size)));
6150 if (return_addr_reg_needed_p (sparc_leaf_function_p))
6152 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
6153 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
6155 insn = emit_move_insn (i7, o7);
6156 RTX_FRAME_RELATED_P (insn) = 1;
6158 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (i7, o7));
6160 /* Prevent this instruction from ever being considered dead,
6161 even if this function has no epilogue. */
6162 emit_use (i7);
6166 if (frame_pointer_needed)
6168 sparc_frame_base_reg = hard_frame_pointer_rtx;
6169 sparc_frame_base_offset = SPARC_STACK_BIAS;
6171 else
6173 sparc_frame_base_reg = stack_pointer_rtx;
6174 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
6177 if (sparc_n_global_fp_regs > 0)
6178 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6179 sparc_frame_base_offset
6180 - sparc_apparent_frame_size,
6181 SORR_SAVE);
6183 /* Advertise that the data calculated just above are now valid. */
6184 sparc_prologue_data_valid_p = true;
6187 /* This function generates the assembly code for function entry, which boils
6188 down to emitting the necessary .register directives. */
6190 static void
6191 sparc_asm_function_prologue (FILE *file)
6193 /* Check that the assumption we made in sparc_expand_prologue is valid. */
6194 if (!TARGET_FLAT)
6195 gcc_assert (sparc_leaf_function_p == crtl->uses_only_leaf_regs);
6197 sparc_output_scratch_registers (file);
6200 /* Expand the function epilogue, either normal or part of a sibcall.
6201 We emit all the instructions except the return or the call. */
6203 void
6204 sparc_expand_epilogue (bool for_eh)
6206 HOST_WIDE_INT size = sparc_frame_size;
6208 if (cfun->calls_alloca)
6209 emit_insn (gen_frame_blockage ());
6211 if (sparc_n_global_fp_regs > 0)
6212 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6213 sparc_frame_base_offset
6214 - sparc_apparent_frame_size,
6215 SORR_RESTORE);
6217 if (size == 0 || for_eh)
6218 ; /* do nothing. */
6219 else if (sparc_leaf_function_p)
6221 if (size <= 4096)
6222 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
6223 else if (size <= 8192)
6225 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
6226 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
6228 else
6230 rtx reg = gen_rtx_REG (Pmode, 1);
6231 emit_move_insn (reg, GEN_INT (size));
6232 emit_insn (gen_stack_pointer_inc (reg));
6237 /* Expand the function epilogue, either normal or part of a sibcall.
6238 We emit all the instructions except the return or the call. */
6240 void
6241 sparc_flat_expand_epilogue (bool for_eh)
6243 HOST_WIDE_INT size = sparc_frame_size;
6245 if (sparc_n_global_fp_regs > 0)
6246 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6247 sparc_frame_base_offset
6248 - sparc_apparent_frame_size,
6249 SORR_RESTORE);
6251 /* If we have a frame pointer, we need both to restore it before the
6252 frame is destroyed and to use its current value in destroying the frame.
6253 Since we don't have an atomic way to do that in the flat window model,
6254 we save the current value into a temporary register (%g1). */
6255 if (frame_pointer_needed && !for_eh)
6256 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
6258 if (return_addr_reg_needed_p (sparc_leaf_function_p))
6259 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
6260 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
6262 if (sparc_save_local_in_regs_p)
6263 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
6264 sparc_frame_base_offset,
6265 SORR_RESTORE);
6267 if (size == 0 || for_eh)
6268 ; /* do nothing. */
6269 else if (frame_pointer_needed)
6271 /* Make sure the frame is destroyed after everything else is done. */
6272 emit_insn (gen_blockage ());
6274 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
6276 else
6278 /* Likewise. */
6279 emit_insn (gen_blockage ());
6281 if (size <= 4096)
6282 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
6283 else if (size <= 8192)
6285 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
6286 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
6288 else
6290 rtx reg = gen_rtx_REG (Pmode, 1);
6291 emit_move_insn (reg, GEN_INT (size));
6292 emit_insn (gen_stack_pointer_inc (reg));
6297 /* Return true if it is appropriate to emit `return' instructions in the
6298 body of a function. */
6300 bool
6301 sparc_can_use_return_insn_p (void)
6303 return sparc_prologue_data_valid_p
6304 && sparc_n_global_fp_regs == 0
6305 && TARGET_FLAT
6306 ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
6307 : (sparc_frame_size == 0 || !sparc_leaf_function_p);
6310 /* This function generates the assembly code for function exit. */
6312 static void
6313 sparc_asm_function_epilogue (FILE *file)
6315 /* If the last two instructions of a function are "call foo; dslot;"
6316 the return address might point to the first instruction in the next
6317 function and we have to output a dummy nop for the sake of sane
6318 backtraces in such cases. This is pointless for sibling calls since
6319 the return address is explicitly adjusted. */
6321 rtx_insn *insn = get_last_insn ();
6323 rtx last_real_insn = prev_real_insn (insn);
6324 if (last_real_insn
6325 && NONJUMP_INSN_P (last_real_insn)
6326 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
6327 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
6329 if (last_real_insn
6330 && CALL_P (last_real_insn)
6331 && !SIBLING_CALL_P (last_real_insn))
6332 fputs ("\tnop\n", file);
6334 sparc_output_deferred_case_vectors ();
6337 /* Output a 'restore' instruction. */
6339 static void
6340 output_restore (rtx pat)
6342 rtx operands[3];
6344 if (! pat)
6346 fputs ("\t restore\n", asm_out_file);
6347 return;
6350 gcc_assert (GET_CODE (pat) == SET);
6352 operands[0] = SET_DEST (pat);
6353 pat = SET_SRC (pat);
6355 switch (GET_CODE (pat))
6357 case PLUS:
6358 operands[1] = XEXP (pat, 0);
6359 operands[2] = XEXP (pat, 1);
6360 output_asm_insn (" restore %r1, %2, %Y0", operands);
6361 break;
6362 case LO_SUM:
6363 operands[1] = XEXP (pat, 0);
6364 operands[2] = XEXP (pat, 1);
6365 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
6366 break;
6367 case ASHIFT:
6368 operands[1] = XEXP (pat, 0);
6369 gcc_assert (XEXP (pat, 1) == const1_rtx);
6370 output_asm_insn (" restore %r1, %r1, %Y0", operands);
6371 break;
6372 default:
6373 operands[1] = pat;
6374 output_asm_insn (" restore %%g0, %1, %Y0", operands);
6375 break;
6379 /* Output a return. */
6381 const char *
6382 output_return (rtx_insn *insn)
6384 if (crtl->calls_eh_return)
6386 /* If the function uses __builtin_eh_return, the eh_return
6387 machinery occupies the delay slot. */
6388 gcc_assert (!final_sequence);
6390 if (flag_delayed_branch)
6392 if (!TARGET_FLAT && TARGET_V9)
6393 fputs ("\treturn\t%i7+8\n", asm_out_file);
6394 else
6396 if (!TARGET_FLAT)
6397 fputs ("\trestore\n", asm_out_file);
6399 fputs ("\tjmp\t%o7+8\n", asm_out_file);
6402 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
6404 else
6406 if (!TARGET_FLAT)
6407 fputs ("\trestore\n", asm_out_file);
6409 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
6410 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
6413 else if (sparc_leaf_function_p || TARGET_FLAT)
6415 /* This is a leaf or flat function so we don't have to bother restoring
6416 the register window, which frees us from dealing with the convoluted
6417 semantics of restore/return. We simply output the jump to the
6418 return address and the insn in the delay slot (if any). */
6420 return "jmp\t%%o7+%)%#";
6422 else
6424 /* This is a regular function so we have to restore the register window.
6425 We may have a pending insn for the delay slot, which will be either
6426 combined with the 'restore' instruction or put in the delay slot of
6427 the 'return' instruction. */
6429 if (final_sequence)
6431 rtx_insn *delay;
6432 rtx pat;
6434 delay = NEXT_INSN (insn);
6435 gcc_assert (delay);
6437 pat = PATTERN (delay);
6439 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
6441 epilogue_renumber (&pat, 0);
6442 return "return\t%%i7+%)%#";
6444 else
6446 output_asm_insn ("jmp\t%%i7+%)", NULL);
6448 /* We're going to output the insn in the delay slot manually.
6449 Make sure to output its source location first. */
6450 PATTERN (delay) = gen_blockage ();
6451 INSN_CODE (delay) = -1;
6452 final_scan_insn (delay, asm_out_file, optimize, 0, NULL);
6453 INSN_LOCATION (delay) = UNKNOWN_LOCATION;
6455 output_restore (pat);
6458 else
6460 /* The delay slot is empty. */
6461 if (TARGET_V9)
6462 return "return\t%%i7+%)\n\t nop";
6463 else if (flag_delayed_branch)
6464 return "jmp\t%%i7+%)\n\t restore";
6465 else
6466 return "restore\n\tjmp\t%%o7+%)\n\t nop";
6470 return "";
6473 /* Output a sibling call. */
6475 const char *
6476 output_sibcall (rtx_insn *insn, rtx call_operand)
6478 rtx operands[1];
6480 gcc_assert (flag_delayed_branch);
6482 operands[0] = call_operand;
6484 if (sparc_leaf_function_p || TARGET_FLAT)
6486 /* This is a leaf or flat function so we don't have to bother restoring
6487 the register window. We simply output the jump to the function and
6488 the insn in the delay slot (if any). */
6490 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
6492 if (final_sequence)
6493 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
6494 operands);
6495 else
6496 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
6497 it into a branch if possible. */
6498 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
6499 operands);
6501 else
6503 /* This is a regular function so we have to restore the register window.
6504 We may have a pending insn for the delay slot, which will be combined
6505 with the 'restore' instruction. */
6507 output_asm_insn ("call\t%a0, 0", operands);
6509 if (final_sequence)
6511 rtx_insn *delay;
6512 rtx pat;
6514 delay = NEXT_INSN (insn);
6515 gcc_assert (delay);
6517 pat = PATTERN (delay);
6519 /* We're going to output the insn in the delay slot manually.
6520 Make sure to output its source location first. */
6521 PATTERN (delay) = gen_blockage ();
6522 INSN_CODE (delay) = -1;
6523 final_scan_insn (delay, asm_out_file, optimize, 0, NULL);
6524 INSN_LOCATION (delay) = UNKNOWN_LOCATION;
6526 output_restore (pat);
6528 else
6529 output_restore (NULL_RTX);
6532 return "";
6535 /* Functions for handling argument passing.
6537 For 32-bit, the first 6 args are normally in registers and the rest are
6538 pushed. Any arg that starts within the first 6 words is at least
6539 partially passed in a register unless its data type forbids.
6541 For 64-bit, the argument registers are laid out as an array of 16 elements
6542 and arguments are added sequentially. The first 6 int args and up to the
6543 first 16 fp args (depending on size) are passed in regs.
6545 Slot Stack Integral Float Float in structure Double Long Double
6546 ---- ----- -------- ----- ------------------ ------ -----------
6547 15 [SP+248] %f31 %f30,%f31 %d30
6548 14 [SP+240] %f29 %f28,%f29 %d28 %q28
6549 13 [SP+232] %f27 %f26,%f27 %d26
6550 12 [SP+224] %f25 %f24,%f25 %d24 %q24
6551 11 [SP+216] %f23 %f22,%f23 %d22
6552 10 [SP+208] %f21 %f20,%f21 %d20 %q20
6553 9 [SP+200] %f19 %f18,%f19 %d18
6554 8 [SP+192] %f17 %f16,%f17 %d16 %q16
6555 7 [SP+184] %f15 %f14,%f15 %d14
6556 6 [SP+176] %f13 %f12,%f13 %d12 %q12
6557 5 [SP+168] %o5 %f11 %f10,%f11 %d10
6558 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
6559 3 [SP+152] %o3 %f7 %f6,%f7 %d6
6560 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
6561 1 [SP+136] %o1 %f3 %f2,%f3 %d2
6562 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
6564 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
6566 Integral arguments are always passed as 64-bit quantities appropriately
6567 extended.
6569 Passing of floating point values is handled as follows.
6570 If a prototype is in scope:
6571 If the value is in a named argument (i.e. not a stdarg function or a
6572 value not part of the `...') then the value is passed in the appropriate
6573 fp reg.
6574 If the value is part of the `...' and is passed in one of the first 6
6575 slots then the value is passed in the appropriate int reg.
6576 If the value is part of the `...' and is not passed in one of the first 6
6577 slots then the value is passed in memory.
6578 If a prototype is not in scope:
6579 If the value is one of the first 6 arguments the value is passed in the
6580 appropriate integer reg and the appropriate fp reg.
6581 If the value is not one of the first 6 arguments the value is passed in
6582 the appropriate fp reg and in memory.
6585 Summary of the calling conventions implemented by GCC on the SPARC:
6587 32-bit ABI:
6588 size argument return value
6590 small integer <4 int. reg. int. reg.
6591 word 4 int. reg. int. reg.
6592 double word 8 int. reg. int. reg.
6594 _Complex small integer <8 int. reg. int. reg.
6595 _Complex word 8 int. reg. int. reg.
6596 _Complex double word 16 memory int. reg.
6598 vector integer <=8 int. reg. FP reg.
6599 vector integer >8 memory memory
6601 float 4 int. reg. FP reg.
6602 double 8 int. reg. FP reg.
6603 long double 16 memory memory
6605 _Complex float 8 memory FP reg.
6606 _Complex double 16 memory FP reg.
6607 _Complex long double 32 memory FP reg.
6609 vector float any memory memory
6611 aggregate any memory memory
6615 64-bit ABI:
6616 size argument return value
6618 small integer <8 int. reg. int. reg.
6619 word 8 int. reg. int. reg.
6620 double word 16 int. reg. int. reg.
6622 _Complex small integer <16 int. reg. int. reg.
6623 _Complex word 16 int. reg. int. reg.
6624 _Complex double word 32 memory int. reg.
6626 vector integer <=16 FP reg. FP reg.
6627 vector integer 16<s<=32 memory FP reg.
6628 vector integer >32 memory memory
6630 float 4 FP reg. FP reg.
6631 double 8 FP reg. FP reg.
6632 long double 16 FP reg. FP reg.
6634 _Complex float 8 FP reg. FP reg.
6635 _Complex double 16 FP reg. FP reg.
6636 _Complex long double 32 memory FP reg.
6638 vector float <=16 FP reg. FP reg.
6639 vector float 16<s<=32 memory FP reg.
6640 vector float >32 memory memory
6642 aggregate <=16 reg. reg.
6643 aggregate 16<s<=32 memory reg.
6644 aggregate >32 memory memory
6648 Note #1: complex floating-point types follow the extended SPARC ABIs as
6649 implemented by the Sun compiler.
6651 Note #2: integral vector types follow the scalar floating-point types
6652 conventions to match what is implemented by the Sun VIS SDK.
6654 Note #3: floating-point vector types follow the aggregate types
6655 conventions. */
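/* As an illustrative reading of the 64-bit table above, a prototyped
   call f (int a, double b, float c) assigns A to %o0 (slot 0), B to %d2
   (slot 1) and C to %f5 (slot 2, right-justified in the outer double FP
   register).  */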
6658 /* Maximum number of int regs for args. */
6659 #define SPARC_INT_ARG_MAX 6
6660 /* Maximum number of fp regs for args. */
6661 #define SPARC_FP_ARG_MAX 16
6662 /* Number of words (partially) occupied for a given size in units. */
6663 #define CEIL_NWORDS(SIZE) CEIL((SIZE), UNITS_PER_WORD)
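/* For instance, CEIL_NWORDS (12) is 2 when UNITS_PER_WORD is 8 (64-bit)
   and 3 when it is 4 (32-bit).  */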
6665 /* Handle the INIT_CUMULATIVE_ARGS macro.
6666 Initialize a variable CUM of type CUMULATIVE_ARGS
6667 for a call to a function whose data type is FNTYPE.
6668 For a library call, FNTYPE is 0. */
6670 void
6671 init_cumulative_args (struct sparc_args *cum, tree fntype, rtx, tree)
6673 cum->words = 0;
6674 cum->prototype_p = fntype && prototype_p (fntype);
6675 cum->libcall_p = !fntype;
6678 /* Handle promotion of pointer and integer arguments. */
6680 static machine_mode
6681 sparc_promote_function_mode (const_tree type, machine_mode mode,
6682 int *punsignedp, const_tree, int)
6684 if (type && POINTER_TYPE_P (type))
6686 *punsignedp = POINTERS_EXTEND_UNSIGNED;
6687 return Pmode;
6690 /* Integral arguments are passed as full words, as per the ABI. */
6691 if (GET_MODE_CLASS (mode) == MODE_INT
6692 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6693 return word_mode;
6695 return mode;
6698 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
6700 static bool
6701 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
6703 return TARGET_ARCH64;
6706 /* Traverse the record TYPE recursively and call FUNC on its fields.
6707 NAMED is true if this is for a named parameter. DATA is passed
6708 to FUNC for each field. OFFSET is the starting position and
6709 PACKED is true if we are inside a packed record. */
6711 template <typename T, void Func (const_tree, HOST_WIDE_INT, bool, T*)>
6712 static void
6713 traverse_record_type (const_tree type, bool named, T *data,
6714 HOST_WIDE_INT offset = 0, bool packed = false)
6716 /* The ABI obviously doesn't specify how packed structures are passed.
6717 These are passed in integer regs if possible, otherwise memory. */
6718 if (!packed)
6719 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6720 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
6722 packed = true;
6723 break;
6726 /* Walk the real fields, but skip those with no size or a zero size.
6727 ??? Fields with variable offset are handled as having zero offset. */
6728 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6729 if (TREE_CODE (field) == FIELD_DECL)
6731 if (!DECL_SIZE (field) || integer_zerop (DECL_SIZE (field)))
6732 continue;
6734 HOST_WIDE_INT bitpos = offset;
6735 if (TREE_CODE (DECL_FIELD_OFFSET (field)) == INTEGER_CST)
6736 bitpos += int_bit_position (field);
6738 tree field_type = TREE_TYPE (field);
6739 if (TREE_CODE (field_type) == RECORD_TYPE)
6740 traverse_record_type<T, Func> (field_type, named, data, bitpos,
6741 packed);
6742 else
6744 const bool fp_type
6745 = FLOAT_TYPE_P (field_type) || VECTOR_TYPE_P (field_type);
6746 Func (field, bitpos, fp_type && named && !packed && TARGET_FPU,
6747 data);
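/* A typical instantiation, as used by function_arg_slotno below:

     classify_data_t data = { false, false };
     traverse_record_type<classify_data_t, classify_registers>
       (type, named, &data);

   walks TYPE and records whether any field is eligible for FP
   registers.  */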
6752 /* Handle recursive register classifying for structure layout. */
6754 typedef struct
6756 bool fp_regs; /* true if field eligible to FP registers. */
6757 bool fp_regs_in_first_word; /* true if such field in first word. */
6758 } classify_data_t;
6760 /* A subroutine of function_arg_slotno. Classify the field. */
6762 inline void
6763 classify_registers (const_tree, HOST_WIDE_INT bitpos, bool fp,
6764 classify_data_t *data)
6766 if (fp)
6768 data->fp_regs = true;
6769 if (bitpos < BITS_PER_WORD)
6770 data->fp_regs_in_first_word = true;
6774 /* Compute the slot number to pass an argument in.
6775 Return the slot number or -1 if passing on the stack.
6777 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6778 the preceding args and about the function being called.
6779 MODE is the argument's machine mode.
6780 TYPE is the data type of the argument (as a tree).
6781 This is null for libcalls where that information may
6782 not be available.
6783 NAMED is nonzero if this argument is a named parameter
6784 (otherwise it is an extra parameter matching an ellipsis).
6785 INCOMING is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
6786 *PREGNO records the register number to use if scalar type.
6787 *PPADDING records the amount of padding needed in words. */
6789 static int
6790 function_arg_slotno (const struct sparc_args *cum, machine_mode mode,
6791 const_tree type, bool named, bool incoming,
6792 int *pregno, int *ppadding)
6794 int regbase = (incoming
6795 ? SPARC_INCOMING_INT_ARG_FIRST
6796 : SPARC_OUTGOING_INT_ARG_FIRST);
6797 int slotno = cum->words;
6798 enum mode_class mclass;
6799 int regno;
6801 *ppadding = 0;
6803 if (type && TREE_ADDRESSABLE (type))
6804 return -1;
6806 if (TARGET_ARCH32
6807 && mode == BLKmode
6808 && type
6809 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
6810 return -1;
6812 /* For SPARC64, objects requiring 16-byte alignment get it. */
6813 if (TARGET_ARCH64
6814 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
6815 && (slotno & 1) != 0)
6816 slotno++, *ppadding = 1;
6818 mclass = GET_MODE_CLASS (mode);
6819 if (type && TREE_CODE (type) == VECTOR_TYPE)
6821 /* Vector types deserve special treatment because they are
6822 polymorphic wrt their mode, depending upon whether VIS
6823 instructions are enabled. */
6824 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
6826 /* The SPARC port defines no floating-point vector modes. */
6827 gcc_assert (mode == BLKmode);
6829 else
6831 /* Integral vector types should either have a vector
6832 mode or an integral mode, because we are guaranteed
6833 by pass_by_reference that their size is not greater
6834 than 16 bytes and TImode is 16-byte wide. */
6835 gcc_assert (mode != BLKmode);
6837 /* Vector integers are handled like floats according to
6838 the Sun VIS SDK. */
6839 mclass = MODE_FLOAT;
6843 switch (mclass)
6845 case MODE_FLOAT:
6846 case MODE_COMPLEX_FLOAT:
6847 case MODE_VECTOR_INT:
6848 if (TARGET_ARCH64 && TARGET_FPU && named)
6850 /* If all arg slots are filled, then must pass on stack. */
6851 if (slotno >= SPARC_FP_ARG_MAX)
6852 return -1;
6854 regno = SPARC_FP_ARG_FIRST + slotno * 2;
6855 /* Arguments filling only one single FP register are
6856 right-justified in the outer double FP register. */
6857 if (GET_MODE_SIZE (mode) <= 4)
6858 regno++;
6859 break;
6861 /* fallthrough */
6863 case MODE_INT:
6864 case MODE_COMPLEX_INT:
6865 /* If all arg slots are filled, then must pass on stack. */
6866 if (slotno >= SPARC_INT_ARG_MAX)
6867 return -1;
6869 regno = regbase + slotno;
6870 break;
6872 case MODE_RANDOM:
6873 if (mode == VOIDmode)
6874 /* MODE is VOIDmode when generating the actual call. */
6875 return -1;
6877 gcc_assert (mode == BLKmode);
6879 if (TARGET_ARCH32
6880 || !type
6881 || (TREE_CODE (type) != RECORD_TYPE
6882 && TREE_CODE (type) != VECTOR_TYPE))
6884 /* If all arg slots are filled, then must pass on stack. */
6885 if (slotno >= SPARC_INT_ARG_MAX)
6886 return -1;
6888 regno = regbase + slotno;
6890 else /* TARGET_ARCH64 && type */
6892 /* If all arg slots are filled, then must pass on stack. */
6893 if (slotno >= SPARC_FP_ARG_MAX)
6894 return -1;
6896 if (TREE_CODE (type) == RECORD_TYPE)
6898 classify_data_t data = { false, false };
6899 traverse_record_type<classify_data_t, classify_registers>
6900 (type, named, &data);
6902 if (data.fp_regs)
6904 /* If all FP slots are filled except for the last one and
6905 there is no FP field in the first word, then must pass
6906 on stack. */
6907 if (slotno >= SPARC_FP_ARG_MAX - 1
6908 && !data.fp_regs_in_first_word)
6909 return -1;
6911 else
6913 /* If all int slots are filled, then must pass on stack. */
6914 if (slotno >= SPARC_INT_ARG_MAX)
6915 return -1;
6919 /* PREGNO isn't set since both int and FP regs can be used. */
6920 return slotno;
6922 break;
6924 default:
6925 gcc_unreachable ();
6928 *pregno = regno;
6929 return slotno;
6932 /* Handle recursive register counting/assigning for structure layout. */
6934 typedef struct
6936 int slotno; /* slot number of the argument. */
6937 int regbase; /* regno of the base register. */
6938 int intoffset; /* offset of the first pending integer field. */
6939 int nregs; /* number of words passed in registers. */
6940 bool stack; /* true if part of the argument is on the stack. */
6941 rtx ret; /* return expression being built. */
6942 } assign_data_t;
6944 /* A subroutine of function_arg_record_value. Compute the number of integer
6945 registers to be assigned between PARMS->intoffset and BITPOS. Return
6946 true if at least one integer register is assigned or false otherwise. */
6948 static bool
6949 compute_int_layout (HOST_WIDE_INT bitpos, assign_data_t *data, int *pnregs)
6951 if (data->intoffset < 0)
6952 return false;
6954 const int intoffset = data->intoffset;
6955 data->intoffset = -1;
6957 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
6958 const unsigned int startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
6959 const unsigned int endbit = ROUND_UP (bitpos, BITS_PER_WORD);
6960 int nregs = (endbit - startbit) / BITS_PER_WORD;
6962 if (nregs > 0 && nregs > SPARC_INT_ARG_MAX - this_slotno)
6964 nregs = SPARC_INT_ARG_MAX - this_slotno;
6966 /* We need to pass this field (partly) on the stack. */
6967 data->stack = 1;
6970 if (nregs <= 0)
6971 return false;
6973 *pnregs = nregs;
6974 return true;
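/* For example, with BITS_PER_WORD at 64, INTOFFSET = 32 and BITPOS = 128
   give STARTBIT = 0 and ENDBIT = 128, hence two integer registers.  */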
6977 /* A subroutine of function_arg_record_value. Compute the number and the mode
6978 of the FP registers to be assigned for FIELD. Return true if at least one
6979 FP register is assigned or false otherwise. */
6981 static bool
6982 compute_fp_layout (const_tree field, HOST_WIDE_INT bitpos,
6983 assign_data_t *data,
6984 int *pnregs, machine_mode *pmode)
6986 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
6987 machine_mode mode = DECL_MODE (field);
6988 int nregs, nslots;
6990 /* Slots are counted as words while regs are counted as having the size of
6991 the (inner) mode. */
6992 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE && mode == BLKmode)
6994 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6995 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
6997 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
6999 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
7000 nregs = 2;
7002 else
7003 nregs = 1;
7005 nslots = CEIL_NWORDS (nregs * GET_MODE_SIZE (mode));
7007 if (nslots > SPARC_FP_ARG_MAX - this_slotno)
7009 nslots = SPARC_FP_ARG_MAX - this_slotno;
7010 nregs = (nslots * UNITS_PER_WORD) / GET_MODE_SIZE (mode);
7012 /* We need to pass this field (partly) on the stack. */
7013 data->stack = 1;
7015 if (nregs <= 0)
7016 return false;
7019 *pnregs = nregs;
7020 *pmode = mode;
7021 return true;
7024 /* A subroutine of function_arg_record_value. Count the number of registers
7025 to be assigned for FIELD and between PARMS->intoffset and BITPOS. */
7027 inline void
7028 count_registers (const_tree field, HOST_WIDE_INT bitpos, bool fp,
7029 assign_data_t *data)
7031 if (fp)
7033 int nregs;
7034 machine_mode mode;
7036 if (compute_int_layout (bitpos, data, &nregs))
7037 data->nregs += nregs;
7039 if (compute_fp_layout (field, bitpos, data, &nregs, &mode))
7040 data->nregs += nregs;
7042 else
7044 if (data->intoffset < 0)
7045 data->intoffset = bitpos;
7049 /* A subroutine of function_arg_record_value. Assign the bits of the
7050 structure between PARMS->intoffset and BITPOS to integer registers. */
7052 static void
7053 assign_int_registers (HOST_WIDE_INT bitpos, assign_data_t *data)
7055 int intoffset = data->intoffset;
7056 machine_mode mode;
7057 int nregs;
7059 if (!compute_int_layout (bitpos, data, &nregs))
7060 return;
7062 /* If this is the trailing part of a word, only load that much into
7063 the register. Otherwise load the whole register. Note that in
7064 the latter case we may pick up unwanted bits. It's not a problem
7065 at the moment, but we may wish to revisit this. */
7066 if (intoffset % BITS_PER_WORD != 0)
7067 mode = smallest_int_mode_for_size (BITS_PER_WORD
7068 - intoffset % BITS_PER_WORD);
7069 else
7070 mode = word_mode;
7072 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
7073 unsigned int regno = data->regbase + this_slotno;
7074 intoffset /= BITS_PER_UNIT;
7078 rtx reg = gen_rtx_REG (mode, regno);
7079 XVECEXP (data->ret, 0, data->stack + data->nregs)
7080 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
7081 data->nregs += 1;
7082 mode = word_mode;
7083 regno += 1;
7084 intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
7086 while (--nregs > 0);
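/* E.g. for a pending integer field starting at bit 32 of a 64-bit word,
   the first iteration uses SImode for the trailing 32 bits, after which
   INTOFFSET is realigned to the next word boundary and any remaining
   registers are loaded in word_mode.  */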
7089 /* A subroutine of function_arg_record_value. Assign FIELD at position
7090 BITPOS to FP registers. */
7092 static void
7093 assign_fp_registers (const_tree field, HOST_WIDE_INT bitpos,
7094 assign_data_t *data)
7096 int nregs;
7097 machine_mode mode;
7099 if (!compute_fp_layout (field, bitpos, data, &nregs, &mode))
7100 return;
7102 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
7103 int regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
7104 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
7105 regno++;
7106 int pos = bitpos / BITS_PER_UNIT;
7110 rtx reg = gen_rtx_REG (mode, regno);
7111 XVECEXP (data->ret, 0, data->stack + data->nregs)
7112 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
7113 data->nregs += 1;
7114 regno += GET_MODE_SIZE (mode) / 4;
7115 pos += GET_MODE_SIZE (mode);
7117 while (--nregs > 0);
7120 /* A subroutine of function_arg_record_value. Assign FIELD and the bits of
7121 the structure between PARMS->intoffset and BITPOS to registers. */
7123 inline void
7124 assign_registers (const_tree field, HOST_WIDE_INT bitpos, bool fp,
7125 assign_data_t *data)
7127 if (fp)
7129 assign_int_registers (bitpos, data);
7131 assign_fp_registers (field, bitpos, data);
7133 else
7135 if (data->intoffset < 0)
7136 data->intoffset = bitpos;
7140 /* Used by function_arg and sparc_function_value_1 to implement the complex
7141 conventions of the 64-bit ABI for passing and returning structures.
7142 Return an expression valid as a return value for the FUNCTION_ARG
7143 and TARGET_FUNCTION_VALUE.
7145 TYPE is the data type of the argument (as a tree).
7146 This is null for libcalls where that information may
7147 not be available.
7148 MODE is the argument's machine mode.
7149 SLOTNO is the index number of the argument's slot in the parameter array.
7150 NAMED is true if this argument is a named parameter
7151 (otherwise it is an extra parameter matching an ellipsis).
7152 REGBASE is the regno of the base register for the parameter array. */
7154 static rtx
7155 function_arg_record_value (const_tree type, machine_mode mode,
7156 int slotno, bool named, int regbase)
7158 HOST_WIDE_INT typesize = int_size_in_bytes (type);
7159 assign_data_t data;
7160 int nregs;
7162 data.slotno = slotno;
7163 data.regbase = regbase;
7165 /* Count how many registers we need. */
7166 data.nregs = 0;
7167 data.intoffset = 0;
7168 data.stack = false;
7169 traverse_record_type<assign_data_t, count_registers> (type, named, &data);
7171 /* Take into account pending integer fields. */
7172 if (compute_int_layout (typesize * BITS_PER_UNIT, &data, &nregs))
7173 data.nregs += nregs;
7175 /* Allocate the vector and handle some annoying special cases. */
7176 nregs = data.nregs;
7178 if (nregs == 0)
7180 /* ??? Empty structure has no value? Duh? */
7181 if (typesize <= 0)
7183 /* Though there's nothing really to store, return a word register
7184 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
7185 leads to breakage due to the fact that there are zero bytes to
7186 load. */
7187 return gen_rtx_REG (mode, regbase);
7190 /* ??? C++ has structures with no fields, and yet a size. Give up
7191 for now and pass everything back in integer registers. */
7192 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7193 if (nregs + slotno > SPARC_INT_ARG_MAX)
7194 nregs = SPARC_INT_ARG_MAX - slotno;
7197 gcc_assert (nregs > 0);
7199 data.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (data.stack + nregs));
7201 /* If at least one field must be passed on the stack, generate
7202 (parallel [(expr_list (nil) ...) ...]) so that all fields will
7203 also be passed on the stack. We can't do much better because the
7204 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
7205 of structures for which the fields passed exclusively in registers
7206 are not at the beginning of the structure. */
7207 if (data.stack)
7208 XVECEXP (data.ret, 0, 0)
7209 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7211 /* Assign the registers. */
7212 data.nregs = 0;
7213 data.intoffset = 0;
7214 traverse_record_type<assign_data_t, assign_registers> (type, named, &data);
7216 /* Assign pending integer fields. */
7217 assign_int_registers (typesize * BITS_PER_UNIT, &data);
7219 gcc_assert (data.nregs == nregs);
7221 return data.ret;
7224 /* Used by function_arg and sparc_function_value_1 to implement the conventions
7225 of the 64-bit ABI for passing and returning unions.
7226 Return an expression valid as a return value for the FUNCTION_ARG
7227 and TARGET_FUNCTION_VALUE.
7229 SIZE is the size in bytes of the union.
7230 MODE is the argument's machine mode.
7231 REGNO is the hard register the union will be passed in. */
7233 static rtx
7234 function_arg_union_value (int size, machine_mode mode, int slotno,
7235 int regno)
7237 int nwords = CEIL_NWORDS (size), i;
7238 rtx regs;
7240 /* See comment in previous function for empty structures. */
7241 if (nwords == 0)
7242 return gen_rtx_REG (mode, regno);
7244 if (slotno == SPARC_INT_ARG_MAX - 1)
7245 nwords = 1;
7247 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
7249 for (i = 0; i < nwords; i++)
7251 /* Unions are passed left-justified. */
7252 XVECEXP (regs, 0, i)
7253 = gen_rtx_EXPR_LIST (VOIDmode,
7254 gen_rtx_REG (word_mode, regno),
7255 GEN_INT (UNITS_PER_WORD * i));
7256 regno++;
7259 return regs;
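/* For example, a 12-byte union passed in the first slot with REGNO = %o0
   yields (parallel [(%o0, 0) (%o1, 8)]): CEIL_NWORDS (12) = 2
   left-justified word-sized pieces.  */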
7262 /* Used by function_arg and sparc_function_value_1 to implement the conventions
7263 for passing and returning BLKmode vectors.
7264 Return an expression valid as a return value for the FUNCTION_ARG
7265 and TARGET_FUNCTION_VALUE.
7267 SIZE is the size in bytes of the vector.
7268 REGNO is the FP hard register the vector will be passed in. */
7270 static rtx
7271 function_arg_vector_value (int size, int regno)
7273 const int nregs = MAX (1, size / 8);
7274 rtx regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
7276 if (size < 8)
7277 XVECEXP (regs, 0, 0)
7278 = gen_rtx_EXPR_LIST (VOIDmode,
7279 gen_rtx_REG (SImode, regno),
7280 const0_rtx);
7281 else
7282 for (int i = 0; i < nregs; i++)
7283 XVECEXP (regs, 0, i)
7284 = gen_rtx_EXPR_LIST (VOIDmode,
7285 gen_rtx_REG (DImode, regno + 2*i),
7286 GEN_INT (i*8));
7288 return regs;
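/* For example, a 16-byte vector starting at FP register REGNO is spread
   over (parallel [(DImode REGNO, 0) (DImode REGNO+2, 8)]), whereas a
   4-byte vector occupies a single SImode FP register.  */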
7291 /* Determine where to put an argument to a function.
7292 Value is zero to push the argument on the stack,
7293 or a hard register in which to store the argument.
7295 CUM is a variable of type CUMULATIVE_ARGS which gives info about
7296 the preceding args and about the function being called.
7297 MODE is the argument's machine mode.
7298 TYPE is the data type of the argument (as a tree).
7299 This is null for libcalls where that information may
7300 not be available.
7301 NAMED is true if this argument is a named parameter
7302 (otherwise it is an extra parameter matching an ellipsis).
7303 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
7304 TARGET_FUNCTION_INCOMING_ARG. */
7306 static rtx
7307 sparc_function_arg_1 (cumulative_args_t cum_v, machine_mode mode,
7308 const_tree type, bool named, bool incoming)
7310 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7312 int regbase = (incoming
7313 ? SPARC_INCOMING_INT_ARG_FIRST
7314 : SPARC_OUTGOING_INT_ARG_FIRST);
7315 int slotno, regno, padding;
7316 enum mode_class mclass = GET_MODE_CLASS (mode);
7318 slotno = function_arg_slotno (cum, mode, type, named, incoming,
7319 &regno, &padding);
7320 if (slotno == -1)
7321 return 0;
7323 /* Vector types deserve special treatment because they are polymorphic wrt
7324 their mode, depending upon whether VIS instructions are enabled. */
7325 if (type && TREE_CODE (type) == VECTOR_TYPE)
7327 HOST_WIDE_INT size = int_size_in_bytes (type);
7328 gcc_assert ((TARGET_ARCH32 && size <= 8)
7329 || (TARGET_ARCH64 && size <= 16));
7331 if (mode == BLKmode)
7332 return function_arg_vector_value (size, SPARC_FP_ARG_FIRST + 2*slotno);
7334 mclass = MODE_FLOAT;
7337 if (TARGET_ARCH32)
7338 return gen_rtx_REG (mode, regno);
7340 /* Structures up to 16 bytes in size are passed in arg slots on the stack
7341 and are promoted to registers if possible. */
7342 if (type && TREE_CODE (type) == RECORD_TYPE)
7344 HOST_WIDE_INT size = int_size_in_bytes (type);
7345 gcc_assert (size <= 16);
7347 return function_arg_record_value (type, mode, slotno, named, regbase);
7350 /* Unions up to 16 bytes in size are passed in integer registers. */
7351 else if (type && TREE_CODE (type) == UNION_TYPE)
7353 HOST_WIDE_INT size = int_size_in_bytes (type);
7354 gcc_assert (size <= 16);
7356 return function_arg_union_value (size, mode, slotno, regno);
7359 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
7360 but also have the slot allocated for them.
7361 If no prototype is in scope fp values in register slots get passed
7362 in two places, either fp regs and int regs or fp regs and memory. */
7363 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7364 && SPARC_FP_REG_P (regno))
7366 rtx reg = gen_rtx_REG (mode, regno);
7367 if (cum->prototype_p || cum->libcall_p)
7368 return reg;
7369 else
7371 rtx v0, v1;
7373 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
7375 int intreg;
7377 /* On incoming, we don't need to know that the value
7378 is passed in %f0 and %i0, and it confuses other parts,
7379 causing needless spillage in even the simplest cases. */
7380 if (incoming)
7381 return reg;
7383 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
7384 + (regno - SPARC_FP_ARG_FIRST) / 2);
7386 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7387 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
7388 const0_rtx);
7389 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7391 else
7393 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7394 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7395 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7400 /* All other aggregate types are passed in an integer register in a mode
7401 corresponding to the size of the type. */
7402 else if (type && AGGREGATE_TYPE_P (type))
7404 HOST_WIDE_INT size = int_size_in_bytes (type);
7405 gcc_assert (size <= 16);
7407 mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
7410 return gen_rtx_REG (mode, regno);
7413 /* Handle the TARGET_FUNCTION_ARG target hook. */
7415 static rtx
7416 sparc_function_arg (cumulative_args_t cum, machine_mode mode,
7417 const_tree type, bool named)
7419 return sparc_function_arg_1 (cum, mode, type, named, false);
7422 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
7424 static rtx
7425 sparc_function_incoming_arg (cumulative_args_t cum, machine_mode mode,
7426 const_tree type, bool named)
7428 return sparc_function_arg_1 (cum, mode, type, named, true);
7431 /* For sparc64, objects requiring 16 byte alignment are passed that way. */
7433 static unsigned int
7434 sparc_function_arg_boundary (machine_mode mode, const_tree type)
7436 return ((TARGET_ARCH64
7437 && (GET_MODE_ALIGNMENT (mode) == 128
7438 || (type && TYPE_ALIGN (type) == 128)))
7439 ? 128
7440 : PARM_BOUNDARY);
7443 /* For an arg passed partly in registers and partly in memory,
7444 this is the number of bytes of registers used.
7445 For args passed entirely in registers or entirely in memory, zero.
7447 Any arg that starts in the first 6 regs but won't entirely fit in them
7448 needs partial registers on v8. On v9, structures with integer
7449 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
7450 values that begin in the last fp reg [where "last fp reg" varies with the
7451 mode] will be split between that reg and memory. */
7453 static int
7454 sparc_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
7455 tree type, bool named)
7457 int slotno, regno, padding;
7459 /* We pass false for incoming here; it doesn't matter. */
7460 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
7461 false, &regno, &padding);
7463 if (slotno == -1)
7464 return 0;
7466 if (TARGET_ARCH32)
7468 if ((slotno + (mode == BLKmode
7469 ? CEIL_NWORDS (int_size_in_bytes (type))
7470 : CEIL_NWORDS (GET_MODE_SIZE (mode))))
7471 > SPARC_INT_ARG_MAX)
7472 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
7474 else
7476 /* We are guaranteed by pass_by_reference that the size of the
7477 argument is not greater than 16 bytes, so we only need to return
7478 one word if the argument is partially passed in registers. */
7480 if (type && AGGREGATE_TYPE_P (type))
7482 int size = int_size_in_bytes (type);
7484 if (size > UNITS_PER_WORD
7485 && (slotno == SPARC_INT_ARG_MAX - 1
7486 || slotno == SPARC_FP_ARG_MAX - 1))
7487 return UNITS_PER_WORD;
7489 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
7490 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7491 && ! (TARGET_FPU && named)))
7493 /* The complex types are passed as packed types. */
7494 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
7495 && slotno == SPARC_INT_ARG_MAX - 1)
7496 return UNITS_PER_WORD;
7498 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7500 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
7501 > SPARC_FP_ARG_MAX)
7502 return UNITS_PER_WORD;
7506 return 0;
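/* A concrete 32-bit case: a DImode argument landing in slot 5 needs two
   words but only %o5 is left, so the function returns
   (SPARC_INT_ARG_MAX - 5) * UNITS_PER_WORD = 4 bytes passed in
   registers, the rest going to memory.  */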
7509 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
7510 Specify whether to pass the argument by reference. */
7512 static bool
7513 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
7514 machine_mode mode, const_tree type,
7515 bool named ATTRIBUTE_UNUSED)
7517 if (TARGET_ARCH32)
7518 /* Original SPARC 32-bit ABI says that structures and unions,
7519 and quad-precision floats are passed by reference. For Pascal,
7520 also pass arrays by reference. All other base types are passed
7521 in registers.
7523 Extended ABI (as implemented by the Sun compiler) says that all
7524 complex floats are passed by reference. Pass complex integers
7525 in registers up to 8 bytes. More generally, enforce the 2-word
7526 cap for passing arguments in registers.
7528 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7529 integers are passed like floats of the same size, that is in
7530 registers up to 8 bytes. Pass all vector floats by reference
7531 like structure and unions. */
7532 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
7533 || mode == SCmode
7534 /* Catch CDImode, TFmode, DCmode and TCmode. */
7535 || GET_MODE_SIZE (mode) > 8
7536 || (type
7537 && TREE_CODE (type) == VECTOR_TYPE
7538 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
7539 else
7540 /* Original SPARC 64-bit ABI says that structures and unions
7541 smaller than 16 bytes are passed in registers, as well as
7542 all other base types.
7544 Extended ABI (as implemented by the Sun compiler) says that
7545 complex floats are passed in registers up to 16 bytes. Pass
7546 all complex integers in registers up to 16 bytes. More generally,
7547 enforce the 2-word cap for passing arguments in registers.
7549 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7550 integers are passed like floats of the same size, that is in
7551 registers (up to 16 bytes). Pass all vector floats like structure
7552 and unions. */
7553 return ((type
7554 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
7555 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
7556 /* Catch CTImode and TCmode. */
7557 || GET_MODE_SIZE (mode) > 16);
7560 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
7561 Update the data in CUM to advance over an argument
7562 of mode MODE and data type TYPE.
7563 TYPE is null for libcalls where that information may not be available. */
7565 static void
7566 sparc_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
7567 const_tree type, bool named)
7569 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7570 int regno, padding;
7572 /* We pass false for incoming here; it doesn't matter. */
7573 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
7575 /* If argument requires leading padding, add it. */
7576 cum->words += padding;
7578 if (TARGET_ARCH32)
7579 cum->words += (mode == BLKmode
7580 ? CEIL_NWORDS (int_size_in_bytes (type))
7581 : CEIL_NWORDS (GET_MODE_SIZE (mode)));
7582 else
7584 if (type && AGGREGATE_TYPE_P (type))
7586 int size = int_size_in_bytes (type);
7588 if (size <= 8)
7589 ++cum->words;
7590 else if (size <= 16)
7591 cum->words += 2;
7592 else /* passed by reference */
7593 ++cum->words;
7595 else
7596 cum->words += (mode == BLKmode
7597 ? CEIL_NWORDS (int_size_in_bytes (type))
7598 : CEIL_NWORDS (GET_MODE_SIZE (mode)));
7602 /* Implement TARGET_FUNCTION_ARG_PADDING. For the 64-bit ABI, structs
7603 are always stored left-justified in their argument slot. */
7605 static pad_direction
7606 sparc_function_arg_padding (machine_mode mode, const_tree type)
7608 if (TARGET_ARCH64 && type && AGGREGATE_TYPE_P (type))
7609 return PAD_UPWARD;
7611 /* Fall back to the default. */
7612 return default_function_arg_padding (mode, type);
7615 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
7616 Specify whether to return the return value in memory. */
7618 static bool
7619 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7621 if (TARGET_ARCH32)
7622 /* Original SPARC 32-bit ABI says that structures and unions,
7623 and quad-precision floats are returned in memory. All other
7624 base types are returned in registers.
7626 Extended ABI (as implemented by the Sun compiler) says that
7627 all complex floats are returned in registers (8 FP registers
7628 at most for '_Complex long double'). Return all complex integers
7629 in registers (4 at most for '_Complex long long').
7631 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7632 integers are returned like floats of the same size, that is in
7633 registers up to 8 bytes and in memory otherwise. Return all
7634 vector floats in memory like structure and unions; note that
7635 they always have BLKmode like the latter. */
7636 return (TYPE_MODE (type) == BLKmode
7637 || TYPE_MODE (type) == TFmode
7638 || (TREE_CODE (type) == VECTOR_TYPE
7639 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
7640 else
7641 /* Original SPARC 64-bit ABI says that structures and unions
7642 smaller than 32 bytes are returned in registers, as well as
7643 all other base types.
7645 Extended ABI (as implemented by the Sun compiler) says that all
7646 complex floats are returned in registers (8 FP registers at most
7647 for '_Complex long double'). Return all complex integers in
7648 registers (4 at most for '_Complex TItype').
7650 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7651 integers are returned like floats of the same size, that is in
7652 registers. Return all vector floats like structure and unions;
7653 note that they always have BLKmode like the latter. */
7654 return (TYPE_MODE (type) == BLKmode
7655 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
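/* Editor's illustration (a sketch, not from the original sources): under
   the 64-bit rule just above, a hypothetical 24-byte struct has BLKmode
   but is not larger than 32 bytes, so it comes back in registers, while
   a 40-byte struct is returned in memory:

     struct in_regs { long a, b, c; };        // 24 bytes: registers
     struct in_mem  { long a, b, c, d, e; };  // 40 bytes: memory  */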
7658 /* Handle the TARGET_STRUCT_VALUE target hook.
7659 Return where to find the structure return value address. */
7661 static rtx
7662 sparc_struct_value_rtx (tree fndecl, int incoming)
7664 if (TARGET_ARCH64)
7665 return 0;
7666 else
7668 rtx mem;
7670 if (incoming)
7671 mem = gen_frame_mem (Pmode, plus_constant (Pmode, frame_pointer_rtx,
7672 STRUCT_VALUE_OFFSET));
7673 else
7674 mem = gen_frame_mem (Pmode, plus_constant (Pmode, stack_pointer_rtx,
7675 STRUCT_VALUE_OFFSET));
7677 /* Only follow the SPARC ABI for fixed-size structure returns.
7678 Variable-size structure returns are handled per the normal
7679 procedures in GCC. This is enabled by -mstd-struct-return. */
7680 if (incoming == 2
7681 && sparc_std_struct_return
7682 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
7683 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
7685 /* We must check and adjust the return address, as it is optional
7686 as to whether the return object is really provided. */
7687 rtx ret_reg = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
7688 rtx scratch = gen_reg_rtx (SImode);
7689 rtx_code_label *endlab = gen_label_rtx ();
7691 /* Calculate the return object size. */
7692 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
7693 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
7694 /* Construct a temporary return value. */
7695 rtx temp_val
7696 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
7698 /* Implement SPARC 32-bit psABI callee return struct checking:
7700 Fetch the instruction we will return to and see if
7701 it's an unimp instruction (the most significant 10 bits
7702 will be zero). */
7703 emit_move_insn (scratch, gen_rtx_MEM (SImode,
7704 plus_constant (Pmode,
7705 ret_reg, 8)));
7706 /* Assume the size is valid and pre-adjust. */
7707 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
7708 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
7709 0, endlab);
7710 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
7711 /* Write the address of the memory pointed to by temp_val into
7712 the memory pointed to by mem. */
7713 emit_move_insn (mem, XEXP (temp_val, 0));
7714 emit_label (endlab);
7717 return mem;
7721 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
7722 For v9, function return values are subject to the same rules as arguments,
7723 except that up to 32 bytes may be returned in registers. */
7725 static rtx
7726 sparc_function_value_1 (const_tree type, machine_mode mode,
7727 bool outgoing)
7729 /* Beware that the two values are swapped here wrt function_arg. */
7730 int regbase = (outgoing
7731 ? SPARC_INCOMING_INT_ARG_FIRST
7732 : SPARC_OUTGOING_INT_ARG_FIRST);
7733 enum mode_class mclass = GET_MODE_CLASS (mode);
7734 int regno;
7736 /* Vector types deserve special treatment because they are polymorphic wrt
7737 their mode, depending upon whether VIS instructions are enabled. */
7738 if (type && TREE_CODE (type) == VECTOR_TYPE)
7740 HOST_WIDE_INT size = int_size_in_bytes (type);
7741 gcc_assert ((TARGET_ARCH32 && size <= 8)
7742 || (TARGET_ARCH64 && size <= 32));
7744 if (mode == BLKmode)
7745 return function_arg_vector_value (size, SPARC_FP_ARG_FIRST);
7747 mclass = MODE_FLOAT;
7750 if (TARGET_ARCH64 && type)
7752 /* Structures up to 32 bytes in size are returned in registers. */
7753 if (TREE_CODE (type) == RECORD_TYPE)
7755 HOST_WIDE_INT size = int_size_in_bytes (type);
7756 gcc_assert (size <= 32);
7758 return function_arg_record_value (type, mode, 0, 1, regbase);
7761 /* Unions up to 32 bytes in size are returned in integer registers. */
7762 else if (TREE_CODE (type) == UNION_TYPE)
7764 HOST_WIDE_INT size = int_size_in_bytes (type);
7765 gcc_assert (size <= 32);
7767 return function_arg_union_value (size, mode, 0, regbase);
7770 /* Objects that require it are returned in FP registers. */
7771 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7774 /* All other aggregate types are returned in an integer register in a
7775 mode corresponding to the size of the type. */
7776 else if (AGGREGATE_TYPE_P (type))
7778 /* All other aggregate types are passed in an integer register
7779 in a mode corresponding to the size of the type. */
7780 HOST_WIDE_INT size = int_size_in_bytes (type);
7781 gcc_assert (size <= 32);
7783 mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
7785 /* ??? We probably should have made the same ABI change in
7786 3.4.0 as the one we made for unions. The latter was
7787 required by the SCD though, while the former is not
7788 specified, so we favored compatibility and efficiency.
7790 Now we're stuck for aggregates larger than 16 bytes,
7791 because OImode vanished in the meantime. Let's not
7792 try to be unduly clever, and simply follow the ABI
7793 for unions in that case. */
7794 if (mode == BLKmode)
7795 return function_arg_union_value (size, mode, 0, regbase);
7796 else
7797 mclass = MODE_INT;
7800 /* We should only have pointer and integer types at this point. This
7801 must match sparc_promote_function_mode. */
7802 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7803 mode = word_mode;
7806 /* We should only have pointer and integer types at this point, except with
7807 -freg-struct-return. This must match sparc_promote_function_mode. */
7808 else if (TARGET_ARCH32
7809 && !(type && AGGREGATE_TYPE_P (type))
7810 && mclass == MODE_INT
7811 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7812 mode = word_mode;
7814 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
7815 regno = SPARC_FP_ARG_FIRST;
7816 else
7817 regno = regbase;
7819 return gen_rtx_REG (mode, regno);
7822 /* Handle TARGET_FUNCTION_VALUE.
7823 On the SPARC, the value is found in the first "output" register, but the
7824 called function leaves it in the first "input" register. */
7826 static rtx
7827 sparc_function_value (const_tree valtype,
7828 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
7829 bool outgoing)
7831 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
7834 /* Handle TARGET_LIBCALL_VALUE. */
7836 static rtx
7837 sparc_libcall_value (machine_mode mode,
7838 const_rtx fun ATTRIBUTE_UNUSED)
7840 return sparc_function_value_1 (NULL_TREE, mode, false);
7843 /* Handle FUNCTION_VALUE_REGNO_P.
7844 On the SPARC, the first "output" reg is used for integer values, and the
7845 first floating point register is used for floating point values. */
7847 static bool
7848 sparc_function_value_regno_p (const unsigned int regno)
7850 return (regno == 8 || (TARGET_FPU && regno == 32));
7853 /* Do what is necessary for `va_start'. We look at the current function
7854 to determine if stdarg or varargs is used and return the address of
7855 the first unnamed parameter. */
7857 static rtx
7858 sparc_builtin_saveregs (void)
7860 int first_reg = crtl->args.info.words;
7861 rtx address;
7862 int regno;
7864 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
7865 emit_move_insn (gen_rtx_MEM (word_mode,
7866 gen_rtx_PLUS (Pmode,
7867 frame_pointer_rtx,
7868 GEN_INT (FIRST_PARM_OFFSET (0)
7869 + (UNITS_PER_WORD
7870 * regno)))),
7871 gen_rtx_REG (word_mode,
7872 SPARC_INCOMING_INT_ARG_FIRST + regno));
7874 address = gen_rtx_PLUS (Pmode,
7875 frame_pointer_rtx,
7876 GEN_INT (FIRST_PARM_OFFSET (0)
7877 + UNITS_PER_WORD * first_reg));
7879 return address;
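/* Editor's note (a sketch of the layout, assuming the standard SPARC
   frame conventions used elsewhere in this file): for a hypothetical
   "int f (int a, ...)", crtl->args.info.words is 1, so the loop above
   stores the remaining incoming argument registers into their reserved
   stack slots at FIRST_PARM_OFFSET (0) + UNITS_PER_WORD * regno from
   %fp, and va_start then points at the slot of the first unnamed word. */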
7882 /* Implement `va_start' for stdarg. */
7884 static void
7885 sparc_va_start (tree valist, rtx nextarg)
7887 nextarg = expand_builtin_saveregs ();
7888 std_expand_builtin_va_start (valist, nextarg);
7891 /* Implement `va_arg' for stdarg. */
7893 static tree
7894 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7895 gimple_seq *post_p)
7897 HOST_WIDE_INT size, rsize, align;
7898 tree addr, incr;
7899 bool indirect;
7900 tree ptrtype = build_pointer_type (type);
7902 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
7904 indirect = true;
7905 size = rsize = UNITS_PER_WORD;
7906 align = 0;
7908 else
7910 indirect = false;
7911 size = int_size_in_bytes (type);
7912 rsize = ROUND_UP (size, UNITS_PER_WORD);
7913 align = 0;
7915 if (TARGET_ARCH64)
7917 /* For SPARC64, objects requiring 16-byte alignment get it. */
7918 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
7919 align = 2 * UNITS_PER_WORD;
7921 /* SPARC-V9 ABI states that structures up to 16 bytes in size
7922 are left-justified in their slots. */
7923 if (AGGREGATE_TYPE_P (type))
7925 if (size == 0)
7926 size = rsize = UNITS_PER_WORD;
7927 else
7928 size = rsize;
7933 incr = valist;
7934 if (align)
7936 incr = fold_build_pointer_plus_hwi (incr, align - 1);
7937 incr = fold_convert (sizetype, incr);
7938 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
7939 size_int (-align));
7940 incr = fold_convert (ptr_type_node, incr);
7943 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
7944 addr = incr;
7946 if (BYTES_BIG_ENDIAN && size < rsize)
7947 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
7949 if (indirect)
7951 addr = fold_convert (build_pointer_type (ptrtype), addr);
7952 addr = build_va_arg_indirect_ref (addr);
7955 /* If the address isn't aligned properly for the type, we need a temporary.
7956 FIXME: This is inefficient, usually we can do this in registers. */
7957 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
7959 tree tmp = create_tmp_var (type, "va_arg_tmp");
7960 tree dest_addr = build_fold_addr_expr (tmp);
7961 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
7962 3, dest_addr, addr, size_int (rsize));
7963 TREE_ADDRESSABLE (tmp) = 1;
7964 gimplify_and_add (copy, pre_p);
7965 addr = dest_addr;
7968 else
7969 addr = fold_convert (ptrtype, addr);
7971 incr = fold_build_pointer_plus_hwi (incr, rsize);
7972 gimplify_assign (valist, incr, post_p);
7974 return build_va_arg_indirect_ref (addr);
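/* Editor's illustration (a sketch, not from the original sources): SPARC
   is big-endian, so a sub-word scalar sits in the high-address part of
   its UNITS_PER_WORD slot. For a 4-byte int fetched from an 8-byte slot
   on TARGET_ARCH64, size = 4 and rsize = 8, so the
   "addr = incr + (rsize - size)" adjustment above skips the 4 bytes of
   padding that precede the value. */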
7977 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
7978 Specify whether the vector mode is supported by the hardware. */
7980 static bool
7981 sparc_vector_mode_supported_p (machine_mode mode)
7983 return TARGET_VIS && VECTOR_MODE_P (mode);
7986 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
7988 static machine_mode
7989 sparc_preferred_simd_mode (scalar_mode mode)
7991 if (TARGET_VIS)
7992 switch (mode)
7994 case E_SImode:
7995 return V2SImode;
7996 case E_HImode:
7997 return V4HImode;
7998 case E_QImode:
7999 return V8QImode;
8001 default:;
8004 return word_mode;
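/* Editor's illustration (a sketch, not from the original sources): VIS
   vectors live in 64-bit FP registers, so a loop over 16-bit elements
   is vectorized four wide, e.g.

     short a[256], b[256];
     for (int i = 0; i < 256; i++)   // with -mvis: V4HImode,
       a[i] += b[i];                 // four halfwords per operation  */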
8007 /* Return the string to output an unconditional branch to LABEL, which is
8008 the operand number of the label.
8010 DEST is the destination insn (i.e. the label), INSN is the source. */
8012 const char *
8013 output_ubranch (rtx dest, rtx_insn *insn)
8015 static char string[64];
8016 bool v9_form = false;
8017 int delta;
8018 char *p;
8020 /* Even if we are trying to use cbcond for this, evaluate
8021 whether we can use V9 branches as our backup plan. */
8023 delta = 5000000;
8024 if (INSN_ADDRESSES_SET_P ())
8025 delta = (INSN_ADDRESSES (INSN_UID (dest))
8026 - INSN_ADDRESSES (INSN_UID (insn)));
8028 /* Leave some instructions for "slop". */
8029 if (TARGET_V9 && delta >= -260000 && delta < 260000)
8030 v9_form = true;
8032 if (TARGET_CBCOND)
8034 bool emit_nop = emit_cbcond_nop (insn);
8035 bool far = false;
8036 const char *rval;
8038 if (delta < -500 || delta > 500)
8039 far = true;
8041 if (far)
8043 if (v9_form)
8044 rval = "ba,a,pt\t%%xcc, %l0";
8045 else
8046 rval = "b,a\t%l0";
8048 else
8050 if (emit_nop)
8051 rval = "cwbe\t%%g0, %%g0, %l0\n\tnop";
8052 else
8053 rval = "cwbe\t%%g0, %%g0, %l0";
8055 return rval;
8058 if (v9_form)
8059 strcpy (string, "ba%*,pt\t%%xcc, ");
8060 else
8061 strcpy (string, "b%*\t");
8063 p = strchr (string, '\0');
8064 *p++ = '%';
8065 *p++ = 'l';
8066 *p++ = '0';
8067 *p++ = '%';
8068 *p++ = '(';
8069 *p = '\0';
8071 return string;
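/* Editor's summary (a sketch of the choices made above): for a nearby
   target with TARGET_CBCOND the branch becomes
   "cwbe %g0, %g0, label" (a compare-and-branch that is always taken);
   past roughly +-500 bytes it falls back to "ba,a,pt %xcc, label" when
   the V9 form is in range, or plain "b,a label" otherwise. Without
   cbcond, the V9 "ba%*,pt %xcc," or V8 "b%*" form is used directly. */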
8074 /* Return the string to output a conditional branch to LABEL, which is
8075 the operand number of the label. OP is the conditional expression.
8076 XEXP (OP, 0) is assumed to be a condition code register (integer or
8077 floating point) and its mode specifies what kind of comparison we made.
8079 DEST is the destination insn (i.e. the label), INSN is the source.
8081 REVERSED is nonzero if we should reverse the sense of the comparison.
8083 ANNUL is nonzero if we should generate an annulling branch. */
8085 const char *
8086 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
8087 rtx_insn *insn)
8089 static char string[64];
8090 enum rtx_code code = GET_CODE (op);
8091 rtx cc_reg = XEXP (op, 0);
8092 machine_mode mode = GET_MODE (cc_reg);
8093 const char *labelno, *branch;
8094 int spaces = 8, far;
8095 char *p;
8097 /* v9 branches are limited to +-1MB. If it is too far away,
8098 change
8100 bne,pt %xcc, .LC30
8102 to
8104 be,pn %xcc, .+12
8105 nop
8106 ba .LC30
8108 and
8110 fbne,a,pn %fcc2, .LC29
8112 to
8114 fbe,pt %fcc2, .+16
8115 nop
8116 ba .LC29 */
8118 far = TARGET_V9 && (get_attr_length (insn) >= 3);
8119 if (reversed ^ far)
8121 /* Reversal of FP compares takes care -- an ordered compare
8122 becomes an unordered compare and vice versa. */
8123 if (mode == CCFPmode || mode == CCFPEmode)
8124 code = reverse_condition_maybe_unordered (code);
8125 else
8126 code = reverse_condition (code);
8129 /* Start by writing the branch condition. */
8130 if (mode == CCFPmode || mode == CCFPEmode)
8132 switch (code)
8134 case NE:
8135 branch = "fbne";
8136 break;
8137 case EQ:
8138 branch = "fbe";
8139 break;
8140 case GE:
8141 branch = "fbge";
8142 break;
8143 case GT:
8144 branch = "fbg";
8145 break;
8146 case LE:
8147 branch = "fble";
8148 break;
8149 case LT:
8150 branch = "fbl";
8151 break;
8152 case UNORDERED:
8153 branch = "fbu";
8154 break;
8155 case ORDERED:
8156 branch = "fbo";
8157 break;
8158 case UNGT:
8159 branch = "fbug";
8160 break;
8161 case UNLT:
8162 branch = "fbul";
8163 break;
8164 case UNEQ:
8165 branch = "fbue";
8166 break;
8167 case UNGE:
8168 branch = "fbuge";
8169 break;
8170 case UNLE:
8171 branch = "fbule";
8172 break;
8173 case LTGT:
8174 branch = "fblg";
8175 break;
8176 default:
8177 gcc_unreachable ();
8180 /* ??? !v9: FP branches cannot be preceded by another floating point
8181 insn. Because there is currently no concept of pre-delay slots,
8182 we can fix this only by always emitting a nop before a floating
8183 point branch. */
8185 string[0] = '\0';
8186 if (! TARGET_V9)
8187 strcpy (string, "nop\n\t");
8188 strcat (string, branch);
8190 else
8192 switch (code)
8194 case NE:
8195 if (mode == CCVmode || mode == CCXVmode)
8196 branch = "bvs";
8197 else
8198 branch = "bne";
8199 break;
8200 case EQ:
8201 if (mode == CCVmode || mode == CCXVmode)
8202 branch = "bvc";
8203 else
8204 branch = "be";
8205 break;
8206 case GE:
8207 if (mode == CCNZmode || mode == CCXNZmode)
8208 branch = "bpos";
8209 else
8210 branch = "bge";
8211 break;
8212 case GT:
8213 branch = "bg";
8214 break;
8215 case LE:
8216 branch = "ble";
8217 break;
8218 case LT:
8219 if (mode == CCNZmode || mode == CCXNZmode)
8220 branch = "bneg";
8221 else
8222 branch = "bl";
8223 break;
8224 case GEU:
8225 branch = "bgeu";
8226 break;
8227 case GTU:
8228 branch = "bgu";
8229 break;
8230 case LEU:
8231 branch = "bleu";
8232 break;
8233 case LTU:
8234 branch = "blu";
8235 break;
8236 default:
8237 gcc_unreachable ();
8239 strcpy (string, branch);
8241 spaces -= strlen (branch);
8242 p = strchr (string, '\0');
8244 /* Now add the annulling, the label, and a possible nop. */
8245 if (annul && ! far)
8247 strcpy (p, ",a");
8248 p += 2;
8249 spaces -= 2;
8252 if (TARGET_V9)
8254 rtx note;
8255 int v8 = 0;
8257 if (! far && insn && INSN_ADDRESSES_SET_P ())
8259 int delta = (INSN_ADDRESSES (INSN_UID (dest))
8260 - INSN_ADDRESSES (INSN_UID (insn)));
8261 /* Leave some instructions for "slop". */
8262 if (delta < -260000 || delta >= 260000)
8263 v8 = 1;
8266 switch (mode)
8268 case E_CCmode:
8269 case E_CCNZmode:
8270 case E_CCCmode:
8271 case E_CCVmode:
8272 labelno = "%%icc, ";
8273 if (v8)
8274 labelno = "";
8275 break;
8276 case E_CCXmode:
8277 case E_CCXNZmode:
8278 case E_CCXCmode:
8279 case E_CCXVmode:
8280 labelno = "%%xcc, ";
8281 gcc_assert (!v8);
8282 break;
8283 case E_CCFPmode:
8284 case E_CCFPEmode:
8286 static char v9_fcc_labelno[] = "%%fccX, ";
8287 /* Set the char indicating the number of the fcc reg to use. */
8288 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
8289 labelno = v9_fcc_labelno;
8290 if (v8)
8292 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
8293 labelno = "";
8296 break;
8297 default:
8298 gcc_unreachable ();
8301 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8303 strcpy (p,
8304 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8305 >= profile_probability::even ()) ^ far)
8306 ? ",pt" : ",pn");
8307 p += 3;
8308 spaces -= 3;
8311 else
8312 labelno = "";
8314 if (spaces > 0)
8315 *p++ = '\t';
8316 else
8317 *p++ = ' ';
8318 strcpy (p, labelno);
8319 p = strchr (p, '\0');
8320 if (far)
8322 strcpy (p, ".+12\n\t nop\n\tb\t");
8323 /* Skip the next insn if requested or
8324 if we know that it will be a nop. */
8325 if (annul || ! final_sequence)
8326 p[3] = '6';
8327 p += 14;
8329 *p++ = '%';
8330 *p++ = 'l';
8331 *p++ = label + '0';
8332 *p++ = '%';
8333 *p++ = '#';
8334 *p = '\0';
8336 return string;
8339 /* Emit a library call comparison between floating point X and Y.
8340 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
8341 Return the new operator to be used in the comparison sequence.
8343 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
8344 values as arguments instead of the TFmode registers themselves,
8345 that's why we cannot call emit_float_lib_cmp. */
8347 rtx
8348 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
8350 const char *qpfunc;
8351 rtx slot0, slot1, result, tem, tem2, libfunc;
8352 machine_mode mode;
8353 enum rtx_code new_comparison;
8355 switch (comparison)
8357 case EQ:
8358 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
8359 break;
8361 case NE:
8362 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
8363 break;
8365 case GT:
8366 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
8367 break;
8369 case GE:
8370 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
8371 break;
8373 case LT:
8374 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
8375 break;
8377 case LE:
8378 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
8379 break;
8381 case ORDERED:
8382 case UNORDERED:
8383 case UNGT:
8384 case UNLT:
8385 case UNEQ:
8386 case UNGE:
8387 case UNLE:
8388 case LTGT:
8389 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
8390 break;
8392 default:
8393 gcc_unreachable ();
8396 if (TARGET_ARCH64)
8398 if (MEM_P (x))
8400 tree expr = MEM_EXPR (x);
8401 if (expr)
8402 mark_addressable (expr);
8403 slot0 = x;
8405 else
8407 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
8408 emit_move_insn (slot0, x);
8411 if (MEM_P (y))
8413 tree expr = MEM_EXPR (y);
8414 if (expr)
8415 mark_addressable (expr);
8416 slot1 = y;
8418 else
8420 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
8421 emit_move_insn (slot1, y);
8424 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8425 emit_library_call (libfunc, LCT_NORMAL,
8426 DImode,
8427 XEXP (slot0, 0), Pmode,
8428 XEXP (slot1, 0), Pmode);
8429 mode = DImode;
8431 else
8433 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8434 emit_library_call (libfunc, LCT_NORMAL,
8435 SImode,
8436 x, TFmode, y, TFmode);
8437 mode = SImode;
8441 /* Immediately move the result of the libcall into a pseudo
8442 register so reload doesn't clobber the value if it needs
8443 the return register for a spill reg. */
8444 result = gen_reg_rtx (mode);
8445 emit_move_insn (result, hard_libcall_value (mode, libfunc));
8447 switch (comparison)
8449 default:
8450 return gen_rtx_NE (VOIDmode, result, const0_rtx);
8451 case ORDERED:
8452 case UNORDERED:
8453 new_comparison = (comparison == UNORDERED ? EQ : NE);
8454 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT (3));
8455 case UNGT:
8456 case UNGE:
8457 new_comparison = (comparison == UNGT ? GT : NE);
8458 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
8459 case UNLE:
8460 return gen_rtx_NE (VOIDmode, result, const2_rtx);
8461 case UNLT:
8462 tem = gen_reg_rtx (mode);
8463 if (TARGET_ARCH32)
8464 emit_insn (gen_andsi3 (tem, result, const1_rtx));
8465 else
8466 emit_insn (gen_anddi3 (tem, result, const1_rtx));
8467 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
8468 case UNEQ:
8469 case LTGT:
8470 tem = gen_reg_rtx (mode);
8471 if (TARGET_ARCH32)
8472 emit_insn (gen_addsi3 (tem, result, const1_rtx));
8473 else
8474 emit_insn (gen_adddi3 (tem, result, const1_rtx));
8475 tem2 = gen_reg_rtx (mode);
8476 if (TARGET_ARCH32)
8477 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
8478 else
8479 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
8480 new_comparison = (comparison == UNEQ ? EQ : NE);
8481 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
8484 gcc_unreachable ();
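/* Editor's note (assuming the usual Sun libc convention for _Q_cmp and
   _Qp_cmp: 0 = equal, 1 = less, 2 = greater, 3 = unordered), the magic
   constants above decode as:

     UNORDERED  result == 3
     ORDERED    result != 3
     UNGT       result > 1               i.e. greater or unordered
     UNGE       result != 1              i.e. not less
     UNLE       result != 2              i.e. not greater
     UNLT       (result & 1) != 0        i.e. less (1) or unordered (3)
     UNEQ       ((result + 1) & 2) == 0  i.e. equal (0) or unordered (3)
     LTGT       ((result + 1) & 2) != 0  i.e. less (1) or greater (2)  */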
8487 /* Generate an unsigned DImode to FP conversion. This is the same code
8488 optabs would emit if we didn't have TFmode patterns. */
8490 void
8491 sparc_emit_floatunsdi (rtx *operands, machine_mode mode)
8493 rtx i0, i1, f0, in, out;
8495 out = operands[0];
8496 in = force_reg (DImode, operands[1]);
8497 rtx_code_label *neglab = gen_label_rtx ();
8498 rtx_code_label *donelab = gen_label_rtx ();
8499 i0 = gen_reg_rtx (DImode);
8500 i1 = gen_reg_rtx (DImode);
8501 f0 = gen_reg_rtx (mode);
8503 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
8505 emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in)));
8506 emit_jump_insn (gen_jump (donelab));
8507 emit_barrier ();
8509 emit_label (neglab);
8511 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
8512 emit_insn (gen_anddi3 (i1, in, const1_rtx));
8513 emit_insn (gen_iordi3 (i0, i0, i1));
8514 emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0)));
8515 emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));
8517 emit_label (donelab);
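#if 0
/* Editor's sketch: a plain-C model of the RTL sequence above
   (illustration only; the function name is hypothetical).  A negative
   signed view means bit 63 is set, so the value is halved with its low
   bit folded back in (to keep rounding exact), converted as signed,
   then doubled.  */
static double
u64_to_fp_model (unsigned long long x)
{
  if ((long long) x >= 0)
    return (double) (long long) x;           /* fits in signed range */
  unsigned long long half = (x >> 1) | (x & 1);
  double f = (double) (long long) half;      /* halved value is positive */
  return f + f;                              /* undo the halving */
}
#endif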
8520 /* Generate an FP to unsigned DImode conversion. This is the same code
8521 optabs would emit if we didn't have TFmode patterns. */
8523 void
8524 sparc_emit_fixunsdi (rtx *operands, machine_mode mode)
8526 rtx i0, i1, f0, in, out, limit;
8528 out = operands[0];
8529 in = force_reg (mode, operands[1]);
8530 rtx_code_label *neglab = gen_label_rtx ();
8531 rtx_code_label *donelab = gen_label_rtx ();
8532 i0 = gen_reg_rtx (DImode);
8533 i1 = gen_reg_rtx (DImode);
8534 limit = gen_reg_rtx (mode);
8535 f0 = gen_reg_rtx (mode);
8537 emit_move_insn (limit,
8538 const_double_from_real_value (
8539 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
8540 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
8542 emit_insn (gen_rtx_SET (out,
8543 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
8544 emit_jump_insn (gen_jump (donelab));
8545 emit_barrier ();
8547 emit_label (neglab);
8549 emit_insn (gen_rtx_SET (f0, gen_rtx_MINUS (mode, in, limit)));
8550 emit_insn (gen_rtx_SET (i0,
8551 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
8552 emit_insn (gen_movdi (i1, const1_rtx));
8553 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
8554 emit_insn (gen_xordi3 (out, i0, i1));
8556 emit_label (donelab);
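#if 0
/* Editor's sketch: a plain-C model of the RTL sequence above
   (illustration only; the function name is hypothetical).  Values below
   2^63 convert directly; otherwise 2^63 is subtracted first and the top
   bit is put back with an xor, mirroring the limit/minus/xor insns
   emitted above.  */
static unsigned long long
fp_to_u64_model (double d)
{
  const double two63 = 9223372036854775808.0;  /* 2^63, same constant */
  if (d < two63)
    return (unsigned long long) (long long) d;
  long long i = (long long) (d - two63);
  return (unsigned long long) i ^ (1ULL << 63);
}
#endif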
8559 /* Return the string to output a compare and branch instruction to DEST.
8560 DEST is the destination insn (i.e. the label), INSN is the source,
8561 and OP is the conditional expression. */
8563 const char *
8564 output_cbcond (rtx op, rtx dest, rtx_insn *insn)
8566 machine_mode mode = GET_MODE (XEXP (op, 0));
8567 enum rtx_code code = GET_CODE (op);
8568 const char *cond_str, *tmpl;
8569 int far, emit_nop, len;
8570 static char string[64];
8571 char size_char;
8573 /* Compare and Branch is limited to +-2KB. If it is too far away,
8574 change
8576 cxbne X, Y, .LC30
8578 to
8580 cxbe X, Y, .+16
8581 nop
8582 ba,pt xcc, .LC30
8583 nop */
8585 len = get_attr_length (insn);
8587 far = len == 4;
8588 emit_nop = len == 2;
8590 if (far)
8591 code = reverse_condition (code);
8593 size_char = ((mode == SImode) ? 'w' : 'x');
8595 switch (code)
8597 case NE:
8598 cond_str = "ne";
8599 break;
8601 case EQ:
8602 cond_str = "e";
8603 break;
8605 case GE:
8606 cond_str = "ge";
8607 break;
8609 case GT:
8610 cond_str = "g";
8611 break;
8613 case LE:
8614 cond_str = "le";
8615 break;
8617 case LT:
8618 cond_str = "l";
8619 break;
8621 case GEU:
8622 cond_str = "cc";
8623 break;
8625 case GTU:
8626 cond_str = "gu";
8627 break;
8629 case LEU:
8630 cond_str = "leu";
8631 break;
8633 case LTU:
8634 cond_str = "cs";
8635 break;
8637 default:
8638 gcc_unreachable ();
8641 if (far)
8643 int veryfar = 1, delta;
8645 if (INSN_ADDRESSES_SET_P ())
8647 delta = (INSN_ADDRESSES (INSN_UID (dest))
8648 - INSN_ADDRESSES (INSN_UID (insn)));
8649 /* Leave some instructions for "slop". */
8650 if (delta >= -260000 && delta < 260000)
8651 veryfar = 0;
8654 if (veryfar)
8655 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tb\t%%3\n\tnop";
8656 else
8657 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tba,pt\t%%%%xcc, %%3\n\tnop";
8659 else
8661 if (emit_nop)
8662 tmpl = "c%cb%s\t%%1, %%2, %%3\n\tnop";
8663 else
8664 tmpl = "c%cb%s\t%%1, %%2, %%3";
8667 snprintf (string, sizeof (string), tmpl, size_char, cond_str);
8669 return string;
8672 /* Return the string to output a conditional branch to LABEL, testing
8673 register REG. LABEL is the operand number of the label; REG is the
8674 operand number of the reg. OP is the conditional expression. The mode
8675 of REG says what kind of comparison we made.
8677 DEST is the destination insn (i.e. the label), INSN is the source.
8679 REVERSED is nonzero if we should reverse the sense of the comparison.
8681 ANNUL is nonzero if we should generate an annulling branch. */
8683 const char *
8684 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
8685 int annul, rtx_insn *insn)
8687 static char string[64];
8688 enum rtx_code code = GET_CODE (op);
8689 machine_mode mode = GET_MODE (XEXP (op, 0));
8690 rtx note;
8691 int far;
8692 char *p;
8694 /* Branches on a register are limited to +-128KB. If the target is
8695 too far away, change
8697 brnz,pt %g1, .LC30
8699 to
8701 brz,pn %g1, .+12
8702 nop
8703 ba,pt %xcc, .LC30
8705 and
8707 brgez,a,pn %o1, .LC29
8709 to
8711 brlz,pt %o1, .+16
8712 nop
8713 ba,pt %xcc, .LC29 */
8715 far = get_attr_length (insn) >= 3;
8717 /* If not floating-point or if EQ or NE, we can just reverse the code. */
8718 if (reversed ^ far)
8719 code = reverse_condition (code);
8721 /* Only 64-bit versions of these instructions exist. */
8722 gcc_assert (mode == DImode);
8724 /* Start by writing the branch condition. */
8726 switch (code)
8728 case NE:
8729 strcpy (string, "brnz");
8730 break;
8732 case EQ:
8733 strcpy (string, "brz");
8734 break;
8736 case GE:
8737 strcpy (string, "brgez");
8738 break;
8740 case LT:
8741 strcpy (string, "brlz");
8742 break;
8744 case LE:
8745 strcpy (string, "brlez");
8746 break;
8748 case GT:
8749 strcpy (string, "brgz");
8750 break;
8752 default:
8753 gcc_unreachable ();
8756 p = strchr (string, '\0');
8758 /* Now add the annulling, reg, label, and nop. */
8759 if (annul && ! far)
8761 strcpy (p, ",a");
8762 p += 2;
8765 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8767 strcpy (p,
8768 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8769 >= profile_probability::even ()) ^ far)
8770 ? ",pt" : ",pn");
8771 p += 3;
8774 *p = p < string + 8 ? '\t' : ' ';
8775 p++;
8776 *p++ = '%';
8777 *p++ = '0' + reg;
8778 *p++ = ',';
8779 *p++ = ' ';
8780 if (far)
8782 int veryfar = 1, delta;
8784 if (INSN_ADDRESSES_SET_P ())
8786 delta = (INSN_ADDRESSES (INSN_UID (dest))
8787 - INSN_ADDRESSES (INSN_UID (insn)));
8788 /* Leave some instructions for "slop". */
8789 if (delta >= -260000 && delta < 260000)
8790 veryfar = 0;
8793 strcpy (p, ".+12\n\t nop\n\t");
8794 /* Skip the next insn if requested or
8795 if we know that it will be a nop. */
8796 if (annul || ! final_sequence)
8797 p[3] = '6';
8798 p += 12;
8799 if (veryfar)
8801 strcpy (p, "b\t");
8802 p += 2;
8804 else
8806 strcpy (p, "ba,pt\t%%xcc, ");
8807 p += 13;
8810 *p++ = '%';
8811 *p++ = 'l';
8812 *p++ = '0' + label;
8813 *p++ = '%';
8814 *p++ = '#';
8815 *p = '\0';
8817 return string;
8820 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
8821 Such instructions cannot be used in the delay slot of a return insn on V9.
8822 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
8823 */
8825 static int
8826 epilogue_renumber (register rtx *where, int test)
8828 register const char *fmt;
8829 register int i;
8830 register enum rtx_code code;
8832 if (*where == 0)
8833 return 0;
8835 code = GET_CODE (*where);
8837 switch (code)
8839 case REG:
8840 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
8841 return 1;
8842 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
8843 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
8844 /* fallthrough */
8845 case SCRATCH:
8846 case CC0:
8847 case PC:
8848 case CONST_INT:
8849 case CONST_WIDE_INT:
8850 case CONST_DOUBLE:
8851 return 0;
8853 /* Do not replace the frame pointer with the stack pointer because
8854 it can cause the delayed instruction to load below the stack.
8855 This occurs when instructions like:
8857 (set (reg/i:SI 24 %i0)
8858 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
8859 (const_int -20 [0xffffffec])) 0))
8861 are in the return delay slot. */
8862 case PLUS:
8863 if (GET_CODE (XEXP (*where, 0)) == REG
8864 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
8865 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
8866 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
8867 return 1;
8868 break;
8870 case MEM:
8871 if (SPARC_STACK_BIAS
8872 && GET_CODE (XEXP (*where, 0)) == REG
8873 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
8874 return 1;
8875 break;
8877 default:
8878 break;
8881 fmt = GET_RTX_FORMAT (code);
8883 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8885 if (fmt[i] == 'E')
8887 register int j;
8888 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
8889 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
8890 return 1;
8892 else if (fmt[i] == 'e'
8893 && epilogue_renumber (&(XEXP (*where, i)), test))
8894 return 1;
8896 return 0;
8899 /* Leaf functions and non-leaf functions have different needs. */
8901 static const int
8902 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
8904 static const int
8905 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
8907 static const int *const reg_alloc_orders[] = {
8908 reg_leaf_alloc_order,
8909 reg_nonleaf_alloc_order};
8911 void
8912 order_regs_for_local_alloc (void)
8914 static int last_order_nonleaf = 1;
8916 if (df_regs_ever_live_p (15) != last_order_nonleaf)
8918 last_order_nonleaf = !last_order_nonleaf;
8919 memcpy ((char *) reg_alloc_order,
8920 (const char *) reg_alloc_orders[last_order_nonleaf],
8921 FIRST_PSEUDO_REGISTER * sizeof (int));
8925 /* Return 1 if REG and MEM are legitimate enough to allow the various
8926 MEM<-->REG splits to be run. */
8928 int
8929 sparc_split_reg_mem_legitimate (rtx reg, rtx mem)
8931 /* Punt if we are here by mistake. */
8932 gcc_assert (reload_completed);
8934 /* We must have an offsettable memory reference. */
8935 if (!offsettable_memref_p (mem))
8936 return 0;
8938 /* If we have legitimate args for ldd/std, we do not want
8939 the split to happen. */
8940 if ((REGNO (reg) % 2) == 0 && mem_min_alignment (mem, 8))
8941 return 0;
8943 /* Success. */
8944 return 1;
8947 /* Split a REG <-- MEM move into a pair of moves in MODE. */
8949 void
8950 sparc_split_reg_mem (rtx dest, rtx src, machine_mode mode)
8952 rtx high_part = gen_highpart (mode, dest);
8953 rtx low_part = gen_lowpart (mode, dest);
8954 rtx word0 = adjust_address (src, mode, 0);
8955 rtx word1 = adjust_address (src, mode, 4);
8957 if (reg_overlap_mentioned_p (high_part, word1))
8959 emit_move_insn_1 (low_part, word1);
8960 emit_move_insn_1 (high_part, word0);
8962 else
8964 emit_move_insn_1 (high_part, word0);
8965 emit_move_insn_1 (low_part, word1);
8969 /* Split a MEM <-- REG move into a pair of moves in MODE. */
8971 void
8972 sparc_split_mem_reg (rtx dest, rtx src, machine_mode mode)
8974 rtx word0 = adjust_address (dest, mode, 0);
8975 rtx word1 = adjust_address (dest, mode, 4);
8976 rtx high_part = gen_highpart (mode, src);
8977 rtx low_part = gen_lowpart (mode, src);
8979 emit_move_insn_1 (word0, high_part);
8980 emit_move_insn_1 (word1, low_part);
8983 /* Like sparc_split_reg_mem_legitimate but for REG <--> REG moves. */
8985 int
8986 sparc_split_reg_reg_legitimate (rtx reg1, rtx reg2)
8988 /* Punt if we are here by mistake. */
8989 gcc_assert (reload_completed);
8991 if (GET_CODE (reg1) == SUBREG)
8992 reg1 = SUBREG_REG (reg1);
8993 if (GET_CODE (reg1) != REG)
8994 return 0;
8995 const int regno1 = REGNO (reg1);
8997 if (GET_CODE (reg2) == SUBREG)
8998 reg2 = SUBREG_REG (reg2);
8999 if (GET_CODE (reg2) != REG)
9000 return 0;
9001 const int regno2 = REGNO (reg2);
9003 if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
9004 return 1;
9006 if (TARGET_VIS3)
9008 if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
9009 || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
9010 return 1;
9013 return 0;
9016 /* Split a REG <--> REG move into a pair of moves in MODE. */
9018 void
9019 sparc_split_reg_reg (rtx dest, rtx src, machine_mode mode)
9021 rtx dest1 = gen_highpart (mode, dest);
9022 rtx dest2 = gen_lowpart (mode, dest);
9023 rtx src1 = gen_highpart (mode, src);
9024 rtx src2 = gen_lowpart (mode, src);
9026 /* Now emit using the real source and destination we found, swapping
9027 the order if we detect overlap. */
9028 if (reg_overlap_mentioned_p (dest1, src2))
9030 emit_move_insn_1 (dest2, src2);
9031 emit_move_insn_1 (dest1, src1);
9033 else
9035 emit_move_insn_1 (dest1, src1);
9036 emit_move_insn_1 (dest2, src2);
9040 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
9041 This makes them candidates for using ldd and std insns.
9043 Note reg1 and reg2 *must* be hard registers. */
9045 int
9046 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
9048 /* We might have been passed a SUBREG. */
9049 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
9050 return 0;
9052 if (REGNO (reg1) % 2 != 0)
9053 return 0;
9055 /* Integer ldd is deprecated in SPARC V9. */
9056 if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
9057 return 0;
9059 return (REGNO (reg1) == REGNO (reg2) - 1);
9062 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
9063 an ldd or std insn.
9065 This can only happen when addr1 and addr2, the addresses in mem1
9066 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
9067 addr1 must also be aligned on a 64-bit boundary.
9069 Also, if dependent_reg_rtx is not null, it should not be used to
9070 compute the address for mem1, i.e. we cannot optimize a sequence
9071 like:
9072 ld [%o0], %o0
9073 ld [%o0 + 4], %o1
9075 ldd [%o0], %o0
9076 nor:
9077 ld [%g3 + 4], %g3
9078 ld [%g3], %g2
9080 ldd [%g3], %g2
9082 But, note that the transformation from:
9083 ld [%g2 + 4], %g3
9084 ld [%g2], %g2
9086 ldd [%g2], %g2
9087 is perfectly fine. Thus, the peephole2 patterns always pass us
9088 the destination register of the first load, never the second one.
9090 For stores we don't have a similar problem, so dependent_reg_rtx is
9091 NULL_RTX. */
9093 int
9094 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
9096 rtx addr1, addr2;
9097 unsigned int reg1;
9098 HOST_WIDE_INT offset1;
9100 /* The mems cannot be volatile. */
9101 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
9102 return 0;
9104 /* MEM1 should be aligned on a 64-bit boundary. */
9105 if (MEM_ALIGN (mem1) < 64)
9106 return 0;
9108 addr1 = XEXP (mem1, 0);
9109 addr2 = XEXP (mem2, 0);
9111 /* Extract a register number and offset (if used) from the first addr. */
9112 if (GET_CODE (addr1) == PLUS)
9114 /* If not a REG, return zero. */
9115 if (GET_CODE (XEXP (addr1, 0)) != REG)
9116 return 0;
9117 else
9119 reg1 = REGNO (XEXP (addr1, 0));
9120 /* The offset must be constant! */
9121 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
9122 return 0;
9123 offset1 = INTVAL (XEXP (addr1, 1));
9126 else if (GET_CODE (addr1) != REG)
9127 return 0;
9128 else
9130 reg1 = REGNO (addr1);
9131 /* This was a simple (mem (reg)) expression. Offset is 0. */
9132 offset1 = 0;
9135 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
9136 if (GET_CODE (addr2) != PLUS)
9137 return 0;
9139 if (GET_CODE (XEXP (addr2, 0)) != REG
9140 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
9141 return 0;
9143 if (reg1 != REGNO (XEXP (addr2, 0)))
9144 return 0;
9146 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
9147 return 0;
9149 /* The first offset must be evenly divisible by 8 to ensure the
9150 address is 64-bit aligned. */
9151 if (offset1 % 8 != 0)
9152 return 0;
9154 /* The offset for the second addr must be 4 more than the first addr. */
9155 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
9156 return 0;
9158 /* All the tests passed. addr1 and addr2 are valid for ldd and std
9159 instructions. */
9160 return 1;
9163 /* Return the widened memory access made of MEM1 and MEM2 in MODE. */
9165 rtx
9166 widen_mem_for_ldd_peep (rtx mem1, rtx mem2, machine_mode mode)
9168 rtx x = widen_memory_access (mem1, mode, 0);
9169 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (mem1) && MEM_NOTRAP_P (mem2);
9170 return x;
9173 /* Return 1 if reg is a pseudo, or is the first register in
9174 a hard register pair. This makes it suitable for use in
9175 ldd and std insns. */
9177 int
9178 register_ok_for_ldd (rtx reg)
9180 /* We might have been passed a SUBREG. */
9181 if (!REG_P (reg))
9182 return 0;
9184 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
9185 return (REGNO (reg) % 2 == 0);
9187 return 1;
9190 /* Return 1 if OP, a MEM, has an address which is known to be
9191 aligned to an 8-byte boundary. */
9193 int
9194 memory_ok_for_ldd (rtx op)
9196 /* In 64-bit mode, we assume that the address is word-aligned. */
9197 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
9198 return 0;
9200 if (! can_create_pseudo_p ()
9201 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
9202 return 0;
9204 return 1;
9207 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
9209 static bool
9210 sparc_print_operand_punct_valid_p (unsigned char code)
9212 if (code == '#'
9213 || code == '*'
9214 || code == '('
9215 || code == ')'
9216 || code == '_'
9217 || code == '&')
9218 return true;
9220 return false;
9223 /* Implement TARGET_PRINT_OPERAND.
9224 Print operand X (an rtx) in assembler syntax to file FILE.
9225 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
9226 For `%' followed by punctuation, CODE is the punctuation and X is null. */
9228 static void
9229 sparc_print_operand (FILE *file, rtx x, int code)
9231 const char *s;
9233 switch (code)
9235 case '#':
9236 /* Output an insn in a delay slot. */
9237 if (final_sequence)
9238 sparc_indent_opcode = 1;
9239 else
9240 fputs ("\n\t nop", file);
9241 return;
9242 case '*':
9243 /* Output an annul flag if there's nothing for the delay slot and we
9244 are optimizing. This is always used with '(' below.
9245 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
9246 this is a dbx bug. So, we only do this when optimizing.
9247 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
9248 Always emit a nop in case the next instruction is a branch. */
9249 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
9250 fputs (",a", file);
9251 return;
9252 case '(':
9253 /* Output a 'nop' if there's nothing for the delay slot and we are
9254 not optimizing. This is always used with '*' above. */
9255 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
9256 fputs ("\n\t nop", file);
9257 else if (final_sequence)
9258 sparc_indent_opcode = 1;
9259 return;
9260 case ')':
9261 /* Output the right displacement from the saved PC on function return.
9262 The caller may have placed an "unimp" insn immediately after the call
9263 so we have to account for it. This insn is used in the 32-bit ABI
9264 when calling a function that returns a non zero-sized structure. The
9265 64-bit ABI doesn't have it. Be careful to have this test be the same
9266 as that for the call. The exception is when sparc_std_struct_return
9267 is enabled, the psABI is followed exactly and the adjustment is made
9268 by the code in sparc_struct_value_rtx. The call emitted is the same
9269 when sparc_std_struct_return is enabled. */
9270 if (!TARGET_ARCH64
9271 && cfun->returns_struct
9272 && !sparc_std_struct_return
9273 && DECL_SIZE (DECL_RESULT (current_function_decl))
9274 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
9275 == INTEGER_CST
9276 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
9277 fputs ("12", file);
9278 else
9279 fputc ('8', file);
9280 return;
9281 case '_':
9282 /* Output the Embedded Medium/Anywhere code model base register. */
9283 fputs (EMBMEDANY_BASE_REG, file);
9284 return;
9285 case '&':
9286 /* Print some local dynamic TLS name. */
9287 if (const char *name = get_some_local_dynamic_name ())
9288 assemble_name (file, name);
9289 else
9290 output_operand_lossage ("'%%&' used without any "
9291 "local dynamic TLS references");
9292 return;
9294 case 'Y':
9295 /* Adjust the operand to take into account a RESTORE operation. */
9296 if (GET_CODE (x) == CONST_INT)
9297 break;
9298 else if (GET_CODE (x) != REG)
9299 output_operand_lossage ("invalid %%Y operand");
9300 else if (REGNO (x) < 8)
9301 fputs (reg_names[REGNO (x)], file);
9302 else if (REGNO (x) >= 24 && REGNO (x) < 32)
9303 fputs (reg_names[REGNO (x)-16], file);
9304 else
9305 output_operand_lossage ("invalid %%Y operand");
9306 return;
9307 case 'L':
9308 /* Print out the low order register name of a register pair. */
9309 if (WORDS_BIG_ENDIAN)
9310 fputs (reg_names[REGNO (x)+1], file);
9311 else
9312 fputs (reg_names[REGNO (x)], file);
9313 return;
9314 case 'H':
9315 /* Print out the high order register name of a register pair. */
9316 if (WORDS_BIG_ENDIAN)
9317 fputs (reg_names[REGNO (x)], file);
9318 else
9319 fputs (reg_names[REGNO (x)+1], file);
9320 return;
9321 case 'R':
9322 /* Print out the second register name of a register pair or quad.
9323 I.e., R (%o0) => %o1. */
9324 fputs (reg_names[REGNO (x)+1], file);
9325 return;
9326 case 'S':
9327 /* Print out the third register name of a register quad.
9328 I.e., S (%o0) => %o2. */
9329 fputs (reg_names[REGNO (x)+2], file);
9330 return;
9331 case 'T':
9332 /* Print out the fourth register name of a register quad.
9333 I.e., T (%o0) => %o3. */
9334 fputs (reg_names[REGNO (x)+3], file);
9335 return;
9336 case 'x':
9337 /* Print a condition code register. */
9338 if (REGNO (x) == SPARC_ICC_REG)
9340 switch (GET_MODE (x))
9342 case E_CCmode:
9343 case E_CCNZmode:
9344 case E_CCCmode:
9345 case E_CCVmode:
9346 s = "%icc";
9347 break;
9348 case E_CCXmode:
9349 case E_CCXNZmode:
9350 case E_CCXCmode:
9351 case E_CCXVmode:
9352 s = "%xcc";
9353 break;
9354 default:
9355 gcc_unreachable ();
9357 fputs (s, file);
9359 else
9360 /* %fccN register */
9361 fputs (reg_names[REGNO (x)], file);
9362 return;
9363 case 'm':
9364 /* Print the operand's address only. */
9365 output_address (GET_MODE (x), XEXP (x, 0));
9366 return;
9367 case 'r':
9368 /* In this case we need a register. Use %g0 if the
9369 operand is const0_rtx. */
9370 if (x == const0_rtx
9371 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
9373 fputs ("%g0", file);
9374 return;
9376 else
9377 break;
9379 case 'A':
9380 switch (GET_CODE (x))
9382 case IOR:
9383 s = "or";
9384 break;
9385 case AND:
9386 s = "and";
9387 break;
9388 case XOR:
9389 s = "xor";
9390 break;
9391 default:
9392 output_operand_lossage ("invalid %%A operand");
9393 s = "";
9394 break;
9396 fputs (s, file);
9397 return;
9399 case 'B':
9400 switch (GET_CODE (x))
9402 case IOR:
9403 s = "orn";
9404 break;
9405 case AND:
9406 s = "andn";
9407 break;
9408 case XOR:
9409 s = "xnor";
9410 break;
9411 default:
9412 output_operand_lossage ("invalid %%B operand");
9413 s = "";
9414 break;
9416 fputs (s, file);
9417 return;
9419 /* This is used by the conditional move instructions. */
9420 case 'C':
9422 machine_mode mode = GET_MODE (XEXP (x, 0));
9423 switch (GET_CODE (x))
9425 case NE:
9426 if (mode == CCVmode || mode == CCXVmode)
9427 s = "vs";
9428 else
9429 s = "ne";
9430 break;
9431 case EQ:
9432 if (mode == CCVmode || mode == CCXVmode)
9433 s = "vc";
9434 else
9435 s = "e";
9436 break;
9437 case GE:
9438 if (mode == CCNZmode || mode == CCXNZmode)
9439 s = "pos";
9440 else
9441 s = "ge";
9442 break;
9443 case GT:
9444 s = "g";
9445 break;
9446 case LE:
9447 s = "le";
9448 break;
9449 case LT:
9450 if (mode == CCNZmode || mode == CCXNZmode)
9451 s = "neg";
9452 else
9453 s = "l";
9454 break;
9455 case GEU:
9456 s = "geu";
9457 break;
9458 case GTU:
9459 s = "gu";
9460 break;
9461 case LEU:
9462 s = "leu";
9463 break;
9464 case LTU:
9465 s = "lu";
9466 break;
9467 case LTGT:
9468 s = "lg";
9469 break;
9470 case UNORDERED:
9471 s = "u";
9472 break;
9473 case ORDERED:
9474 s = "o";
9475 break;
9476 case UNLT:
9477 s = "ul";
9478 break;
9479 case UNLE:
9480 s = "ule";
9481 break;
9482 case UNGT:
9483 s = "ug";
9484 break;
9485 case UNGE:
9486 s = "uge"
9487 ; break;
9488 case UNEQ:
9489 s = "ue";
9490 break;
9491 default:
9492 output_operand_lossage ("invalid %%C operand");
9493 s = "";
9494 break;
9496 fputs (s, file);
9497 return;
9500 /* These are used by the movr instruction pattern. */
9501 case 'D':
9503 switch (GET_CODE (x))
9505 case NE:
9506 s = "ne";
9507 break;
9508 case EQ:
9509 s = "e";
9510 break;
9511 case GE:
9512 s = "gez";
9513 break;
9514 case LT:
9515 s = "lz";
9516 break;
9517 case LE:
9518 s = "lez";
9519 break;
9520 case GT:
9521 s = "gz";
9522 break;
9523 default:
9524 output_operand_lossage ("invalid %%D operand");
9525 s = "";
9526 break;
9528 fputs (s, file);
9529 return;
9532 case 'b':
9534 /* Print a sign-extended character. */
9535 int i = trunc_int_for_mode (INTVAL (x), QImode);
9536 fprintf (file, "%d", i);
9537 return;
9540 case 'f':
9541 /* Operand must be a MEM; write its address. */
9542 if (GET_CODE (x) != MEM)
9543 output_operand_lossage ("invalid %%f operand");
9544 output_address (GET_MODE (x), XEXP (x, 0));
9545 return;
9547 case 's':
9549 /* Print a sign-extended 32-bit value. */
9550 HOST_WIDE_INT i;
9551 if (GET_CODE (x) == CONST_INT)
9552 i = INTVAL (x);
9553 else
9555 output_operand_lossage ("invalid %%s operand");
9556 return;
9558 i = trunc_int_for_mode (i, SImode);
9559 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
9560 return;
9563 case 0:
9564 /* Do nothing special. */
9565 break;
9567 default:
9568 /* Undocumented flag. */
9569 output_operand_lossage ("invalid operand output code");
9572 if (GET_CODE (x) == REG)
9573 fputs (reg_names[REGNO (x)], file);
9574 else if (GET_CODE (x) == MEM)
9576 fputc ('[', file);
9577 /* Poor Sun assembler doesn't understand absolute addressing. */
9578 if (CONSTANT_P (XEXP (x, 0)))
9579 fputs ("%g0+", file);
9580 output_address (GET_MODE (x), XEXP (x, 0));
9581 fputc (']', file);
9583 else if (GET_CODE (x) == HIGH)
9585 fputs ("%hi(", file);
9586 output_addr_const (file, XEXP (x, 0));
9587 fputc (')', file);
9589 else if (GET_CODE (x) == LO_SUM)
9591 sparc_print_operand (file, XEXP (x, 0), 0);
9592 if (TARGET_CM_MEDMID)
9593 fputs ("+%l44(", file);
9594 else
9595 fputs ("+%lo(", file);
9596 output_addr_const (file, XEXP (x, 1));
9597 fputc (')', file);
9599 else if (GET_CODE (x) == CONST_DOUBLE)
9600 output_operand_lossage ("floating-point constant not a valid immediate operand");
9601 else
9602 output_addr_const (file, x);
9605 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
9607 static void
9608 sparc_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x)
9610 register rtx base, index = 0;
9611 int offset = 0;
9612 register rtx addr = x;
9614 if (REG_P (addr))
9615 fputs (reg_names[REGNO (addr)], file);
9616 else if (GET_CODE (addr) == PLUS)
9618 if (CONST_INT_P (XEXP (addr, 0)))
9619 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
9620 else if (CONST_INT_P (XEXP (addr, 1)))
9621 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
9622 else
9623 base = XEXP (addr, 0), index = XEXP (addr, 1);
9624 if (GET_CODE (base) == LO_SUM)
9626 gcc_assert (USE_AS_OFFSETABLE_LO10
9627 && TARGET_ARCH64
9628 && ! TARGET_CM_MEDMID);
9629 output_operand (XEXP (base, 0), 0);
9630 fputs ("+%lo(", file);
9631 output_address (VOIDmode, XEXP (base, 1));
9632 fprintf (file, ")+%d", offset);
9634 else
9636 fputs (reg_names[REGNO (base)], file);
9637 if (index == 0)
9638 fprintf (file, "%+d", offset);
9639 else if (REG_P (index))
9640 fprintf (file, "+%s", reg_names[REGNO (index)]);
9641 else if (GET_CODE (index) == SYMBOL_REF
9642 || GET_CODE (index) == LABEL_REF
9643 || GET_CODE (index) == CONST)
9644 fputc ('+', file), output_addr_const (file, index);
9645 else gcc_unreachable ();
9648 else if (GET_CODE (addr) == MINUS
9649 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
9651 output_addr_const (file, XEXP (addr, 0));
9652 fputs ("-(", file);
9653 output_addr_const (file, XEXP (addr, 1));
9654 fputs ("-.)", file);
9656 else if (GET_CODE (addr) == LO_SUM)
9658 output_operand (XEXP (addr, 0), 0);
9659 if (TARGET_CM_MEDMID)
9660 fputs ("+%l44(", file);
9661 else
9662 fputs ("+%lo(", file);
9663 output_address (VOIDmode, XEXP (addr, 1));
9664 fputc (')', file);
9666 else if (flag_pic
9667 && GET_CODE (addr) == CONST
9668 && GET_CODE (XEXP (addr, 0)) == MINUS
9669 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
9670 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
9671 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
9673 addr = XEXP (addr, 0);
9674 output_addr_const (file, XEXP (addr, 0));
9675 /* Group the args of the second CONST in parentheses. */
9676 fputs ("-(", file);
9677 /* Skip past the second CONST--it does nothing for us. */
9678 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
9679 /* Close the parenthesis. */
9680 fputc (')', file);
9682 else
9684 output_addr_const (file, addr);
9688 /* Target hook for assembling integer objects. The sparc version has
9689 special handling for aligned DI-mode objects. */
9691 static bool
9692 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
9694 /* ??? We only output .xword's for symbols and only then in environments
9695 where the assembler can handle them. */
9696 if (aligned_p && size == 8 && GET_CODE (x) != CONST_INT)
9698 if (TARGET_V9)
9700 assemble_integer_with_op ("\t.xword\t", x);
9701 return true;
9703 else
9705 assemble_aligned_integer (4, const0_rtx);
9706 assemble_aligned_integer (4, x);
9707 return true;
9710 return default_assemble_integer (x, size, aligned_p);
9713 /* Return the value of a code used in the .proc pseudo-op that says
9714 what kind of result this function returns. For non-C types, we pick
9715 the closest C type. */
9717 #ifndef SHORT_TYPE_SIZE
9718 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
9719 #endif
9721 #ifndef INT_TYPE_SIZE
9722 #define INT_TYPE_SIZE BITS_PER_WORD
9723 #endif
9725 #ifndef LONG_TYPE_SIZE
9726 #define LONG_TYPE_SIZE BITS_PER_WORD
9727 #endif
9729 #ifndef LONG_LONG_TYPE_SIZE
9730 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
9731 #endif
9733 #ifndef FLOAT_TYPE_SIZE
9734 #define FLOAT_TYPE_SIZE BITS_PER_WORD
9735 #endif
9737 #ifndef DOUBLE_TYPE_SIZE
9738 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9739 #endif
9741 #ifndef LONG_DOUBLE_TYPE_SIZE
9742 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9743 #endif
9745 unsigned long
9746 sparc_type_code (register tree type)
9748 register unsigned long qualifiers = 0;
9749 register unsigned shift;
9751 /* Only the first 30 bits of the qualifier are valid. We must refrain from
9752 setting more, since some assemblers will give an error for this. Also,
9753 we must be careful to avoid shifts of 32 bits or more to avoid getting
9754 unpredictable results. */
9756 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
9758 switch (TREE_CODE (type))
9760 case ERROR_MARK:
9761 return qualifiers;
9763 case ARRAY_TYPE:
9764 qualifiers |= (3 << shift);
9765 break;
9767 case FUNCTION_TYPE:
9768 case METHOD_TYPE:
9769 qualifiers |= (2 << shift);
9770 break;
9772 case POINTER_TYPE:
9773 case REFERENCE_TYPE:
9774 case OFFSET_TYPE:
9775 qualifiers |= (1 << shift);
9776 break;
9778 case RECORD_TYPE:
9779 return (qualifiers | 8);
9781 case UNION_TYPE:
9782 case QUAL_UNION_TYPE:
9783 return (qualifiers | 9);
9785 case ENUMERAL_TYPE:
9786 return (qualifiers | 10);
9788 case VOID_TYPE:
9789 return (qualifiers | 16);
9791 case INTEGER_TYPE:
9792 /* If this is a range type, consider it to be the underlying
9793 type. */
9794 if (TREE_TYPE (type) != 0)
9795 break;
9797 /* Carefully distinguish all the standard types of C,
9798 without messing up if the language is not C. We do this by
9799 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
9800 look at both the names and the above fields, but that's redundant.
9801 Any type whose size is between two C types will be considered
9802 to be the wider of the two types. Also, we do not have a
9803 special code to use for "long long", so anything wider than
9804 long is treated the same. Note that we can't distinguish
9805 between "int" and "long" in this code if they are the same
9806 size, but that's fine, since neither can the assembler. */
9808 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
9809 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
9811 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
9812 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
9814 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
9815 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
9817 else
9818 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
9820 case REAL_TYPE:
9821 /* If this is a range type, consider it to be the underlying
9822 type. */
9823 if (TREE_TYPE (type) != 0)
9824 break;
9826 /* Carefully distinguish all the standard types of C,
9827 without messing up if the language is not C. */
9829 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
9830 return (qualifiers | 6);
9832 else
9833 return (qualifiers | 7);
9835 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
9836 /* ??? We need to distinguish between double and float complex types,
9837 but I don't know how yet because I can't reach this code from
9838 existing front-ends. */
9839 return (qualifiers | 7); /* Who knows? */
9841 case VECTOR_TYPE:
9842 case BOOLEAN_TYPE: /* Boolean truth value type. */
9843 case LANG_TYPE:
9844 case NULLPTR_TYPE:
9845 return qualifiers;
9847 default:
9848 gcc_unreachable (); /* Not a type! */
9852 return qualifiers;
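/* Editor's worked example (a sketch of the encoding above): for the C
   type "int **", the two POINTER_TYPE levels set the 2-bit fields at
   shifts 6 and 8, and the walk ends at the signed-int code 4 in the low
   bits, giving (1 << 6) | (1 << 8) | 4 == 0x144. A plain "unsigned
   short" is just 13. */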
9855 /* Nested function support. */
9857 /* Emit RTL insns to initialize the variable parts of a trampoline.
9858 FNADDR is an RTX for the address of the function's pure code.
9859 CXT is an RTX for the static chain value for the function.
9861 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
9862 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
9863 (to store insns). This is a bit excessive. Perhaps a different
9864 mechanism would be better here.
9866 Emit enough FLUSH insns to synchronize the data and instruction caches. */
9868 static void
9869 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9871 /* SPARC 32-bit trampoline:
9873 sethi %hi(fn), %g1
9874 sethi %hi(static), %g2
9875 jmp %g1+%lo(fn)
9876 or %g2, %lo(static), %g2
9878 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
9879 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
9880 */
9882 emit_move_insn
9883 (adjust_address (m_tramp, SImode, 0),
9884 expand_binop (SImode, ior_optab,
9885 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
9886 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
9887 NULL_RTX, 1, OPTAB_DIRECT));
9889 emit_move_insn
9890 (adjust_address (m_tramp, SImode, 4),
9891 expand_binop (SImode, ior_optab,
9892 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
9893 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
9894 NULL_RTX, 1, OPTAB_DIRECT));
9896 emit_move_insn
9897 (adjust_address (m_tramp, SImode, 8),
9898 expand_binop (SImode, ior_optab,
9899 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
9900 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
9901 NULL_RTX, 1, OPTAB_DIRECT));
9903 emit_move_insn
9904 (adjust_address (m_tramp, SImode, 12),
9905 expand_binop (SImode, ior_optab,
9906 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
9907 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
9908 NULL_RTX, 1, OPTAB_DIRECT));
9910 /* On UltraSPARC, a flush flushes an entire cache line.  The trampoline is
9911 aligned on a 16-byte boundary, so one flush clears it all.  */
9912 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 0))));
9913 if (sparc_cpu != PROCESSOR_ULTRASPARC
9914 && sparc_cpu != PROCESSOR_ULTRASPARC3
9915 && sparc_cpu != PROCESSOR_NIAGARA
9916 && sparc_cpu != PROCESSOR_NIAGARA2
9917 && sparc_cpu != PROCESSOR_NIAGARA3
9918 && sparc_cpu != PROCESSOR_NIAGARA4
9919 && sparc_cpu != PROCESSOR_NIAGARA7
9920 && sparc_cpu != PROCESSOR_M8)
9921 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 8))));
9923 /* Call __enable_execute_stack after writing onto the stack to make sure
9924 the stack address is accessible. */
9925 #ifdef HAVE_ENABLE_EXECUTE_STACK
9926 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
9927 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
9928 #endif
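/* Illustration (not part of the original source): with a hypothetical
   fnaddr of 0x12345678 and cxt of 0xabcdef00, the four words stored
   above work out to

     0x03000000 | (0x12345678 >> 10)   = 0x03048d15   sethi %hi(fn), %g1
     0x05000000 | (0xabcdef00 >> 10)   = 0x052af37b   sethi %hi(static), %g2
     0x81c06000 | (0x12345678 & 0x3ff) = 0x81c06278   jmp %g1+%lo(fn)
     0x8410a000 | (0xabcdef00 & 0x3ff) = 0x8410a300   or %g2, %lo(static), %g2

   i.e. the 22-bit sethi immediate and 10-bit %lo field of each
   address are merged into the opcode skeletons from the comment.  */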
9932 /* The 64-bit version is simpler because it makes more sense to load the
9933 values as "immediate" data out of the trampoline. It's also easier since
9934 we can read the PC without clobbering a register. */
9936 static void
9937 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9939 /* SPARC 64-bit trampoline:
9941 rd %pc, %g1
9942 ldx [%g1+24], %g5
9943 jmp %g5
9944 ldx [%g1+16], %g5
9945 	+16 bytes data  */
9948 emit_move_insn (adjust_address (m_tramp, SImode, 0),
9949 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
9950 emit_move_insn (adjust_address (m_tramp, SImode, 4),
9951 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
9952 emit_move_insn (adjust_address (m_tramp, SImode, 8),
9953 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
9954 emit_move_insn (adjust_address (m_tramp, SImode, 12),
9955 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
9956 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
9957 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
9958 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
9960 if (sparc_cpu != PROCESSOR_ULTRASPARC
9961 && sparc_cpu != PROCESSOR_ULTRASPARC3
9962 && sparc_cpu != PROCESSOR_NIAGARA
9963 && sparc_cpu != PROCESSOR_NIAGARA2
9964 && sparc_cpu != PROCESSOR_NIAGARA3
9965 && sparc_cpu != PROCESSOR_NIAGARA4
9966 && sparc_cpu != PROCESSOR_NIAGARA7
9967 && sparc_cpu != PROCESSOR_M8)
9968 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
9970 /* Call __enable_execute_stack after writing onto the stack to make sure
9971 the stack address is accessible. */
9972 #ifdef HAVE_ENABLE_EXECUTE_STACK
9973 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
9974 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
9975 #endif
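/* Illustration (not part of the original source): the four constant
   words stored above decode to the code sequence from the comment:

     0x83414000   rd   %pc, %g1
     0xca586018   ldx  [%g1+24], %g5    ! fetch FNADDR
     0x81c14000   jmp  %g5
     0xca586010   ldx  [%g1+16], %g5    ! delay slot: fetch CXT

   with CXT at offset 16 and FNADDR at offset 24 read as plain data
   relative to the %pc captured by the first instruction.  */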
9978 /* Worker for TARGET_TRAMPOLINE_INIT. */
9980 static void
9981 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9983 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
9984 cxt = force_reg (Pmode, cxt);
9985 if (TARGET_ARCH64)
9986 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
9987 else
9988 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
9991 /* Adjust the cost of a scheduling dependency.  Return the new cost of
9992 a dependency of INSN on DEP_INSN.  COST is the current cost.  */
9994 static int
9995 supersparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
9996 int cost)
9998 enum attr_type insn_type;
10000 if (recog_memoized (insn) < 0)
10001 return cost;
10003 insn_type = get_attr_type (insn);
10005 if (dep_type == 0)
10007 /* Data dependency; DEP_INSN writes a register that INSN reads some
10008 cycles later. */
10010 /* If a load, then the dependence must be on the memory address;
10011 add an extra "cycle".  Note that the cost could be two cycles
10012 if the reg was written late in an instruction group; we cannot tell
10013 here.  */
10014 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
10015 return cost + 3;
10017 /* Get the delay only if the address of the store is the dependence. */
10018 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
10020 rtx pat = PATTERN (insn);
10021 rtx dep_pat = PATTERN (dep_insn);
10023 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
10024 return cost; /* This should not happen! */
10026 /* The dependency between the two instructions was on the data that
10027 is being stored. Assume that this implies that the address of the
10028 store is not dependent. */
10029 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
10030 return cost;
10032 return cost + 3; /* An approximation. */
10035 /* A shift instruction cannot receive its data from an instruction
10036 in the same cycle; add a one cycle penalty. */
10037 if (insn_type == TYPE_SHIFT)
10038 return cost + 3; /* Split before cascade into shift. */
10040 else
10042 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
10043 INSN writes some cycles later. */
10045 /* These are significant only for the FPU; writing an FP reg before
10046 the FPU has finished with it stalls the processor.  */
10048 /* Reusing an integer register causes no problems. */
10049 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
10050 return 0;
10053 return cost;
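/* Illustration (not part of the original source): for a pair such as

     ld  [%o0], %o1    ! DEP_INSN
     ld  [%o1], %o2    ! INSN: its address depends on DEP_INSN's result

   the hook above reports cost + 3, nudging the scheduler to keep the
   dependent load away from the load that produces its address.  */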
10056 static int
10057 hypersparc_adjust_cost (rtx_insn *insn, int dtype, rtx_insn *dep_insn,
10058 int cost)
10060 enum attr_type insn_type, dep_type;
10061 rtx pat = PATTERN (insn);
10062 rtx dep_pat = PATTERN (dep_insn);
10064 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
10065 return cost;
10067 insn_type = get_attr_type (insn);
10068 dep_type = get_attr_type (dep_insn);
10070 switch (dtype)
10072 case 0:
10073 /* Data dependency; DEP_INSN writes a register that INSN reads some
10074 cycles later. */
10076 switch (insn_type)
10078 case TYPE_STORE:
10079 case TYPE_FPSTORE:
10080 /* Get the delay iff the address of the store is the dependence. */
10081 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
10082 return cost;
10084 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
10085 return cost;
10086 return cost + 3;
10088 case TYPE_LOAD:
10089 case TYPE_SLOAD:
10090 case TYPE_FPLOAD:
10091 /* If a load, then the dependence must be on the memory address.  If
10092 the addresses aren't equal, then it might be a false dependency.  */
10093 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
10095 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
10096 || GET_CODE (SET_DEST (dep_pat)) != MEM
10097 || GET_CODE (SET_SRC (pat)) != MEM
10098 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
10099 XEXP (SET_SRC (pat), 0)))
10100 return cost + 2;
10102 return cost + 8;
10104 break;
10106 case TYPE_BRANCH:
10107 /* Compare to branch latency is 0. There is no benefit from
10108 separating compare and branch. */
10109 if (dep_type == TYPE_COMPARE)
10110 return 0;
10111 /* Floating point compare to branch latency is less than
10112 compare to conditional move. */
10113 if (dep_type == TYPE_FPCMP)
10114 return cost - 1;
10115 break;
10116 default:
10117 break;
10119 break;
10121 case REG_DEP_ANTI:
10122 /* Anti-dependencies only penalize the FPU.  */
10123 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
10124 return 0;
10125 break;
10127 default:
10128 break;
10131 return cost;
10134 static int
10135 sparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep, int cost,
10136 unsigned int)
10138 switch (sparc_cpu)
10140 case PROCESSOR_SUPERSPARC:
10141 cost = supersparc_adjust_cost (insn, dep_type, dep, cost);
10142 break;
10143 case PROCESSOR_HYPERSPARC:
10144 case PROCESSOR_SPARCLITE86X:
10145 cost = hypersparc_adjust_cost (insn, dep_type, dep, cost);
10146 break;
10147 default:
10148 break;
10150 return cost;
10153 static void
10154 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
10155 int sched_verbose ATTRIBUTE_UNUSED,
10156 int max_ready ATTRIBUTE_UNUSED)
10159 static int
10160 sparc_use_sched_lookahead (void)
10162 if (sparc_cpu == PROCESSOR_NIAGARA
10163 || sparc_cpu == PROCESSOR_NIAGARA2
10164 || sparc_cpu == PROCESSOR_NIAGARA3)
10165 return 0;
10166 if (sparc_cpu == PROCESSOR_NIAGARA4
10167 || sparc_cpu == PROCESSOR_NIAGARA7
10168 || sparc_cpu == PROCESSOR_M8)
10169 return 2;
10170 if (sparc_cpu == PROCESSOR_ULTRASPARC
10171 || sparc_cpu == PROCESSOR_ULTRASPARC3)
10172 return 4;
10173 if ((1 << sparc_cpu) &
10174 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
10175 (1 << PROCESSOR_SPARCLITE86X)))
10176 return 3;
10177 return 0;
10180 static int
10181 sparc_issue_rate (void)
10183 switch (sparc_cpu)
10185 case PROCESSOR_NIAGARA:
10186 case PROCESSOR_NIAGARA2:
10187 case PROCESSOR_NIAGARA3:
10188 default:
10189 return 1;
10190 case PROCESSOR_NIAGARA4:
10191 case PROCESSOR_NIAGARA7:
10192 case PROCESSOR_V9:
10193 /* Assume V9 processors are capable of at least dual-issue. */
10194 return 2;
10195 case PROCESSOR_SUPERSPARC:
10196 return 3;
10197 case PROCESSOR_HYPERSPARC:
10198 case PROCESSOR_SPARCLITE86X:
10199 return 2;
10200 case PROCESSOR_ULTRASPARC:
10201 case PROCESSOR_ULTRASPARC3:
10202 case PROCESSOR_M8:
10203 return 4;
10207 static int
10208 set_extends (rtx_insn *insn)
10210 rtx pat = PATTERN (insn);
10212 switch (GET_CODE (SET_SRC (pat)))
10214 /* Load and some shift instructions zero extend. */
10215 case MEM:
10216 case ZERO_EXTEND:
10217 /* sethi clears the high bits.  */
10218 case HIGH:
10219 /* LO_SUM is used with sethi; sethi clears the high
10220 bits and the values used with lo_sum are positive.  */
10221 case LO_SUM:
10222 /* Store flag stores 0 or 1.  */
10223 case LT: case LTU:
10224 case GT: case GTU:
10225 case LE: case LEU:
10226 case GE: case GEU:
10227 case EQ:
10228 case NE:
10229 return 1;
10230 case AND:
10232 rtx op0 = XEXP (SET_SRC (pat), 0);
10233 rtx op1 = XEXP (SET_SRC (pat), 1);
10234 if (GET_CODE (op1) == CONST_INT)
10235 return INTVAL (op1) >= 0;
10236 if (GET_CODE (op0) != REG)
10237 return 0;
10238 if (sparc_check_64 (op0, insn) == 1)
10239 return 1;
10240 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
10242 case IOR:
10243 case XOR:
10245 rtx op0 = XEXP (SET_SRC (pat), 0);
10246 rtx op1 = XEXP (SET_SRC (pat), 1);
10247 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
10248 return 0;
10249 if (GET_CODE (op1) == CONST_INT)
10250 return INTVAL (op1) >= 0;
10251 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
10253 case LSHIFTRT:
10254 return GET_MODE (SET_SRC (pat)) == SImode;
10255 /* Positive integers leave the high bits zero. */
10256 case CONST_INT:
10257 return !(INTVAL (SET_SRC (pat)) & 0x80000000);
10258 case ASHIFTRT:
10259 case SIGN_EXTEND:
10260 return - (GET_MODE (SET_SRC (pat)) == SImode);
10261 case REG:
10262 return sparc_check_64 (SET_SRC (pat), insn);
10263 default:
10264 return 0;
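/* Illustration (not part of the original source): in V8+ terms the
   switch above asks what an SImode set leaves in the upper half of
   the underlying 64-bit hardware register, e.g.

     (set (reg:SI %o0) (mem:SI ...))        ! lduw zero-fills:   1
     (set (reg:SI %o0) (ashiftrt:SI ...))   ! sra sign-extends: -1
     (set (reg:SI %o0) (plus:SI ...))       ! unknown:           0  */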
10268 /* We _ought_ to have only one kind per function, but... */
10269 static GTY(()) rtx sparc_addr_diff_list;
10270 static GTY(()) rtx sparc_addr_list;
10272 void
10273 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
10275 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10276 if (diff)
10277 sparc_addr_diff_list
10278 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
10279 else
10280 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
10283 static void
10284 sparc_output_addr_vec (rtx vec)
10286 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
10287 int idx, vlen = XVECLEN (body, 0);
10289 #ifdef ASM_OUTPUT_ADDR_VEC_START
10290 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
10291 #endif
10293 #ifdef ASM_OUTPUT_CASE_LABEL
10294 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
10295 NEXT_INSN (lab));
10296 #else
10297 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10298 #endif
10300 for (idx = 0; idx < vlen; idx++)
10302 ASM_OUTPUT_ADDR_VEC_ELT
10303 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10306 #ifdef ASM_OUTPUT_ADDR_VEC_END
10307 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10308 #endif
10311 static void
10312 sparc_output_addr_diff_vec (rtx vec)
10314 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
10315 rtx base = XEXP (XEXP (body, 0), 0);
10316 int idx, vlen = XVECLEN (body, 1);
10318 #ifdef ASM_OUTPUT_ADDR_VEC_START
10319 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
10320 #endif
10322 #ifdef ASM_OUTPUT_CASE_LABEL
10323 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
10324 NEXT_INSN (lab));
10325 #else
10326 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10327 #endif
10329 for (idx = 0; idx < vlen; idx++)
10331 ASM_OUTPUT_ADDR_DIFF_ELT
10332 (asm_out_file,
10333 body,
10334 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10335 CODE_LABEL_NUMBER (base));
10338 #ifdef ASM_OUTPUT_ADDR_VEC_END
10339 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10340 #endif
10343 static void
10344 sparc_output_deferred_case_vectors (void)
10346 rtx t;
10347 int align;
10349 if (sparc_addr_list == NULL_RTX
10350 && sparc_addr_diff_list == NULL_RTX)
10351 return;
10353 /* Align to cache line in the function's code section. */
10354 switch_to_section (current_function_section ());
10356 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10357 if (align > 0)
10358 ASM_OUTPUT_ALIGN (asm_out_file, align);
10360 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
10361 sparc_output_addr_vec (XEXP (t, 0));
10362 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
10363 sparc_output_addr_diff_vec (XEXP (t, 0));
10365 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
10368 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
10369 unknown. Return 1 if the high bits are zero, -1 if the register is
10370 sign extended. */
10371 int
10372 sparc_check_64 (rtx x, rtx_insn *insn)
10374 /* If a register is set only once, it is safe to ignore insns this
10375 code does not know how to handle. The loop will either recognize
10376 the single set and return the correct value or fail to recognize
10377 it and return 0. */
10378 int set_once = 0;
10379 rtx y = x;
10381 gcc_assert (GET_CODE (x) == REG);
10383 if (GET_MODE (x) == DImode)
10384 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
10386 if (flag_expensive_optimizations
10387 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
10388 set_once = 1;
10390 if (insn == 0)
10392 if (set_once)
10393 insn = get_last_insn_anywhere ();
10394 else
10395 return 0;
10398 while ((insn = PREV_INSN (insn)))
10400 switch (GET_CODE (insn))
10402 case JUMP_INSN:
10403 case NOTE:
10404 break;
10405 case CODE_LABEL:
10406 case CALL_INSN:
10407 default:
10408 if (! set_once)
10409 return 0;
10410 break;
10411 case INSN:
10413 rtx pat = PATTERN (insn);
10414 if (GET_CODE (pat) != SET)
10415 return 0;
10416 if (rtx_equal_p (x, SET_DEST (pat)))
10417 return set_extends (insn);
10418 if (y && rtx_equal_p (y, SET_DEST (pat)))
10419 return set_extends (insn);
10420 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
10421 return 0;
10425 return 0;
10428 /* Output a wide shift instruction in V8+ mode. INSN is the instruction,
10429 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
10431 const char *
10432 output_v8plus_shift (rtx_insn *insn, rtx *operands, const char *opcode)
10434 static char asm_code[60];
10436 /* The scratch register is only required when the destination
10437 register is not a 64-bit global or out register. */
10438 if (which_alternative != 2)
10439 operands[3] = operands[0];
10441 /* We can only shift by constants <= 63. */
10442 if (GET_CODE (operands[2]) == CONST_INT)
10443 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
10445 if (GET_CODE (operands[1]) == CONST_INT)
10447 output_asm_insn ("mov\t%1, %3", operands);
10449 else
10451 output_asm_insn ("sllx\t%H1, 32, %3", operands);
10452 if (sparc_check_64 (operands[1], insn) <= 0)
10453 output_asm_insn ("srl\t%L1, 0, %L1", operands);
10454 output_asm_insn ("or\t%L1, %3, %3", operands);
10457 strcpy (asm_code, opcode);
10459 if (which_alternative != 2)
10460 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
10461 else
10462 return
10463 strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
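/* Illustration (not part of the original source): for the scratch
   alternative (which_alternative == 2) with the input pair in
   %i0/%i1, the count in %i2, the destination pair also %i0/%i1 and
   scratch register %g1, an "sllx" request comes out roughly as

     sllx  %i0, 32, %g1    ! high word into position
     srl   %i1, 0, %i1     ! clear the upper half of the low word
     or    %i1, %g1, %g1   ! glue the 64-bit input together
     sllx  %g1, %i2, %g1   ! the shift itself
     srlx  %g1, 32, %i0    ! split the result
     mov   %g1, %i1        ! back into the register pair

   (the srl is skipped when sparc_check_64 already knows the low word
   is zero-extended).  */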
10466 /* Output rtl to increment the profiler label LABELNO
10467 for profiling a function entry. */
10469 void
10470 sparc_profile_hook (int labelno)
10472 char buf[32];
10473 rtx lab, fun;
10475 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
10476 if (NO_PROFILE_COUNTERS)
10478 emit_library_call (fun, LCT_NORMAL, VOIDmode);
10480 else
10482 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10483 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
10484 emit_library_call (fun, LCT_NORMAL, VOIDmode, lab, Pmode);
10488 #ifdef TARGET_SOLARIS
10489 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
10491 static void
10492 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
10493 tree decl ATTRIBUTE_UNUSED)
10495 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
10497 solaris_elf_asm_comdat_section (name, flags, decl);
10498 return;
10501 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
10503 if (!(flags & SECTION_DEBUG))
10504 fputs (",#alloc", asm_out_file);
10505 if (flags & SECTION_WRITE)
10506 fputs (",#write", asm_out_file);
10507 if (flags & SECTION_TLS)
10508 fputs (",#tls", asm_out_file);
10509 if (flags & SECTION_CODE)
10510 fputs (",#execinstr", asm_out_file);
10512 if (flags & SECTION_NOTYPE)
10513 ;
10514 else if (flags & SECTION_BSS)
10515 fputs (",#nobits", asm_out_file);
10516 else
10517 fputs (",#progbits", asm_out_file);
10519 fputc ('\n', asm_out_file);
10521 #endif /* TARGET_SOLARIS */
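/* Illustration (not part of the original source): sample output of the
   hook above for common section flag combinations:

     .section ".rodata",#alloc,#progbits
     .section ".data",#alloc,#write,#progbits
     .section ".tbss",#alloc,#write,#tls,#nobits  */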
10523 /* We do not allow indirect calls to be optimized into sibling calls.
10525 We cannot use sibling calls when delayed branches are disabled
10526 because they will likely require the call delay slot to be filled.
10528 Also, on SPARC 32-bit we cannot emit a sibling call when the
10529 current function returns a structure. This is because the "unimp
10530 after call" convention would cause the callee to return to the
10531 wrong place. The generic code already disallows cases where the
10532 function being called returns a structure.
10534 It may seem strange how this last case could occur. Usually there
10535 is code after the call that jumps to epilogue code, which dumps the
10536 return value into the struct return area.  That ought to invalidate
10537 the sibling call, right?  Well, in the C++ case we can end up passing
10538 the pointer to the struct return area to a constructor (which returns
10539 void) and then nothing else happens. Such a sibling call would look
10540 valid without the added check here.
10542 VxWorks PIC PLT entries require the global pointer to be initialized
10543 on entry. We therefore can't emit sibling calls to them. */
10544 static bool
10545 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
10547 return (decl
10548 && flag_delayed_branch
10549 && (TARGET_ARCH64 || ! cfun->returns_struct)
10550 && !(TARGET_VXWORKS_RTP
10551 && flag_pic
10552 && !targetm.binds_local_p (decl)));
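/* Illustration (not part of the original source): the C++ situation
   described above boils down to something like

     struct S { S (); int x[4]; };
     S make () { return S (); }

   where the hidden return-slot pointer is handed straight to the
   constructor and nothing follows the call, so only the explicit
   returns_struct test blocks the 32-bit sibcall.  */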
10555 /* libfunc renaming. */
10557 static void
10558 sparc_init_libfuncs (void)
10560 if (TARGET_ARCH32)
10562 /* Use the subroutines that Sun's library provides for integer
10563 multiply and divide. The `*' prevents an underscore from
10564 being prepended by the compiler. .umul is a little faster
10565 than .mul. */
10566 set_optab_libfunc (smul_optab, SImode, "*.umul");
10567 set_optab_libfunc (sdiv_optab, SImode, "*.div");
10568 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
10569 set_optab_libfunc (smod_optab, SImode, "*.rem");
10570 set_optab_libfunc (umod_optab, SImode, "*.urem");
10572 /* TFmode arithmetic.  These names are part of the SPARC 32-bit ABI.  */
10573 set_optab_libfunc (add_optab, TFmode, "_Q_add");
10574 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
10575 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
10576 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
10577 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
10579 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
10580 is because with soft-float, the SFmode and DFmode sqrt
10581 instructions will be absent, and the compiler will notice and
10582 try to use the TFmode sqrt instruction for calls to the
10583 builtin function sqrt, but this fails. */
10584 if (TARGET_FPU)
10585 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
10587 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
10588 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
10589 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
10590 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
10591 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
10592 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
10594 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
10595 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
10596 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
10597 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
10599 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
10600 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
10601 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
10602 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
10604 if (DITF_CONVERSION_LIBFUNCS)
10606 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
10607 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
10608 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
10609 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
10612 if (SUN_CONVERSION_LIBFUNCS)
10614 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
10615 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
10616 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
10617 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
10620 if (TARGET_ARCH64)
10622 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
10623 do not exist in the library. Make sure the compiler does not
10624 emit calls to them by accident. (It should always use the
10625 hardware instructions.) */
10626 set_optab_libfunc (smul_optab, SImode, 0);
10627 set_optab_libfunc (sdiv_optab, SImode, 0);
10628 set_optab_libfunc (udiv_optab, SImode, 0);
10629 set_optab_libfunc (smod_optab, SImode, 0);
10630 set_optab_libfunc (umod_optab, SImode, 0);
10632 if (SUN_INTEGER_MULTIPLY_64)
10634 set_optab_libfunc (smul_optab, DImode, "__mul64");
10635 set_optab_libfunc (sdiv_optab, DImode, "__div64");
10636 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
10637 set_optab_libfunc (smod_optab, DImode, "__rem64");
10638 set_optab_libfunc (umod_optab, DImode, "__urem64");
10641 if (SUN_CONVERSION_LIBFUNCS)
10643 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
10644 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
10645 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
10646 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
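/* Illustration (not part of the original source): the net effect of
   the remapping above is that, e.g., compiling

     int mul (int a, int b) { return a * b; }

   for a 32-bit CPU without hardware multiply emits "call .umul"
   instead of the default "call __mulsi3" libcall.  */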
10651 /* SPARC builtins. */
10652 enum sparc_builtins
10654 /* FPU builtins. */
10655 SPARC_BUILTIN_LDFSR,
10656 SPARC_BUILTIN_STFSR,
10658 /* VIS 1.0 builtins. */
10659 SPARC_BUILTIN_FPACK16,
10660 SPARC_BUILTIN_FPACK32,
10661 SPARC_BUILTIN_FPACKFIX,
10662 SPARC_BUILTIN_FEXPAND,
10663 SPARC_BUILTIN_FPMERGE,
10664 SPARC_BUILTIN_FMUL8X16,
10665 SPARC_BUILTIN_FMUL8X16AU,
10666 SPARC_BUILTIN_FMUL8X16AL,
10667 SPARC_BUILTIN_FMUL8SUX16,
10668 SPARC_BUILTIN_FMUL8ULX16,
10669 SPARC_BUILTIN_FMULD8SUX16,
10670 SPARC_BUILTIN_FMULD8ULX16,
10671 SPARC_BUILTIN_FALIGNDATAV4HI,
10672 SPARC_BUILTIN_FALIGNDATAV8QI,
10673 SPARC_BUILTIN_FALIGNDATAV2SI,
10674 SPARC_BUILTIN_FALIGNDATADI,
10675 SPARC_BUILTIN_WRGSR,
10676 SPARC_BUILTIN_RDGSR,
10677 SPARC_BUILTIN_ALIGNADDR,
10678 SPARC_BUILTIN_ALIGNADDRL,
10679 SPARC_BUILTIN_PDIST,
10680 SPARC_BUILTIN_EDGE8,
10681 SPARC_BUILTIN_EDGE8L,
10682 SPARC_BUILTIN_EDGE16,
10683 SPARC_BUILTIN_EDGE16L,
10684 SPARC_BUILTIN_EDGE32,
10685 SPARC_BUILTIN_EDGE32L,
10686 SPARC_BUILTIN_FCMPLE16,
10687 SPARC_BUILTIN_FCMPLE32,
10688 SPARC_BUILTIN_FCMPNE16,
10689 SPARC_BUILTIN_FCMPNE32,
10690 SPARC_BUILTIN_FCMPGT16,
10691 SPARC_BUILTIN_FCMPGT32,
10692 SPARC_BUILTIN_FCMPEQ16,
10693 SPARC_BUILTIN_FCMPEQ32,
10694 SPARC_BUILTIN_FPADD16,
10695 SPARC_BUILTIN_FPADD16S,
10696 SPARC_BUILTIN_FPADD32,
10697 SPARC_BUILTIN_FPADD32S,
10698 SPARC_BUILTIN_FPSUB16,
10699 SPARC_BUILTIN_FPSUB16S,
10700 SPARC_BUILTIN_FPSUB32,
10701 SPARC_BUILTIN_FPSUB32S,
10702 SPARC_BUILTIN_ARRAY8,
10703 SPARC_BUILTIN_ARRAY16,
10704 SPARC_BUILTIN_ARRAY32,
10706 /* VIS 2.0 builtins. */
10707 SPARC_BUILTIN_EDGE8N,
10708 SPARC_BUILTIN_EDGE8LN,
10709 SPARC_BUILTIN_EDGE16N,
10710 SPARC_BUILTIN_EDGE16LN,
10711 SPARC_BUILTIN_EDGE32N,
10712 SPARC_BUILTIN_EDGE32LN,
10713 SPARC_BUILTIN_BMASK,
10714 SPARC_BUILTIN_BSHUFFLEV4HI,
10715 SPARC_BUILTIN_BSHUFFLEV8QI,
10716 SPARC_BUILTIN_BSHUFFLEV2SI,
10717 SPARC_BUILTIN_BSHUFFLEDI,
10719 /* VIS 3.0 builtins. */
10720 SPARC_BUILTIN_CMASK8,
10721 SPARC_BUILTIN_CMASK16,
10722 SPARC_BUILTIN_CMASK32,
10723 SPARC_BUILTIN_FCHKSM16,
10724 SPARC_BUILTIN_FSLL16,
10725 SPARC_BUILTIN_FSLAS16,
10726 SPARC_BUILTIN_FSRL16,
10727 SPARC_BUILTIN_FSRA16,
10728 SPARC_BUILTIN_FSLL32,
10729 SPARC_BUILTIN_FSLAS32,
10730 SPARC_BUILTIN_FSRL32,
10731 SPARC_BUILTIN_FSRA32,
10732 SPARC_BUILTIN_PDISTN,
10733 SPARC_BUILTIN_FMEAN16,
10734 SPARC_BUILTIN_FPADD64,
10735 SPARC_BUILTIN_FPSUB64,
10736 SPARC_BUILTIN_FPADDS16,
10737 SPARC_BUILTIN_FPADDS16S,
10738 SPARC_BUILTIN_FPSUBS16,
10739 SPARC_BUILTIN_FPSUBS16S,
10740 SPARC_BUILTIN_FPADDS32,
10741 SPARC_BUILTIN_FPADDS32S,
10742 SPARC_BUILTIN_FPSUBS32,
10743 SPARC_BUILTIN_FPSUBS32S,
10744 SPARC_BUILTIN_FUCMPLE8,
10745 SPARC_BUILTIN_FUCMPNE8,
10746 SPARC_BUILTIN_FUCMPGT8,
10747 SPARC_BUILTIN_FUCMPEQ8,
10748 SPARC_BUILTIN_FHADDS,
10749 SPARC_BUILTIN_FHADDD,
10750 SPARC_BUILTIN_FHSUBS,
10751 SPARC_BUILTIN_FHSUBD,
10752 SPARC_BUILTIN_FNHADDS,
10753 SPARC_BUILTIN_FNHADDD,
10754 SPARC_BUILTIN_UMULXHI,
10755 SPARC_BUILTIN_XMULX,
10756 SPARC_BUILTIN_XMULXHI,
10758 /* VIS 4.0 builtins. */
10759 SPARC_BUILTIN_FPADD8,
10760 SPARC_BUILTIN_FPADDS8,
10761 SPARC_BUILTIN_FPADDUS8,
10762 SPARC_BUILTIN_FPADDUS16,
10763 SPARC_BUILTIN_FPCMPLE8,
10764 SPARC_BUILTIN_FPCMPGT8,
10765 SPARC_BUILTIN_FPCMPULE16,
10766 SPARC_BUILTIN_FPCMPUGT16,
10767 SPARC_BUILTIN_FPCMPULE32,
10768 SPARC_BUILTIN_FPCMPUGT32,
10769 SPARC_BUILTIN_FPMAX8,
10770 SPARC_BUILTIN_FPMAX16,
10771 SPARC_BUILTIN_FPMAX32,
10772 SPARC_BUILTIN_FPMAXU8,
10773 SPARC_BUILTIN_FPMAXU16,
10774 SPARC_BUILTIN_FPMAXU32,
10775 SPARC_BUILTIN_FPMIN8,
10776 SPARC_BUILTIN_FPMIN16,
10777 SPARC_BUILTIN_FPMIN32,
10778 SPARC_BUILTIN_FPMINU8,
10779 SPARC_BUILTIN_FPMINU16,
10780 SPARC_BUILTIN_FPMINU32,
10781 SPARC_BUILTIN_FPSUB8,
10782 SPARC_BUILTIN_FPSUBS8,
10783 SPARC_BUILTIN_FPSUBUS8,
10784 SPARC_BUILTIN_FPSUBUS16,
10786 /* VIS 4.0B builtins. */
10788 /* Note that all the DICTUNPACK* entries should be kept
10789 contiguous. */
10790 SPARC_BUILTIN_FIRST_DICTUNPACK,
10791 SPARC_BUILTIN_DICTUNPACK8 = SPARC_BUILTIN_FIRST_DICTUNPACK,
10792 SPARC_BUILTIN_DICTUNPACK16,
10793 SPARC_BUILTIN_DICTUNPACK32,
10794 SPARC_BUILTIN_LAST_DICTUNPACK = SPARC_BUILTIN_DICTUNPACK32,
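  /* Illustration (not part of the original source): the FIRST/LAST
     markers allow range tests elsewhere in this file of the form

       if (code >= SPARC_BUILTIN_FIRST_DICTUNPACK
           && code <= SPARC_BUILTIN_LAST_DICTUNPACK)
         ... handle any dictunpack variant ...

     which is why the entries in between must stay contiguous.  */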
10796 /* Note that all the FPCMP*SHL entries should be kept
10797 contiguous. */
10798 SPARC_BUILTIN_FIRST_FPCMPSHL,
10799 SPARC_BUILTIN_FPCMPLE8SHL = SPARC_BUILTIN_FIRST_FPCMPSHL,
10800 SPARC_BUILTIN_FPCMPGT8SHL,
10801 SPARC_BUILTIN_FPCMPEQ8SHL,
10802 SPARC_BUILTIN_FPCMPNE8SHL,
10803 SPARC_BUILTIN_FPCMPLE16SHL,
10804 SPARC_BUILTIN_FPCMPGT16SHL,
10805 SPARC_BUILTIN_FPCMPEQ16SHL,
10806 SPARC_BUILTIN_FPCMPNE16SHL,
10807 SPARC_BUILTIN_FPCMPLE32SHL,
10808 SPARC_BUILTIN_FPCMPGT32SHL,
10809 SPARC_BUILTIN_FPCMPEQ32SHL,
10810 SPARC_BUILTIN_FPCMPNE32SHL,
10811 SPARC_BUILTIN_FPCMPULE8SHL,
10812 SPARC_BUILTIN_FPCMPUGT8SHL,
10813 SPARC_BUILTIN_FPCMPULE16SHL,
10814 SPARC_BUILTIN_FPCMPUGT16SHL,
10815 SPARC_BUILTIN_FPCMPULE32SHL,
10816 SPARC_BUILTIN_FPCMPUGT32SHL,
10817 SPARC_BUILTIN_FPCMPDE8SHL,
10818 SPARC_BUILTIN_FPCMPDE16SHL,
10819 SPARC_BUILTIN_FPCMPDE32SHL,
10820 SPARC_BUILTIN_FPCMPUR8SHL,
10821 SPARC_BUILTIN_FPCMPUR16SHL,
10822 SPARC_BUILTIN_FPCMPUR32SHL,
10823 SPARC_BUILTIN_LAST_FPCMPSHL = SPARC_BUILTIN_FPCMPUR32SHL,
10825 SPARC_BUILTIN_MAX
10828 static GTY (()) tree sparc_builtins[(int) SPARC_BUILTIN_MAX];
10829 static enum insn_code sparc_builtins_icode[(int) SPARC_BUILTIN_MAX];
10831 /* Return true if OPVAL can be used for operand OPNUM of instruction ICODE.
10832 The instruction should require a constant operand of some sort. The
10833 function prints an error if OPVAL is not valid. */
10835 static bool
10836 check_constant_argument (enum insn_code icode, int opnum, rtx opval)
10838 if (GET_CODE (opval) != CONST_INT)
10840 error ("%qs expects a constant argument", insn_data[icode].name);
10841 return false;
10844 if (!(*insn_data[icode].operand[opnum].predicate) (opval, VOIDmode))
10846 error ("constant argument out of range for %qs", insn_data[icode].name);
10847 return false;
10849 return true;
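/* Illustration (not part of the original source): a caller expanding a
   builtin with an immediate operand is expected to bail out through
   this helper, along the lines of

     if (!check_constant_argument (icode, opnum, opval))
       return const0_rtx;

   so a variable argument produces a diagnostic instead of bad code.  */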
10852 /* Add a SPARC builtin function with NAME, ICODE, CODE and TYPE. Return the
10853 function decl or NULL_TREE if the builtin was not added. */
10855 static tree
10856 def_builtin (const char *name, enum insn_code icode, enum sparc_builtins code,
10857 tree type)
10859 tree t
10860 = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);
10862 if (t)
10864 sparc_builtins[code] = t;
10865 sparc_builtins_icode[code] = icode;
10868 return t;
10871 /* Likewise, but also marks the function as "const". */
10873 static tree
10874 def_builtin_const (const char *name, enum insn_code icode,
10875 enum sparc_builtins code, tree type)
10877 tree t = def_builtin (name, icode, code, type);
10879 if (t)
10880 TREE_READONLY (t) = 1;
10882 return t;
10885 /* Implement the TARGET_INIT_BUILTINS target hook.
10886 Create builtin functions for special SPARC instructions. */
10888 static void
10889 sparc_init_builtins (void)
10891 if (TARGET_FPU)
10892 sparc_fpu_init_builtins ();
10894 if (TARGET_VIS)
10895 sparc_vis_init_builtins ();
10898 /* Create builtin functions for FPU instructions. */
10900 static void
10901 sparc_fpu_init_builtins (void)
10903 tree ftype
10904 = build_function_type_list (void_type_node,
10905 build_pointer_type (unsigned_type_node), 0);
10906 def_builtin ("__builtin_load_fsr", CODE_FOR_ldfsr,
10907 SPARC_BUILTIN_LDFSR, ftype);
10908 def_builtin ("__builtin_store_fsr", CODE_FOR_stfsr,
10909 SPARC_BUILTIN_STFSR, ftype);
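/* Illustration (not part of the original source): user code reaches
   the two builtins defined above through a memory word, e.g.

     unsigned int fsr;
     __builtin_store_fsr (&fsr);
     fsr &= ~0x1f;
     __builtin_load_fsr (&fsr);

   which reads %fsr with stfsr, clears the cexc field (a hypothetical
   mask), and writes the result back with ldfsr.  */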
10912 /* Create builtin functions for VIS instructions. */
10914 static void
10915 sparc_vis_init_builtins (void)
10917 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
10918 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
10919 tree v4hi = build_vector_type (intHI_type_node, 4);
10920 tree v2hi = build_vector_type (intHI_type_node, 2);
10921 tree v2si = build_vector_type (intSI_type_node, 2);
10922 tree v1si = build_vector_type (intSI_type_node, 1);
10924 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
10925 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
10926 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
10927 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
10928 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
10929 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
10930 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
10931 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
10932 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
10933 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
10934 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
10935 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
10936 tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
10937 tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
10938 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
10939 v8qi, v8qi,
10940 intDI_type_node, 0);
10941 tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
10942 v8qi, v8qi, 0);
10943 tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
10944 v8qi, v8qi, 0);
10945 tree v8qi_ftype_df_si = build_function_type_list (v8qi, double_type_node,
10946 intSI_type_node, 0);
10947 tree v4hi_ftype_df_si = build_function_type_list (v4hi, double_type_node,
10948 intSI_type_node, 0);
10949 tree v2si_ftype_df_si = build_function_type_list (v2si, double_type_node,
10950 intSI_type_node, 0);
10951 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
10952 intDI_type_node,
10953 intDI_type_node, 0);
10954 tree si_ftype_si_si = build_function_type_list (intSI_type_node,
10955 intSI_type_node,
10956 intSI_type_node, 0);
10957 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
10958 ptr_type_node,
10959 intSI_type_node, 0);
10960 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
10961 ptr_type_node,
10962 intDI_type_node, 0);
10963 tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
10964 ptr_type_node,
10965 ptr_type_node, 0);
10966 tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
10967 ptr_type_node,
10968 ptr_type_node, 0);
10969 tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
10970 v4hi, v4hi, 0);
10971 tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
10972 v2si, v2si, 0);
10973 tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
10974 v4hi, v4hi, 0);
10975 tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
10976 v2si, v2si, 0);
10977 tree void_ftype_di = build_function_type_list (void_type_node,
10978 intDI_type_node, 0);
10979 tree di_ftype_void = build_function_type_list (intDI_type_node,
10980 void_type_node, 0);
10981 tree void_ftype_si = build_function_type_list (void_type_node,
10982 intSI_type_node, 0);
10983 tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
10984 float_type_node,
10985 float_type_node, 0);
10986 tree df_ftype_df_df = build_function_type_list (double_type_node,
10987 double_type_node,
10988 double_type_node, 0);
10990 /* Packing and expanding vectors. */
10991 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
10992 SPARC_BUILTIN_FPACK16, v4qi_ftype_v4hi);
10993 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
10994 SPARC_BUILTIN_FPACK32, v8qi_ftype_v2si_v8qi);
10995 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
10996 SPARC_BUILTIN_FPACKFIX, v2hi_ftype_v2si);
10997 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
10998 SPARC_BUILTIN_FEXPAND, v4hi_ftype_v4qi);
10999 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
11000 SPARC_BUILTIN_FPMERGE, v8qi_ftype_v4qi_v4qi);
11002 /* Multiplications. */
11003 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
11004 SPARC_BUILTIN_FMUL8X16, v4hi_ftype_v4qi_v4hi);
11005 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
11006 SPARC_BUILTIN_FMUL8X16AU, v4hi_ftype_v4qi_v2hi);
11007 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
11008 SPARC_BUILTIN_FMUL8X16AL, v4hi_ftype_v4qi_v2hi);
11009 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
11010 SPARC_BUILTIN_FMUL8SUX16, v4hi_ftype_v8qi_v4hi);
11011 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
11012 SPARC_BUILTIN_FMUL8ULX16, v4hi_ftype_v8qi_v4hi);
11013 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
11014 SPARC_BUILTIN_FMULD8SUX16, v2si_ftype_v4qi_v2hi);
11015 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
11016 SPARC_BUILTIN_FMULD8ULX16, v2si_ftype_v4qi_v2hi);
11018 /* Data aligning. */
11019 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
11020 SPARC_BUILTIN_FALIGNDATAV4HI, v4hi_ftype_v4hi_v4hi);
11021 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
11022 SPARC_BUILTIN_FALIGNDATAV8QI, v8qi_ftype_v8qi_v8qi);
11023 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
11024 SPARC_BUILTIN_FALIGNDATAV2SI, v2si_ftype_v2si_v2si);
11025 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
11026 SPARC_BUILTIN_FALIGNDATADI, di_ftype_di_di);
11028 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
11029 SPARC_BUILTIN_WRGSR, void_ftype_di);
11030 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
11031 SPARC_BUILTIN_RDGSR, di_ftype_void);
11033 if (TARGET_ARCH64)
11035 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
11036 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_di);
11037 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
11038 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_di);
11040 else
11042 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
11043 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_si);
11044 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
11045 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_si);
11048 /* Pixel distance. */
11049 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
11050 SPARC_BUILTIN_PDIST, di_ftype_v8qi_v8qi_di);
11052 /* Edge handling. */
11053 if (TARGET_ARCH64)
11055 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
11056 SPARC_BUILTIN_EDGE8, di_ftype_ptr_ptr);
11057 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
11058 SPARC_BUILTIN_EDGE8L, di_ftype_ptr_ptr);
11059 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
11060 SPARC_BUILTIN_EDGE16, di_ftype_ptr_ptr);
11061 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
11062 SPARC_BUILTIN_EDGE16L, di_ftype_ptr_ptr);
11063 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
11064 SPARC_BUILTIN_EDGE32, di_ftype_ptr_ptr);
11065 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
11066 SPARC_BUILTIN_EDGE32L, di_ftype_ptr_ptr);
11068 else
11070 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
11071 SPARC_BUILTIN_EDGE8, si_ftype_ptr_ptr);
11072 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
11073 SPARC_BUILTIN_EDGE8L, si_ftype_ptr_ptr);
11074 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
11075 SPARC_BUILTIN_EDGE16, si_ftype_ptr_ptr);
11076 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
11077 SPARC_BUILTIN_EDGE16L, si_ftype_ptr_ptr);
11078 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
11079 SPARC_BUILTIN_EDGE32, si_ftype_ptr_ptr);
11080 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
11081 SPARC_BUILTIN_EDGE32L, si_ftype_ptr_ptr);
11084 /* Pixel compare. */
11085 if (TARGET_ARCH64)
11087 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
11088 SPARC_BUILTIN_FCMPLE16, di_ftype_v4hi_v4hi);
11089 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
11090 SPARC_BUILTIN_FCMPLE32, di_ftype_v2si_v2si);
11091 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
11092 SPARC_BUILTIN_FCMPNE16, di_ftype_v4hi_v4hi);
11093 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
11094 SPARC_BUILTIN_FCMPNE32, di_ftype_v2si_v2si);
11095 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
11096 SPARC_BUILTIN_FCMPGT16, di_ftype_v4hi_v4hi);
11097 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
11098 SPARC_BUILTIN_FCMPGT32, di_ftype_v2si_v2si);
11099 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
11100 SPARC_BUILTIN_FCMPEQ16, di_ftype_v4hi_v4hi);
11101 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
11102 SPARC_BUILTIN_FCMPEQ32, di_ftype_v2si_v2si);
11104 else
11106 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
11107 SPARC_BUILTIN_FCMPLE16, si_ftype_v4hi_v4hi);
11108 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
11109 SPARC_BUILTIN_FCMPLE32, si_ftype_v2si_v2si);
11110 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
11111 SPARC_BUILTIN_FCMPNE16, si_ftype_v4hi_v4hi);
11112 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
11113 SPARC_BUILTIN_FCMPNE32, si_ftype_v2si_v2si);
11114 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
11115 SPARC_BUILTIN_FCMPGT16, si_ftype_v4hi_v4hi);
11116 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
11117 SPARC_BUILTIN_FCMPGT32, si_ftype_v2si_v2si);
11118 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
11119 SPARC_BUILTIN_FCMPEQ16, si_ftype_v4hi_v4hi);
11120 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
11121 SPARC_BUILTIN_FCMPEQ32, si_ftype_v2si_v2si);
11124 /* Addition and subtraction. */
11125 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
11126 SPARC_BUILTIN_FPADD16, v4hi_ftype_v4hi_v4hi);
11127 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
11128 SPARC_BUILTIN_FPADD16S, v2hi_ftype_v2hi_v2hi);
11129 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
11130 SPARC_BUILTIN_FPADD32, v2si_ftype_v2si_v2si);
11131 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
11132 SPARC_BUILTIN_FPADD32S, v1si_ftype_v1si_v1si);
11133 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
11134 SPARC_BUILTIN_FPSUB16, v4hi_ftype_v4hi_v4hi);
11135 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
11136 SPARC_BUILTIN_FPSUB16S, v2hi_ftype_v2hi_v2hi);
11137 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
11138 SPARC_BUILTIN_FPSUB32, v2si_ftype_v2si_v2si);
11139 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
11140 SPARC_BUILTIN_FPSUB32S, v1si_ftype_v1si_v1si);
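  /* Illustration (not part of the original source): with -mvis each of
     the partitioned add/subtract builtins above maps onto a single VIS
     instruction, e.g.

       typedef short v4hi __attribute__ ((vector_size (8)));

       v4hi sum4 (v4hi a, v4hi b)
       {
         return __builtin_vis_fpadd16 (a, b);
       }

     compiles to one fpadd16.  */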
11142 /* Three-dimensional array addressing. */
11143 if (TARGET_ARCH64)
11145 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
11146 SPARC_BUILTIN_ARRAY8, di_ftype_di_di);
11147 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
11148 SPARC_BUILTIN_ARRAY16, di_ftype_di_di);
11149 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
11150 SPARC_BUILTIN_ARRAY32, di_ftype_di_di);
11152 else
11154 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
11155 SPARC_BUILTIN_ARRAY8, si_ftype_si_si);
11156 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
11157 SPARC_BUILTIN_ARRAY16, si_ftype_si_si);
11158 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
11159 SPARC_BUILTIN_ARRAY32, si_ftype_si_si);
11162 if (TARGET_VIS2)
11164 /* Edge handling. */
11165 if (TARGET_ARCH64)
11167 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
11168 SPARC_BUILTIN_EDGE8N, di_ftype_ptr_ptr);
11169 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
11170 SPARC_BUILTIN_EDGE8LN, di_ftype_ptr_ptr);
11171 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
11172 SPARC_BUILTIN_EDGE16N, di_ftype_ptr_ptr);
11173 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
11174 SPARC_BUILTIN_EDGE16LN, di_ftype_ptr_ptr);
11175 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
11176 SPARC_BUILTIN_EDGE32N, di_ftype_ptr_ptr);
11177 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
11178 SPARC_BUILTIN_EDGE32LN, di_ftype_ptr_ptr);
11180 else
11182 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
11183 SPARC_BUILTIN_EDGE8N, si_ftype_ptr_ptr);
11184 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
11185 SPARC_BUILTIN_EDGE8LN, si_ftype_ptr_ptr);
11186 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
11187 SPARC_BUILTIN_EDGE16N, si_ftype_ptr_ptr);
11188 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
11189 SPARC_BUILTIN_EDGE16LN, si_ftype_ptr_ptr);
11190 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
11191 SPARC_BUILTIN_EDGE32N, si_ftype_ptr_ptr);
11192 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
11193 SPARC_BUILTIN_EDGE32LN, si_ftype_ptr_ptr);
11196 /* Byte mask and shuffle. */
11197 if (TARGET_ARCH64)
11198 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
11199 SPARC_BUILTIN_BMASK, di_ftype_di_di);
11200 else
11201 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
11202 SPARC_BUILTIN_BMASK, si_ftype_si_si);
11203 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
11204 SPARC_BUILTIN_BSHUFFLEV4HI, v4hi_ftype_v4hi_v4hi);
11205 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
11206 SPARC_BUILTIN_BSHUFFLEV8QI, v8qi_ftype_v8qi_v8qi);
11207 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
11208 SPARC_BUILTIN_BSHUFFLEV2SI, v2si_ftype_v2si_v2si);
11209 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
11210 SPARC_BUILTIN_BSHUFFLEDI, di_ftype_di_di);
11213 if (TARGET_VIS3)
11215 if (TARGET_ARCH64)
11217 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
11218 SPARC_BUILTIN_CMASK8, void_ftype_di);
11219 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
11220 SPARC_BUILTIN_CMASK16, void_ftype_di);
11221 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
11222 SPARC_BUILTIN_CMASK32, void_ftype_di);
11224 else
11226 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
11227 SPARC_BUILTIN_CMASK8, void_ftype_si);
11228 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
11229 SPARC_BUILTIN_CMASK16, void_ftype_si);
11230 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
11231 SPARC_BUILTIN_CMASK32, void_ftype_si);
11234 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
11235 SPARC_BUILTIN_FCHKSM16, v4hi_ftype_v4hi_v4hi);
11237 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
11238 SPARC_BUILTIN_FSLL16, v4hi_ftype_v4hi_v4hi);
11239 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
11240 SPARC_BUILTIN_FSLAS16, v4hi_ftype_v4hi_v4hi);
11241 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
11242 SPARC_BUILTIN_FSRL16, v4hi_ftype_v4hi_v4hi);
11243 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
11244 SPARC_BUILTIN_FSRA16, v4hi_ftype_v4hi_v4hi);
11245 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
11246 SPARC_BUILTIN_FSLL32, v2si_ftype_v2si_v2si);
11247 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
11248 SPARC_BUILTIN_FSLAS32, v2si_ftype_v2si_v2si);
11249 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
11250 SPARC_BUILTIN_FSRL32, v2si_ftype_v2si_v2si);
11251 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
11252 SPARC_BUILTIN_FSRA32, v2si_ftype_v2si_v2si);
11254 if (TARGET_ARCH64)
11255 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
11256 SPARC_BUILTIN_PDISTN, di_ftype_v8qi_v8qi);
11257 else
11258 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
11259 SPARC_BUILTIN_PDISTN, si_ftype_v8qi_v8qi);
11261 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
11262 SPARC_BUILTIN_FMEAN16, v4hi_ftype_v4hi_v4hi);
11263 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
11264 SPARC_BUILTIN_FPADD64, di_ftype_di_di);
11265 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
11266 SPARC_BUILTIN_FPSUB64, di_ftype_di_di);
11268 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
11269 SPARC_BUILTIN_FPADDS16, v4hi_ftype_v4hi_v4hi);
11270 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
11271 SPARC_BUILTIN_FPADDS16S, v2hi_ftype_v2hi_v2hi);
11272 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
11273 SPARC_BUILTIN_FPSUBS16, v4hi_ftype_v4hi_v4hi);
11274 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
11275 SPARC_BUILTIN_FPSUBS16S, v2hi_ftype_v2hi_v2hi);
11276 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
11277 SPARC_BUILTIN_FPADDS32, v2si_ftype_v2si_v2si);
11278 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
11279 SPARC_BUILTIN_FPADDS32S, v1si_ftype_v1si_v1si);
11280 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
11281 SPARC_BUILTIN_FPSUBS32, v2si_ftype_v2si_v2si);
11282 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
11283 SPARC_BUILTIN_FPSUBS32S, v1si_ftype_v1si_v1si);
11285 if (TARGET_ARCH64)
11287 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
11288 SPARC_BUILTIN_FUCMPLE8, di_ftype_v8qi_v8qi);
11289 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
11290 SPARC_BUILTIN_FUCMPNE8, di_ftype_v8qi_v8qi);
11291 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
11292 SPARC_BUILTIN_FUCMPGT8, di_ftype_v8qi_v8qi);
11293 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
11294 SPARC_BUILTIN_FUCMPEQ8, di_ftype_v8qi_v8qi);
11296 else
11298 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
11299 SPARC_BUILTIN_FUCMPLE8, si_ftype_v8qi_v8qi);
11300 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
11301 SPARC_BUILTIN_FUCMPNE8, si_ftype_v8qi_v8qi);
11302 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
11303 SPARC_BUILTIN_FUCMPGT8, si_ftype_v8qi_v8qi);
11304 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
11305 SPARC_BUILTIN_FUCMPEQ8, si_ftype_v8qi_v8qi);
11308 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
11309 SPARC_BUILTIN_FHADDS, sf_ftype_sf_sf);
11310 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
11311 SPARC_BUILTIN_FHADDD, df_ftype_df_df);
11312 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
11313 SPARC_BUILTIN_FHSUBS, sf_ftype_sf_sf);
11314 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
11315 SPARC_BUILTIN_FHSUBD, df_ftype_df_df);
11316 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
11317 SPARC_BUILTIN_FNHADDS, sf_ftype_sf_sf);
11318 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
11319 SPARC_BUILTIN_FNHADDD, df_ftype_df_df);
11321 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
11322 SPARC_BUILTIN_UMULXHI, di_ftype_di_di);
11323 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
11324 SPARC_BUILTIN_XMULX, di_ftype_di_di);
11325 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
11326 SPARC_BUILTIN_XMULXHI, di_ftype_di_di);
11329 if (TARGET_VIS4)
11331 def_builtin_const ("__builtin_vis_fpadd8", CODE_FOR_addv8qi3,
11332 SPARC_BUILTIN_FPADD8, v8qi_ftype_v8qi_v8qi);
11333 def_builtin_const ("__builtin_vis_fpadds8", CODE_FOR_ssaddv8qi3,
11334 SPARC_BUILTIN_FPADDS8, v8qi_ftype_v8qi_v8qi);
11335 def_builtin_const ("__builtin_vis_fpaddus8", CODE_FOR_usaddv8qi3,
11336 SPARC_BUILTIN_FPADDUS8, v8qi_ftype_v8qi_v8qi);
11337 def_builtin_const ("__builtin_vis_fpaddus16", CODE_FOR_usaddv4hi3,
11338 SPARC_BUILTIN_FPADDUS16, v4hi_ftype_v4hi_v4hi);
11341 if (TARGET_ARCH64)
11343 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8di_vis,
11344 SPARC_BUILTIN_FPCMPLE8, di_ftype_v8qi_v8qi);
11345 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8di_vis,
11346 SPARC_BUILTIN_FPCMPGT8, di_ftype_v8qi_v8qi);
11347 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16di_vis,
11348 SPARC_BUILTIN_FPCMPULE16, di_ftype_v4hi_v4hi);
11349 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16di_vis,
11350 SPARC_BUILTIN_FPCMPUGT16, di_ftype_v4hi_v4hi);
11351 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32di_vis,
11352 SPARC_BUILTIN_FPCMPULE32, di_ftype_v2si_v2si);
11353 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32di_vis,
11354 SPARC_BUILTIN_FPCMPUGT32, di_ftype_v2si_v2si);
11356 else
11358 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8si_vis,
11359 SPARC_BUILTIN_FPCMPLE8, si_ftype_v8qi_v8qi);
11360 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8si_vis,
11361 SPARC_BUILTIN_FPCMPGT8, si_ftype_v8qi_v8qi);
11362 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16si_vis,
11363 SPARC_BUILTIN_FPCMPULE16, si_ftype_v4hi_v4hi);
11364 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16si_vis,
11365 SPARC_BUILTIN_FPCMPUGT16, si_ftype_v4hi_v4hi);
11366 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32si_vis,
11367 SPARC_BUILTIN_FPCMPULE32, si_ftype_v2si_v2si);
11368 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32si_vis,
11369 SPARC_BUILTIN_FPCMPUGT32, si_ftype_v2si_v2si);
11372 def_builtin_const ("__builtin_vis_fpmax8", CODE_FOR_maxv8qi3,
11373 SPARC_BUILTIN_FPMAX8, v8qi_ftype_v8qi_v8qi);
11374 def_builtin_const ("__builtin_vis_fpmax16", CODE_FOR_maxv4hi3,
11375 SPARC_BUILTIN_FPMAX16, v4hi_ftype_v4hi_v4hi);
11376 def_builtin_const ("__builtin_vis_fpmax32", CODE_FOR_maxv2si3,
11377 SPARC_BUILTIN_FPMAX32, v2si_ftype_v2si_v2si);
11378 def_builtin_const ("__builtin_vis_fpmaxu8", CODE_FOR_maxuv8qi3,
11379 SPARC_BUILTIN_FPMAXU8, v8qi_ftype_v8qi_v8qi);
11380 def_builtin_const ("__builtin_vis_fpmaxu16", CODE_FOR_maxuv4hi3,
11381 SPARC_BUILTIN_FPMAXU16, v4hi_ftype_v4hi_v4hi);
11382 def_builtin_const ("__builtin_vis_fpmaxu32", CODE_FOR_maxuv2si3,
11383 SPARC_BUILTIN_FPMAXU32, v2si_ftype_v2si_v2si);
11384 def_builtin_const ("__builtin_vis_fpmin8", CODE_FOR_minv8qi3,
11385 SPARC_BUILTIN_FPMIN8, v8qi_ftype_v8qi_v8qi);
11386 def_builtin_const ("__builtin_vis_fpmin16", CODE_FOR_minv4hi3,
11387 SPARC_BUILTIN_FPMIN16, v4hi_ftype_v4hi_v4hi);
11388 def_builtin_const ("__builtin_vis_fpmin32", CODE_FOR_minv2si3,
11389 SPARC_BUILTIN_FPMIN32, v2si_ftype_v2si_v2si);
11390 def_builtin_const ("__builtin_vis_fpminu8", CODE_FOR_minuv8qi3,
11391 SPARC_BUILTIN_FPMINU8, v8qi_ftype_v8qi_v8qi);
11392 def_builtin_const ("__builtin_vis_fpminu16", CODE_FOR_minuv4hi3,
11393 SPARC_BUILTIN_FPMINU16, v4hi_ftype_v4hi_v4hi);
11394 def_builtin_const ("__builtin_vis_fpminu32", CODE_FOR_minuv2si3,
11395 SPARC_BUILTIN_FPMINU32, v2si_ftype_v2si_v2si);
11396 def_builtin_const ("__builtin_vis_fpsub8", CODE_FOR_subv8qi3,
11397 SPARC_BUILTIN_FPSUB8, v8qi_ftype_v8qi_v8qi);
11398 def_builtin_const ("__builtin_vis_fpsubs8", CODE_FOR_sssubv8qi3,
11399 SPARC_BUILTIN_FPSUBS8, v8qi_ftype_v8qi_v8qi);
11400 def_builtin_const ("__builtin_vis_fpsubus8", CODE_FOR_ussubv8qi3,
11401 SPARC_BUILTIN_FPSUBUS8, v8qi_ftype_v8qi_v8qi);
11402 def_builtin_const ("__builtin_vis_fpsubus16", CODE_FOR_ussubv4hi3,
11403 SPARC_BUILTIN_FPSUBUS16, v4hi_ftype_v4hi_v4hi);
11406 if (TARGET_VIS4B)
11408 def_builtin_const ("__builtin_vis_dictunpack8", CODE_FOR_dictunpack8,
11409 SPARC_BUILTIN_DICTUNPACK8, v8qi_ftype_df_si);
11410 def_builtin_const ("__builtin_vis_dictunpack16", CODE_FOR_dictunpack16,
11411 SPARC_BUILTIN_DICTUNPACK16, v4hi_ftype_df_si);
11412 def_builtin_const ("__builtin_vis_dictunpack32", CODE_FOR_dictunpack32,
11413 SPARC_BUILTIN_DICTUNPACK32, v2si_ftype_df_si);
11415 if (TARGET_ARCH64)
11417 tree di_ftype_v8qi_v8qi_si = build_function_type_list (intDI_type_node,
11418 v8qi, v8qi,
11419 intSI_type_node, 0);
11420 tree di_ftype_v4hi_v4hi_si = build_function_type_list (intDI_type_node,
11421 v4hi, v4hi,
11422 intSI_type_node, 0);
11423 tree di_ftype_v2si_v2si_si = build_function_type_list (intDI_type_node,
11424 v2si, v2si,
11425 intSI_type_node, 0);
11427 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8dishl,
11428 SPARC_BUILTIN_FPCMPLE8SHL, di_ftype_v8qi_v8qi_si);
11429 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8dishl,
11430 SPARC_BUILTIN_FPCMPGT8SHL, di_ftype_v8qi_v8qi_si);
11431 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8dishl,
11432 SPARC_BUILTIN_FPCMPEQ8SHL, di_ftype_v8qi_v8qi_si);
11433 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8dishl,
11434 SPARC_BUILTIN_FPCMPNE8SHL, di_ftype_v8qi_v8qi_si);
11436 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16dishl,
11437 SPARC_BUILTIN_FPCMPLE16SHL, di_ftype_v4hi_v4hi_si);
11438 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16dishl,
11439 SPARC_BUILTIN_FPCMPGT16SHL, di_ftype_v4hi_v4hi_si);
11440 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16dishl,
11441 SPARC_BUILTIN_FPCMPEQ16SHL, di_ftype_v4hi_v4hi_si);
11442 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16dishl,
11443 SPARC_BUILTIN_FPCMPNE16SHL, di_ftype_v4hi_v4hi_si);
11445 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32dishl,
11446 SPARC_BUILTIN_FPCMPLE32SHL, di_ftype_v2si_v2si_si);
11447 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32dishl,
11448 SPARC_BUILTIN_FPCMPGT32SHL, di_ftype_v2si_v2si_si);
11449 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32dishl,
11450 SPARC_BUILTIN_FPCMPEQ32SHL, di_ftype_v2si_v2si_si);
11451 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32dishl,
11452 SPARC_BUILTIN_FPCMPNE32SHL, di_ftype_v2si_v2si_si);
11455 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8dishl,
11456 SPARC_BUILTIN_FPCMPULE8SHL, di_ftype_v8qi_v8qi_si);
11457 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8dishl,
11458 SPARC_BUILTIN_FPCMPUGT8SHL, di_ftype_v8qi_v8qi_si);
11460 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16dishl,
11461 SPARC_BUILTIN_FPCMPULE16SHL, di_ftype_v4hi_v4hi_si);
11462 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16dishl,
11463 SPARC_BUILTIN_FPCMPUGT16SHL, di_ftype_v4hi_v4hi_si);
11465 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32dishl,
11466 SPARC_BUILTIN_FPCMPULE32SHL, di_ftype_v2si_v2si_si);
11467 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32dishl,
11468 SPARC_BUILTIN_FPCMPUGT32SHL, di_ftype_v2si_v2si_si);
11470 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8dishl,
11471 SPARC_BUILTIN_FPCMPDE8SHL, di_ftype_v8qi_v8qi_si);
11472 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16dishl,
11473 SPARC_BUILTIN_FPCMPDE16SHL, di_ftype_v4hi_v4hi_si);
11474 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32dishl,
11475 SPARC_BUILTIN_FPCMPDE32SHL, di_ftype_v2si_v2si_si);
11477 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8dishl,
11478 SPARC_BUILTIN_FPCMPUR8SHL, di_ftype_v8qi_v8qi_si);
11479 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16dishl,
11480 SPARC_BUILTIN_FPCMPUR16SHL, di_ftype_v4hi_v4hi_si);
11481 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32dishl,
11482 SPARC_BUILTIN_FPCMPUR32SHL, di_ftype_v2si_v2si_si);
11485 else
11487 tree si_ftype_v8qi_v8qi_si = build_function_type_list (intSI_type_node,
11488 v8qi, v8qi,
11489 intSI_type_node, 0);
11490 tree si_ftype_v4hi_v4hi_si = build_function_type_list (intSI_type_node,
11491 v4hi, v4hi,
11492 intSI_type_node, 0);
11493 tree si_ftype_v2si_v2si_si = build_function_type_list (intSI_type_node,
11494 v2si, v2si,
11495 intSI_type_node, 0);
11497 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8sishl,
11498 SPARC_BUILTIN_FPCMPLE8SHL, si_ftype_v8qi_v8qi_si);
11499 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8sishl,
11500 SPARC_BUILTIN_FPCMPGT8SHL, si_ftype_v8qi_v8qi_si);
11501 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8sishl,
11502 SPARC_BUILTIN_FPCMPEQ8SHL, si_ftype_v8qi_v8qi_si);
11503 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8sishl,
11504 SPARC_BUILTIN_FPCMPNE8SHL, si_ftype_v8qi_v8qi_si);
11506 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16sishl,
11507 SPARC_BUILTIN_FPCMPLE16SHL, si_ftype_v4hi_v4hi_si);
11508 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16sishl,
11509 SPARC_BUILTIN_FPCMPGT16SHL, si_ftype_v4hi_v4hi_si);
11510 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16sishl,
11511 SPARC_BUILTIN_FPCMPEQ16SHL, si_ftype_v4hi_v4hi_si);
11512 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16sishl,
11513 SPARC_BUILTIN_FPCMPNE16SHL, si_ftype_v4hi_v4hi_si);
11515 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32sishl,
11516 SPARC_BUILTIN_FPCMPLE32SHL, si_ftype_v2si_v2si_si);
11517 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32sishl,
11518 SPARC_BUILTIN_FPCMPGT32SHL, si_ftype_v2si_v2si_si);
11519 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32sishl,
11520 SPARC_BUILTIN_FPCMPEQ32SHL, si_ftype_v2si_v2si_si);
11521 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32sishl,
11522 SPARC_BUILTIN_FPCMPNE32SHL, si_ftype_v2si_v2si_si);
11525 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8sishl,
11526 SPARC_BUILTIN_FPCMPULE8SHL, si_ftype_v8qi_v8qi_si);
11527 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8sishl,
11528 SPARC_BUILTIN_FPCMPUGT8SHL, si_ftype_v8qi_v8qi_si);
11530 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16sishl,
11531 SPARC_BUILTIN_FPCMPULE16SHL, si_ftype_v4hi_v4hi_si);
11532 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16sishl,
11533 SPARC_BUILTIN_FPCMPUGT16SHL, si_ftype_v4hi_v4hi_si);
11535 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32sishl,
11536 SPARC_BUILTIN_FPCMPULE32SHL, si_ftype_v2si_v2si_si);
11537 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32sishl,
11538 SPARC_BUILTIN_FPCMPUGT32SHL, si_ftype_v2si_v2si_si);
11540 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8sishl,
11541 SPARC_BUILTIN_FPCMPDE8SHL, si_ftype_v8qi_v8qi_si);
11542 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16sishl,
11543 SPARC_BUILTIN_FPCMPDE16SHL, si_ftype_v4hi_v4hi_si);
11544 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32sishl,
11545 SPARC_BUILTIN_FPCMPDE32SHL, si_ftype_v2si_v2si_si);
11547 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8sishl,
11548 SPARC_BUILTIN_FPCMPUR8SHL, si_ftype_v8qi_v8qi_si);
11549 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16sishl,
11550 SPARC_BUILTIN_FPCMPUR16SHL, si_ftype_v4hi_v4hi_si);
11551 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32sishl,
11552 SPARC_BUILTIN_FPCMPUR32SHL, si_ftype_v2si_v2si_si);
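/* Illustrative use of the shifted-compare builtins registered above
   (assuming -mvis4b on a 64-bit target; on 32-bit the result is an int):

     typedef signed char v8qi __attribute__ ((vector_size (8)));

     long f (v8qi a, v8qi b)
     {
       return __builtin_vis_fpcmple8shl (a, b, 2);
     }

   The third operand must be a literal constant; sparc_expand_builtin
   below rejects non-constant values via check_constant_argument.  */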
11557 /* Implement TARGET_BUILTIN_DECL hook. */
11559 static tree
11560 sparc_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
11562 if (code >= SPARC_BUILTIN_MAX)
11563 return error_mark_node;
11565 return sparc_builtins[code];
11568 /* Implement TARGET_EXPAND_BUILTIN hook. */
11570 static rtx
11571 sparc_expand_builtin (tree exp, rtx target,
11572 rtx subtarget ATTRIBUTE_UNUSED,
11573 machine_mode tmode ATTRIBUTE_UNUSED,
11574 int ignore ATTRIBUTE_UNUSED)
11576 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11577 enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
11578 enum insn_code icode = sparc_builtins_icode[code];
11579 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
11580 call_expr_arg_iterator iter;
11581 int arg_count = 0;
11582 rtx pat, op[4];
11583 tree arg;
11585 if (nonvoid)
11587 machine_mode tmode = insn_data[icode].operand[0].mode;
11588 if (!target
11589 || GET_MODE (target) != tmode
11590 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11591 op[0] = gen_reg_rtx (tmode);
11592 else
11593 op[0] = target;
11595 else
11596 op[0] = NULL_RTX;
11598 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
11600 const struct insn_operand_data *insn_op;
11601 int idx;
11603 if (arg == error_mark_node)
11604 return NULL_RTX;
11606 arg_count++;
11607 idx = arg_count - !nonvoid;
11608 insn_op = &insn_data[icode].operand[idx];
11609 op[arg_count] = expand_normal (arg);
11611 /* Some of the builtins require constant arguments. We check
11612 for this here. */
11613 if ((code >= SPARC_BUILTIN_FIRST_FPCMPSHL
11614 && code <= SPARC_BUILTIN_LAST_FPCMPSHL
11615 && arg_count == 3)
11616 || (code >= SPARC_BUILTIN_FIRST_DICTUNPACK
11617 && code <= SPARC_BUILTIN_LAST_DICTUNPACK
11618 && arg_count == 2))
11620 if (!check_constant_argument (icode, idx, op[arg_count]))
11621 return const0_rtx;
11624 if (code == SPARC_BUILTIN_LDFSR || code == SPARC_BUILTIN_STFSR)
11626 if (!address_operand (op[arg_count], SImode))
11628 op[arg_count] = convert_memory_address (Pmode, op[arg_count]);
11629 op[arg_count] = copy_addr_to_reg (op[arg_count]);
11631 op[arg_count] = gen_rtx_MEM (SImode, op[arg_count]);
11634 else if (insn_op->mode == V1DImode
11635 && GET_MODE (op[arg_count]) == DImode)
11636 op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
11638 else if (insn_op->mode == V1SImode
11639 && GET_MODE (op[arg_count]) == SImode)
11640 op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
11642 if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
11643 insn_op->mode))
11644 op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
11647 switch (arg_count)
11649 case 0:
11650 pat = GEN_FCN (icode) (op[0]);
11651 break;
11652 case 1:
11653 if (nonvoid)
11654 pat = GEN_FCN (icode) (op[0], op[1]);
11655 else
11656 pat = GEN_FCN (icode) (op[1]);
11657 break;
11658 case 2:
11659 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
11660 break;
11661 case 3:
11662 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
11663 break;
11664 default:
11665 gcc_unreachable ();
11668 if (!pat)
11669 return NULL_RTX;
11671 emit_insn (pat);
11673 return (nonvoid ? op[0] : const0_rtx);
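/* For reference (illustrative): with -mvis4, a source-level call such as
   __builtin_vis_fpmax8 (a, b) on two v8qi values reaches the expander
   above with icode CODE_FOR_maxv8qi3 and nonvoid true, so op[0] gets a
   fresh V8QImode register, op[1]/op[2] hold the arguments, and the
   arg_count == 2 case emits GEN_FCN (icode) (op[0], op[1], op[2]),
   yielding a single FPMAX8 instruction.  */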
11676 /* Return the upper 16 bits of the 8x16 multiplication. */
11678 static int
11679 sparc_vis_mul8x16 (int e8, int e16)
11681 return (e8 * e16 + 128) / 256;
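/* For instance, sparc_vis_mul8x16 (5, 100) = (500 + 128) / 256 = 2,
   i.e. the raw product 500 scaled down by 256 (about 1.95) and
   rounded to the nearest integer.  */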
11684 /* Multiply the VECTOR_CSTs CST0 and CST1 as specified by FNCODE and put
11685 the result into the array N_ELTS, whose elements are of INNER_TYPE. */
11687 static void
11688 sparc_handle_vis_mul8x16 (vec<tree> *n_elts, enum sparc_builtins fncode,
11689 tree inner_type, tree cst0, tree cst1)
11691 unsigned i, num = VECTOR_CST_NELTS (cst0);
11692 int scale;
11694 switch (fncode)
11696 case SPARC_BUILTIN_FMUL8X16:
11697 for (i = 0; i < num; ++i)
11699 int val
11700 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11701 TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, i)));
11702 n_elts->quick_push (build_int_cst (inner_type, val));
11704 break;
11706 case SPARC_BUILTIN_FMUL8X16AU:
11707 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0));
11709 for (i = 0; i < num; ++i)
11711 int val
11712 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11713 scale);
11714 n_elts->quick_push (build_int_cst (inner_type, val));
11716 break;
11718 case SPARC_BUILTIN_FMUL8X16AL:
11719 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1));
11721 for (i = 0; i < num; ++i)
11723 int val
11724 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11725 scale);
11726 n_elts->quick_push (build_int_cst (inner_type, val));
11728 break;
11730 default:
11731 gcc_unreachable ();
11735 /* Implement TARGET_FOLD_BUILTIN hook.
11737 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
11738 result of the function call is ignored. NULL_TREE is returned if the
11739 function could not be folded. */
11741 static tree
11742 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
11743 tree *args, bool ignore)
11745 enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
11746 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
11747 tree arg0, arg1, arg2;
11749 if (ignore)
11750 switch (code)
11752 case SPARC_BUILTIN_LDFSR:
11753 case SPARC_BUILTIN_STFSR:
11754 case SPARC_BUILTIN_ALIGNADDR:
11755 case SPARC_BUILTIN_WRGSR:
11756 case SPARC_BUILTIN_BMASK:
11757 case SPARC_BUILTIN_CMASK8:
11758 case SPARC_BUILTIN_CMASK16:
11759 case SPARC_BUILTIN_CMASK32:
11760 break;
11762 default:
11763 return build_zero_cst (rtype);
11766 switch (code)
11768 case SPARC_BUILTIN_FEXPAND:
11769 arg0 = args[0];
11770 STRIP_NOPS (arg0);
11772 if (TREE_CODE (arg0) == VECTOR_CST)
11774 tree inner_type = TREE_TYPE (rtype);
11775 unsigned i;
11777 tree_vector_builder n_elts (rtype, VECTOR_CST_NELTS (arg0), 1);
11778 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11780 unsigned HOST_WIDE_INT val
11781 = TREE_INT_CST_LOW (VECTOR_CST_ELT (arg0, i));
11782 n_elts.quick_push (build_int_cst (inner_type, val << 4));
11784 return n_elts.build ();
11786 break;
11788 case SPARC_BUILTIN_FMUL8X16:
11789 case SPARC_BUILTIN_FMUL8X16AU:
11790 case SPARC_BUILTIN_FMUL8X16AL:
11791 arg0 = args[0];
11792 arg1 = args[1];
11793 STRIP_NOPS (arg0);
11794 STRIP_NOPS (arg1);
11796 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11798 tree inner_type = TREE_TYPE (rtype);
11799 tree_vector_builder n_elts (rtype, VECTOR_CST_NELTS (arg0), 1);
11800 sparc_handle_vis_mul8x16 (&n_elts, code, inner_type, arg0, arg1);
11801 return n_elts.build ();
11803 break;
11805 case SPARC_BUILTIN_FPMERGE:
11806 arg0 = args[0];
11807 arg1 = args[1];
11808 STRIP_NOPS (arg0);
11809 STRIP_NOPS (arg1);
11811 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11813 tree_vector_builder n_elts (rtype, 2 * VECTOR_CST_NELTS (arg0), 1);
11814 unsigned i;
11815 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11817 n_elts.quick_push (VECTOR_CST_ELT (arg0, i));
11818 n_elts.quick_push (VECTOR_CST_ELT (arg1, i));
11821 return n_elts.build ();
11823 break;
11825 case SPARC_BUILTIN_PDIST:
11826 case SPARC_BUILTIN_PDISTN:
11827 arg0 = args[0];
11828 arg1 = args[1];
11829 STRIP_NOPS (arg0);
11830 STRIP_NOPS (arg1);
11831 if (code == SPARC_BUILTIN_PDIST)
11833 arg2 = args[2];
11834 STRIP_NOPS (arg2);
11836 else
11837 arg2 = integer_zero_node;
11839 if (TREE_CODE (arg0) == VECTOR_CST
11840 && TREE_CODE (arg1) == VECTOR_CST
11841 && TREE_CODE (arg2) == INTEGER_CST)
11843 bool overflow = false;
11844 widest_int result = wi::to_widest (arg2);
11845 widest_int tmp;
11846 unsigned i;
11848 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11850 tree e0 = VECTOR_CST_ELT (arg0, i);
11851 tree e1 = VECTOR_CST_ELT (arg1, i);
11853 bool neg1_ovf, neg2_ovf, add1_ovf, add2_ovf;
11855 tmp = wi::neg (wi::to_widest (e1), &neg1_ovf);
11856 tmp = wi::add (wi::to_widest (e0), tmp, SIGNED, &add1_ovf);
11857 if (wi::neg_p (tmp))
11858 tmp = wi::neg (tmp, &neg2_ovf);
11859 else
11860 neg2_ovf = false;
11861 result = wi::add (result, tmp, SIGNED, &add2_ovf);
11862 overflow |= neg1_ovf | neg2_ovf | add1_ovf | add2_ovf;
11865 gcc_assert (!overflow);
11867 return wide_int_to_tree (rtype, result);
11870 default:
11871 break;
11874 return NULL_TREE;
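/* Example of the folding above (illustrative): for constant vectors
   a = { 1, 2, 3, 4, 5, 6, 7, 8 } and b = { 8, 7, 6, 5, 4, 3, 2, 1 },
   __builtin_vis_pdistn (a, b) folds to 7+5+3+1+1+3+5+7 = 32 at compile
   time and no pdistn instruction is emitted.  */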
11877 /* ??? This duplicates information provided to the compiler by the
11878 ??? scheduler description. Some day, teach genautomata to output
11879 ??? the latencies and then CSE will just use that. */
11881 static bool
11882 sparc_rtx_costs (rtx x, machine_mode mode, int outer_code,
11883 int opno ATTRIBUTE_UNUSED,
11884 int *total, bool speed ATTRIBUTE_UNUSED)
11886 int code = GET_CODE (x);
11887 bool float_mode_p = FLOAT_MODE_P (mode);
11889 switch (code)
11891 case CONST_INT:
11892 if (SMALL_INT (x))
11893 *total = 0;
11894 else
11895 *total = 2;
11896 return true;
11898 case CONST_WIDE_INT:
11899 *total = 0;
11900 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 0)))
11901 *total += 2;
11902 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 1)))
11903 *total += 2;
11904 return true;
11906 case HIGH:
11907 *total = 2;
11908 return true;
11910 case CONST:
11911 case LABEL_REF:
11912 case SYMBOL_REF:
11913 *total = 4;
11914 return true;
11916 case CONST_DOUBLE:
11917 *total = 8;
11918 return true;
11920 case MEM:
11921 /* If outer-code was a sign or zero extension, a cost
11922 of COSTS_N_INSNS (1) was already added in. This is
11923 why we are subtracting it back out. */
11924 if (outer_code == ZERO_EXTEND)
11926 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
11928 else if (outer_code == SIGN_EXTEND)
11930 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
11932 else if (float_mode_p)
11934 *total = sparc_costs->float_load;
11936 else
11938 *total = sparc_costs->int_load;
11941 return true;
11943 case PLUS:
11944 case MINUS:
11945 if (float_mode_p)
11946 *total = sparc_costs->float_plusminus;
11947 else
11948 *total = COSTS_N_INSNS (1);
11949 return false;
11951 case FMA:
11953 rtx sub;
11955 gcc_assert (float_mode_p);
11956 *total = sparc_costs->float_mul;
11958 sub = XEXP (x, 0);
11959 if (GET_CODE (sub) == NEG)
11960 sub = XEXP (sub, 0);
11961 *total += rtx_cost (sub, mode, FMA, 0, speed);
11963 sub = XEXP (x, 2);
11964 if (GET_CODE (sub) == NEG)
11965 sub = XEXP (sub, 0);
11966 *total += rtx_cost (sub, mode, FMA, 2, speed);
11967 return true;
11970 case MULT:
11971 if (float_mode_p)
11972 *total = sparc_costs->float_mul;
11973 else if (TARGET_ARCH32 && !TARGET_HARD_MUL)
11974 *total = COSTS_N_INSNS (25);
11975 else
11977 int bit_cost;
11979 bit_cost = 0;
11980 if (sparc_costs->int_mul_bit_factor)
11982 int nbits;
11984 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
11986 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
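/* value &= value - 1 clears the lowest set bit, so this loop
   counts the set bits (Kernighan's population count).  */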
11987 for (nbits = 0; value != 0; value &= value - 1)
11988 nbits++;
11990 else
11991 nbits = 7;
11993 if (nbits < 3)
11994 nbits = 3;
11995 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
11996 bit_cost = COSTS_N_INSNS (bit_cost);
11999 if (mode == DImode || !TARGET_HARD_MUL)
12000 *total = sparc_costs->int_mulX + bit_cost;
12001 else
12002 *total = sparc_costs->int_mul + bit_cost;
12004 return false;
12006 case ASHIFT:
12007 case ASHIFTRT:
12008 case LSHIFTRT:
12009 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
12010 return false;
12012 case DIV:
12013 case UDIV:
12014 case MOD:
12015 case UMOD:
12016 if (float_mode_p)
12018 if (mode == DFmode)
12019 *total = sparc_costs->float_div_df;
12020 else
12021 *total = sparc_costs->float_div_sf;
12023 else
12025 if (mode == DImode)
12026 *total = sparc_costs->int_divX;
12027 else
12028 *total = sparc_costs->int_div;
12030 return false;
12032 case NEG:
12033 if (! float_mode_p)
12035 *total = COSTS_N_INSNS (1);
12036 return false;
12038 /* FALLTHRU */
12040 case ABS:
12041 case FLOAT:
12042 case UNSIGNED_FLOAT:
12043 case FIX:
12044 case UNSIGNED_FIX:
12045 case FLOAT_EXTEND:
12046 case FLOAT_TRUNCATE:
12047 *total = sparc_costs->float_move;
12048 return false;
12050 case SQRT:
12051 if (mode == DFmode)
12052 *total = sparc_costs->float_sqrt_df;
12053 else
12054 *total = sparc_costs->float_sqrt_sf;
12055 return false;
12057 case COMPARE:
12058 if (float_mode_p)
12059 *total = sparc_costs->float_cmp;
12060 else
12061 *total = COSTS_N_INSNS (1);
12062 return false;
12064 case IF_THEN_ELSE:
12065 if (float_mode_p)
12066 *total = sparc_costs->float_cmove;
12067 else
12068 *total = sparc_costs->int_cmove;
12069 return false;
12071 case IOR:
12072 /* Handle the NAND vector patterns. */
12073 if (sparc_vector_mode_supported_p (mode)
12074 && GET_CODE (XEXP (x, 0)) == NOT
12075 && GET_CODE (XEXP (x, 1)) == NOT)
12077 *total = COSTS_N_INSNS (1);
12078 return true;
12080 else
12081 return false;
12083 default:
12084 return false;
12088 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
12090 static inline bool
12091 general_or_i64_p (reg_class_t rclass)
12093 return (rclass == GENERAL_REGS || rclass == I64_REGS);
12096 /* Implement TARGET_REGISTER_MOVE_COST. */
12098 static int
12099 sparc_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
12100 reg_class_t from, reg_class_t to)
12102 bool need_memory = false;
12104 /* This helps postreload CSE to eliminate redundant comparisons. */
12105 if (from == NO_REGS || to == NO_REGS)
12106 return 100;
12108 if (from == FPCC_REGS || to == FPCC_REGS)
12109 need_memory = true;
12110 else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
12111 || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
12113 if (TARGET_VIS3)
12115 int size = GET_MODE_SIZE (mode);
12116 if (size == 8 || size == 4)
12118 if (! TARGET_ARCH32 || size == 4)
12119 return 4;
12120 else
12121 return 6;
12124 need_memory = true;
12127 if (need_memory)
12129 if (sparc_cpu == PROCESSOR_ULTRASPARC
12130 || sparc_cpu == PROCESSOR_ULTRASPARC3
12131 || sparc_cpu == PROCESSOR_NIAGARA
12132 || sparc_cpu == PROCESSOR_NIAGARA2
12133 || sparc_cpu == PROCESSOR_NIAGARA3
12134 || sparc_cpu == PROCESSOR_NIAGARA4
12135 || sparc_cpu == PROCESSOR_NIAGARA7
12136 || sparc_cpu == PROCESSOR_M8)
12137 return 12;
12139 return 6;
12142 return 2;
12145 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
12146 This is achieved by means of a manual dynamic stack space allocation in
12147 the current frame. We make the assumption that SEQ doesn't contain any
12148 function calls, with the possible exception of calls to the GOT helper. */
12150 static void
12151 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
12153 /* We must preserve the lowest 16 words for the register save area. */
12154 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
12155 /* We really need only 2 words of fresh stack space. */
12156 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
12158 rtx slot
12159 = gen_rtx_MEM (word_mode, plus_constant (Pmode, stack_pointer_rtx,
12160 SPARC_STACK_BIAS + offset));
12162 emit_insn (gen_stack_pointer_inc (GEN_INT (-size)));
12163 emit_insn (gen_rtx_SET (slot, reg));
12164 if (reg2)
12165 emit_insn (gen_rtx_SET (adjust_address (slot, word_mode, UNITS_PER_WORD),
12166 reg2));
12167 emit_insn (seq);
12168 if (reg2)
12169 emit_insn (gen_rtx_SET (reg2,
12170 adjust_address (slot, word_mode, UNITS_PER_WORD)));
12171 emit_insn (gen_rtx_SET (reg, slot));
12172 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
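/* Schematically (illustrative), with 4-byte words this emits:

     add %sp, -SIZE, %sp
     st  REG,  [%sp+BIAS+64]
     st  REG2, [%sp+BIAS+68]    ! if REG2
     ... SEQ ...
     ld  [%sp+BIAS+68], REG2    ! if REG2
     ld  [%sp+BIAS+64], REG
     add %sp, SIZE, %sp  */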
12175 /* Output the assembler code for a thunk function. THUNK_DECL is the
12176 declaration for the thunk function itself, FUNCTION is the decl for
12177 the target function. DELTA is an immediate constant offset to be
12178 added to THIS. If VCALL_OFFSET is nonzero, the word at address
12179 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
12181 static void
12182 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
12183 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12184 tree function)
12186 rtx this_rtx, funexp;
12187 rtx_insn *insn;
12188 unsigned int int_arg_first;
12190 reload_completed = 1;
12191 epilogue_completed = 1;
12193 emit_note (NOTE_INSN_PROLOGUE_END);
12195 if (TARGET_FLAT)
12197 sparc_leaf_function_p = 1;
12199 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
12201 else if (flag_delayed_branch)
12203 /* We will emit a regular sibcall below, so we need to instruct
12204 output_sibcall that we are in a leaf function. */
12205 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 1;
12207 /* This will cause final.c to invoke leaf_renumber_regs so we
12208 must behave as if we were in a not-yet-leafified function. */
12209 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
12211 else
12213 /* We will emit the sibcall manually below, so we will need to
12214 manually spill non-leaf registers. */
12215 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 0;
12217 /* We really are in a leaf function. */
12218 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
12221 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
12222 returns a structure, the structure return pointer is there instead. */
12223 if (TARGET_ARCH64
12224 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12225 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
12226 else
12227 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
12229 /* Add DELTA. When possible use a plain add, otherwise load it into
12230 a register first. */
12231 if (delta)
12233 rtx delta_rtx = GEN_INT (delta);
12235 if (! SPARC_SIMM13_P (delta))
12237 rtx scratch = gen_rtx_REG (Pmode, 1);
12238 emit_move_insn (scratch, delta_rtx);
12239 delta_rtx = scratch;
12242 /* THIS_RTX += DELTA. */
12243 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
12246 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
12247 if (vcall_offset)
12249 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
12250 rtx scratch = gen_rtx_REG (Pmode, 1);
12252 gcc_assert (vcall_offset < 0);
12254 /* SCRATCH = *THIS_RTX. */
12255 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
12257 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
12258 may not have any available scratch register at this point. */
12259 if (SPARC_SIMM13_P (vcall_offset))
12261 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
12262 else if (! fixed_regs[5]
12263 /* The below sequence is made up of at least 2 insns,
12264 while the default method may need only one. */
12265 && vcall_offset < -8192)
12267 rtx scratch2 = gen_rtx_REG (Pmode, 5);
12268 emit_move_insn (scratch2, vcall_offset_rtx);
12269 vcall_offset_rtx = scratch2;
12271 else
12273 rtx increment = GEN_INT (-4096);
12275 /* VCALL_OFFSET is a negative number whose typical range can be
12276 estimated as -32768..0 in 32-bit mode. In almost all cases
12277 it is therefore cheaper to emit multiple add insns than
12278 spilling and loading the constant into a register (at least
12279 6 insns). */
12280 while (! SPARC_SIMM13_P (vcall_offset))
12282 emit_insn (gen_add2_insn (scratch, increment));
12283 vcall_offset += 4096;
12285 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
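/* E.g. (illustrative) VCALL_OFFSET = -20000 emits four
   "add SCRATCH, -4096" insns here, leaving -3616, which fits in
   a signed 13-bit immediate.  */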
12288 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
12289 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
12290 gen_rtx_PLUS (Pmode,
12291 scratch,
12292 vcall_offset_rtx)));
12294 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
12295 emit_insn (gen_add2_insn (this_rtx, scratch));
12298 /* Generate a tail call to the target function. */
12299 if (! TREE_USED (function))
12301 assemble_external (function);
12302 TREE_USED (function) = 1;
12304 funexp = XEXP (DECL_RTL (function), 0);
12306 if (flag_delayed_branch)
12308 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
12309 insn = emit_call_insn (gen_sibcall (funexp));
12310 SIBLING_CALL_P (insn) = 1;
12312 else
12314 /* The hoops we have to jump through in order to generate a sibcall
12315 without using delay slots... */
12316 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
12318 if (flag_pic)
12320 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
12321 start_sequence ();
12322 load_got_register (); /* clobbers %o7 */
12323 if (!TARGET_VXWORKS_RTP)
12324 pic_offset_table_rtx = global_offset_table_rtx;
12325 scratch = sparc_legitimize_pic_address (funexp, scratch);
12326 seq = get_insns ();
12327 end_sequence ();
12328 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
12330 else if (TARGET_ARCH32)
12332 emit_insn (gen_rtx_SET (scratch,
12333 gen_rtx_HIGH (SImode, funexp)));
12334 emit_insn (gen_rtx_SET (scratch,
12335 gen_rtx_LO_SUM (SImode, scratch, funexp)));
12337 else /* TARGET_ARCH64 */
12339 switch (sparc_cmodel)
12341 case CM_MEDLOW:
12342 case CM_MEDMID:
12343 /* The destination can serve as a temporary. */
12344 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
12345 break;
12347 case CM_MEDANY:
12348 case CM_EMBMEDANY:
12349 /* The destination cannot serve as a temporary. */
12350 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
12351 start_sequence ();
12352 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
12353 seq = get_insns ();
12354 end_sequence ();
12355 emit_and_preserve (seq, spill_reg, 0);
12356 break;
12358 default:
12359 gcc_unreachable ();
12363 emit_jump_insn (gen_indirect_jump (scratch));
12366 emit_barrier ();
12368 /* Run just enough of rest_of_compilation to get the insns emitted.
12369 There's not really enough bulk here to make other passes such as
12370 instruction scheduling worth while. Note that use_thunk calls
12371 assemble_start_function and assemble_end_function. */
12372 insn = get_insns ();
12373 shorten_branches (insn);
12374 final_start_function (insn, file, 1);
12375 final (insn, file, 1);
12376 final_end_function ();
12378 reload_completed = 0;
12379 epilogue_completed = 0;
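/* For a simple thunk, e.g. DELTA = 8 and VCALL_OFFSET = 0, the net
   effect is just "add %o0, 8, %o0" followed by a sibling call to
   FUNCTION (illustrative).  */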
12382 /* Return true if sparc_output_mi_thunk would be able to output the
12383 assembler code for the thunk function specified by the arguments
12384 it is passed, and false otherwise. */
12385 static bool
12386 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
12387 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
12388 HOST_WIDE_INT vcall_offset,
12389 const_tree function ATTRIBUTE_UNUSED)
12391 /* Bound the loop used in the default method above. */
12392 return (vcall_offset >= -32768 || ! fixed_regs[5]);
12395 /* How to allocate a 'struct machine_function'. */
12397 static struct machine_function *
12398 sparc_init_machine_status (void)
12400 return ggc_cleared_alloc<machine_function> ();
12403 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
12404 We need to emit DTP-relative relocations. */
12406 static void
12407 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
12409 switch (size)
12411 case 4:
12412 fputs ("\t.word\t%r_tls_dtpoff32(", file);
12413 break;
12414 case 8:
12415 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
12416 break;
12417 default:
12418 gcc_unreachable ();
12420 output_addr_const (file, x);
12421 fputs (")", file);
12424 /* Do whatever processing is required at the end of a file. */
12426 static void
12427 sparc_file_end (void)
12429 /* If we need to emit the special GOT helper function, do so now. */
12430 if (got_helper_rtx)
12432 const char *name = XSTR (got_helper_rtx, 0);
12433 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
12434 #ifdef DWARF2_UNWIND_INFO
12435 bool do_cfi;
12436 #endif
12438 if (USE_HIDDEN_LINKONCE)
12440 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
12441 get_identifier (name),
12442 build_function_type_list (void_type_node,
12443 NULL_TREE));
12444 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
12445 NULL_TREE, void_type_node);
12446 TREE_PUBLIC (decl) = 1;
12447 TREE_STATIC (decl) = 1;
12448 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
12449 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
12450 DECL_VISIBILITY_SPECIFIED (decl) = 1;
12451 resolve_unique_section (decl, 0, flag_function_sections);
12452 allocate_struct_function (decl, true);
12453 cfun->is_thunk = 1;
12454 current_function_decl = decl;
12455 init_varasm_status ();
12456 assemble_start_function (decl, name);
12458 else
12460 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
12461 switch_to_section (text_section);
12462 if (align > 0)
12463 ASM_OUTPUT_ALIGN (asm_out_file, align);
12464 ASM_OUTPUT_LABEL (asm_out_file, name);
12467 #ifdef DWARF2_UNWIND_INFO
12468 do_cfi = dwarf2out_do_cfi_asm ();
12469 if (do_cfi)
12470 fprintf (asm_out_file, "\t.cfi_startproc\n");
12471 #endif
12472 if (flag_delayed_branch)
12473 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
12474 reg_name, reg_name);
12475 else
12476 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
12477 reg_name, reg_name);
12478 #ifdef DWARF2_UNWIND_INFO
12479 if (do_cfi)
12480 fprintf (asm_out_file, "\t.cfi_endproc\n");
12481 #endif
12484 if (NEED_INDICATE_EXEC_STACK)
12485 file_end_indicate_exec_stack ();
12487 #ifdef TARGET_SOLARIS
12488 solaris_file_end ();
12489 #endif
12492 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
12493 /* Implement TARGET_MANGLE_TYPE. */
12495 static const char *
12496 sparc_mangle_type (const_tree type)
12498 if (TARGET_ARCH32
12499 && TYPE_MAIN_VARIANT (type) == long_double_type_node
12500 && TARGET_LONG_DOUBLE_128)
12501 return "g";
12503 /* For all other types, use normal C++ mangling. */
12504 return NULL;
12506 #endif
12508 /* Expand a membar instruction for various use cases. Both the LOAD_STORE
12509 and BEFORE_AFTER arguments are of the form X_Y. They are two-bit masks where
12510 bit 0 indicates that X is true, and bit 1 indicates Y is true. */
12512 void
12513 sparc_emit_membar_for_model (enum memmodel model,
12514 int load_store, int before_after)
12516 /* Bits for the MEMBAR mmask field. */
12517 const int LoadLoad = 1;
12518 const int StoreLoad = 2;
12519 const int LoadStore = 4;
12520 const int StoreStore = 8;
12522 int mm = 0, implied = 0;
12524 switch (sparc_memory_model)
12526 case SMM_SC:
12527 /* Sequential Consistency. All memory transactions are immediately
12528 visible in sequential execution order. No barriers needed. */
12529 implied = LoadLoad | StoreLoad | LoadStore | StoreStore;
12530 break;
12532 case SMM_TSO:
12533 /* Total Store Ordering: all memory transactions with store semantics
12534 are followed by an implied StoreStore. */
12535 implied |= StoreStore;
12537 /* If we're not looking for a raw barrier (before+after), then atomic
12538 operations get the benefit of being both load and store. */
12539 if (load_store == 3 && before_after == 1)
12540 implied |= StoreLoad;
12541 /* FALLTHRU */
12543 case SMM_PSO:
12544 /* Partial Store Ordering: all memory transactions with load semantics
12545 are followed by an implied LoadLoad | LoadStore. */
12546 implied |= LoadLoad | LoadStore;
12548 /* If we're not looking for a raw barrier (before+after), then atomic
12549 operations get the benefit of being both load and store. */
12550 if (load_store == 3 && before_after == 2)
12551 implied |= StoreLoad | StoreStore;
12552 /* FALLTHRU */
12554 case SMM_RMO:
12555 /* Relaxed Memory Ordering: no implicit bits. */
12556 break;
12558 default:
12559 gcc_unreachable ();
12562 if (before_after & 1)
12564 if (is_mm_release (model) || is_mm_acq_rel (model)
12565 || is_mm_seq_cst (model))
12567 if (load_store & 1)
12568 mm |= LoadLoad | StoreLoad;
12569 if (load_store & 2)
12570 mm |= LoadStore | StoreStore;
12573 if (before_after & 2)
12575 if (is_mm_acquire (model) || is_mm_acq_rel (model)
12576 || is_mm_seq_cst (model))
12578 if (load_store & 1)
12579 mm |= LoadLoad | LoadStore;
12580 if (load_store & 2)
12581 mm |= StoreLoad | StoreStore;
12585 /* Remove the bits implied by the system memory model. */
12586 mm &= ~implied;
12588 /* For raw barriers (before+after), always emit a barrier.
12589 This will become a compile-time barrier if needed. */
12590 if (mm || before_after == 3)
12591 emit_insn (gen_membar (GEN_INT (mm)));
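/* Example: with -mmemory-model=sc all four bits are implied, so MM
   always folds to zero and only a raw barrier (BEFORE_AFTER == 3)
   still emits a membar, one with an empty mask, i.e. effectively a
   compile-time-only barrier.  */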
12594 /* Expand code to perform an 8- or 16-bit compare and swap by doing a 32-bit
12595 compare and swap on the word containing the byte or half-word. */
12597 static void
12598 sparc_expand_compare_and_swap_12 (rtx bool_result, rtx result, rtx mem,
12599 rtx oldval, rtx newval)
12601 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
12602 rtx addr = gen_reg_rtx (Pmode);
12603 rtx off = gen_reg_rtx (SImode);
12604 rtx oldv = gen_reg_rtx (SImode);
12605 rtx newv = gen_reg_rtx (SImode);
12606 rtx oldvalue = gen_reg_rtx (SImode);
12607 rtx newvalue = gen_reg_rtx (SImode);
12608 rtx res = gen_reg_rtx (SImode);
12609 rtx resv = gen_reg_rtx (SImode);
12610 rtx memsi, val, mask, cc;
12612 emit_insn (gen_rtx_SET (addr, gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
12614 if (Pmode != SImode)
12615 addr1 = gen_lowpart (SImode, addr1);
12616 emit_insn (gen_rtx_SET (off, gen_rtx_AND (SImode, addr1, GEN_INT (3))));
12618 memsi = gen_rtx_MEM (SImode, addr);
12619 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
12620 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
12622 val = copy_to_reg (memsi);
12624 emit_insn (gen_rtx_SET (off,
12625 gen_rtx_XOR (SImode, off,
12626 GEN_INT (GET_MODE (mem) == QImode
12627 ? 3 : 2))));
12629 emit_insn (gen_rtx_SET (off, gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
12631 if (GET_MODE (mem) == QImode)
12632 mask = force_reg (SImode, GEN_INT (0xff));
12633 else
12634 mask = force_reg (SImode, GEN_INT (0xffff));
12636 emit_insn (gen_rtx_SET (mask, gen_rtx_ASHIFT (SImode, mask, off)));
12638 emit_insn (gen_rtx_SET (val,
12639 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12640 val)));
12642 oldval = gen_lowpart (SImode, oldval);
12643 emit_insn (gen_rtx_SET (oldv, gen_rtx_ASHIFT (SImode, oldval, off)));
12645 newval = gen_lowpart_common (SImode, newval);
12646 emit_insn (gen_rtx_SET (newv, gen_rtx_ASHIFT (SImode, newval, off)));
12648 emit_insn (gen_rtx_SET (oldv, gen_rtx_AND (SImode, oldv, mask)));
12650 emit_insn (gen_rtx_SET (newv, gen_rtx_AND (SImode, newv, mask)));
12652 rtx_code_label *end_label = gen_label_rtx ();
12653 rtx_code_label *loop_label = gen_label_rtx ();
12654 emit_label (loop_label);
12656 emit_insn (gen_rtx_SET (oldvalue, gen_rtx_IOR (SImode, oldv, val)));
12658 emit_insn (gen_rtx_SET (newvalue, gen_rtx_IOR (SImode, newv, val)));
12660 emit_move_insn (bool_result, const1_rtx);
12662 emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue));
12664 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
12666 emit_insn (gen_rtx_SET (resv,
12667 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12668 res)));
12670 emit_move_insn (bool_result, const0_rtx);
12672 cc = gen_compare_reg_1 (NE, resv, val);
12673 emit_insn (gen_rtx_SET (val, resv));
12675 /* Use cbranchcc4 to separate the compare and branch! */
12676 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
12677 cc, const0_rtx, loop_label));
12679 emit_label (end_label);
12681 emit_insn (gen_rtx_SET (res, gen_rtx_AND (SImode, res, mask)));
12683 emit_insn (gen_rtx_SET (res, gen_rtx_LSHIFTRT (SImode, res, off)));
12685 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
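/* A minimal C model of the technique above (illustrative, not part of
   the port): emulate a one-byte compare-and-swap with a 32-bit CAS on
   the enclosing aligned word, assuming a big-endian layout as on SPARC
   and the GCC __atomic builtins.  */

#include <stdint.h>
#include <stdbool.h>

static bool
byte_cas (uint8_t *p, uint8_t expected, uint8_t desired)
{
  uint32_t *wp = (uint32_t *) ((uintptr_t) p & ~(uintptr_t) 3);
  /* Mirrors the "off ^ 3" and "<< 3" computation above: byte 0 is the
     most significant byte on a big-endian target.  */
  unsigned shift = (((uintptr_t) p & 3) ^ 3) * 8;
  uint32_t mask = (uint32_t) 0xff << shift;
  uint32_t oldv = (uint32_t) expected << shift;
  uint32_t newv = (uint32_t) desired << shift;
  uint32_t word = __atomic_load_n (wp, __ATOMIC_RELAXED);

  for (;;)
    {
      /* Splice the expected/desired byte into the current value of the
         surrounding bytes (the VAL register in the expander above).  */
      uint32_t cmp = (word & ~mask) | oldv;
      uint32_t set = (word & ~mask) | newv;
      uint32_t prev = cmp;
      if (__atomic_compare_exchange_n (wp, &prev, set, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
        return true;
      if ((prev & mask) != oldv)
        return false;   /* Our byte really differs: failure.  */
      word = prev;      /* Only neighbouring bytes changed: retry.  */
    }
}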
12688 /* Expand code to perform a compare-and-swap. */
12690 void
12691 sparc_expand_compare_and_swap (rtx operands[])
12693 rtx bval, retval, mem, oldval, newval;
12694 machine_mode mode;
12695 enum memmodel model;
12697 bval = operands[0];
12698 retval = operands[1];
12699 mem = operands[2];
12700 oldval = operands[3];
12701 newval = operands[4];
12702 model = (enum memmodel) INTVAL (operands[6]);
12703 mode = GET_MODE (mem);
12705 sparc_emit_membar_for_model (model, 3, 1);
12707 if (reg_overlap_mentioned_p (retval, oldval))
12708 oldval = copy_to_reg (oldval);
12710 if (mode == QImode || mode == HImode)
12711 sparc_expand_compare_and_swap_12 (bval, retval, mem, oldval, newval);
12712 else
12714 rtx (*gen) (rtx, rtx, rtx, rtx);
12715 rtx x;
12717 if (mode == SImode)
12718 gen = gen_atomic_compare_and_swapsi_1;
12719 else
12720 gen = gen_atomic_compare_and_swapdi_1;
12721 emit_insn (gen (retval, mem, oldval, newval));
12723 x = emit_store_flag (bval, EQ, retval, oldval, mode, 1, 1);
12724 if (x != bval)
12725 convert_move (bval, x, 1);
12728 sparc_emit_membar_for_model (model, 3, 2);
12731 void
12732 sparc_expand_vec_perm_bmask (machine_mode vmode, rtx sel)
12734 rtx t_1, t_2, t_3;
12736 sel = gen_lowpart (DImode, sel);
12737 switch (vmode)
12739 case E_V2SImode:
12740 /* inp = xxxxxxxAxxxxxxxB */
12741 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12742 NULL_RTX, 1, OPTAB_DIRECT);
12743 /* t_1 = ....xxxxxxxAxxx. */
12744 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12745 GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
12746 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12747 GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
12748 /* sel = .......B */
12749 /* t_1 = ...A.... */
12750 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12751 /* sel = ...A...B */
12752 sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
12753 /* sel = AAAABBBB * 4 */
12754 t_1 = force_reg (SImode, GEN_INT (0x01230123));
12755 /* sel = { A*4, A*4+1, A*4+2, ... } */
12756 break;
12758 case E_V4HImode:
12759 /* inp = xxxAxxxBxxxCxxxD */
12760 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
12761 NULL_RTX, 1, OPTAB_DIRECT);
12762 t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12763 NULL_RTX, 1, OPTAB_DIRECT);
12764 t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
12765 NULL_RTX, 1, OPTAB_DIRECT);
12766 /* t_1 = ..xxxAxxxBxxxCxx */
12767 /* t_2 = ....xxxAxxxBxxxC */
12768 /* t_3 = ......xxxAxxxBxx */
12769 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12770 GEN_INT (0x07),
12771 NULL_RTX, 1, OPTAB_DIRECT);
12772 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12773 GEN_INT (0x0700),
12774 NULL_RTX, 1, OPTAB_DIRECT);
12775 t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
12776 GEN_INT (0x070000),
12777 NULL_RTX, 1, OPTAB_DIRECT);
12778 t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
12779 GEN_INT (0x07000000),
12780 NULL_RTX, 1, OPTAB_DIRECT);
12781 /* sel = .......D */
12782 /* t_1 = .....C.. */
12783 /* t_2 = ...B.... */
12784 /* t_3 = .A...... */
12785 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12786 t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
12787 sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
12788 /* sel = .A.B.C.D */
12789 sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
12790 /* sel = AABBCCDD * 2 */
12791 t_1 = force_reg (SImode, GEN_INT (0x01010101));
12792 /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
12793 break;
12795 case E_V8QImode:
12796 /* input = xAxBxCxDxExFxGxH */
12797 sel = expand_simple_binop (DImode, AND, sel,
12798 GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
12799 | 0x0f0f0f0f),
12800 NULL_RTX, 1, OPTAB_DIRECT);
12801 /* sel = .A.B.C.D.E.F.G.H */
12802 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
12803 NULL_RTX, 1, OPTAB_DIRECT);
12804 /* t_1 = ..A.B.C.D.E.F.G. */
12805 sel = expand_simple_binop (DImode, IOR, sel, t_1,
12806 NULL_RTX, 1, OPTAB_DIRECT);
12807 /* sel = .AABBCCDDEEFFGGH */
12808 sel = expand_simple_binop (DImode, AND, sel,
12809 GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
12810 | 0xff00ff),
12811 NULL_RTX, 1, OPTAB_DIRECT);
12812 /* sel = ..AB..CD..EF..GH */
12813 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
12814 NULL_RTX, 1, OPTAB_DIRECT);
12815 /* t_1 = ....AB..CD..EF.. */
12816 sel = expand_simple_binop (DImode, IOR, sel, t_1,
12817 NULL_RTX, 1, OPTAB_DIRECT);
12818 /* sel = ..ABABCDCDEFEFGH */
12819 sel = expand_simple_binop (DImode, AND, sel,
12820 GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
12821 NULL_RTX, 1, OPTAB_DIRECT);
12822 /* sel = ....ABCD....EFGH */
12823 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12824 NULL_RTX, 1, OPTAB_DIRECT);
12825 /* t_1 = ........ABCD.... */
12826 sel = gen_lowpart (SImode, sel);
12827 t_1 = gen_lowpart (SImode, t_1);
12828 break;
12830 default:
12831 gcc_unreachable ();
12834 /* Always perform the final addition/merge within the bmask insn. */
12835 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, t_1));
12838 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
12840 static bool
12841 sparc_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
12842 rtx op1, const vec_perm_indices &sel)
12844 if (!TARGET_VIS2)
12845 return false;
12847 /* All permutes are supported. */
12848 if (!target)
12849 return true;
12851 /* Force target-independent code to convert constant permutations on other
12852 modes down to V8QI. Rely on this to avoid the complexity of the byte
12853 order of the permutation. */
12854 if (vmode != V8QImode)
12855 return false;
12857 unsigned int i, mask;
12858 for (i = mask = 0; i < 8; ++i)
12859 mask |= (sel[i] & 0xf) << (28 - i*4);
12860 rtx mask_rtx = force_reg (SImode, gen_int_mode (mask, SImode));
12862 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), mask_rtx, const0_rtx));
12863 emit_insn (gen_bshufflev8qi_vis (target, op0, op1));
12864 return true;
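/* Worked example (illustrative): the even-interleave selector
   { 0, 8, 1, 9, 2, 10, 3, 11 } packs into MASK = 0x08192a3b; BMASK
   loads it into %gsr and BSHUFFLE then picks the corresponding bytes
   of the op0:op1 concatenation.  */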
12867 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
12869 static bool
12870 sparc_frame_pointer_required (void)
12872 /* If the stack pointer is dynamically modified in the function, it cannot
12873 serve as the frame pointer. */
12874 if (cfun->calls_alloca)
12875 return true;
12877 /* If the function receives nonlocal gotos, it needs to save the frame
12878 pointer in the nonlocal_goto_save_area object. */
12879 if (cfun->has_nonlocal_label)
12880 return true;
12882 /* In flat mode, that's it. */
12883 if (TARGET_FLAT)
12884 return false;
12886 /* Otherwise, the frame pointer is required if the function isn't leaf, but
12887 we cannot use sparc_leaf_function_p since it hasn't been computed yet. */
12888 return !(optimize > 0 && crtl->is_leaf && only_leaf_regs_used ());
12891 /* The way this is structured, we can't eliminate SFP in favor of SP
12892 if the frame pointer is required: we want to use the SFP->HFP elimination
12893 in that case. But the test in update_eliminables doesn't know we are
12894 assuming below that we only do the former elimination. */
12896 static bool
12897 sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
12899 return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
12902 /* Return the hard frame pointer directly to bypass the stack bias. */
12904 static rtx
12905 sparc_builtin_setjmp_frame_value (void)
12907 return hard_frame_pointer_rtx;
12910 /* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
12911 they won't be allocated. */
12913 static void
12914 sparc_conditional_register_usage (void)
12916 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
12918 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12919 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12921 /* If the user has passed -f{fixed,call-{used,saved}}-g5,
12922 then honor it. */
12923 if (TARGET_ARCH32 && fixed_regs[5])
12924 fixed_regs[5] = 1;
12925 else if (TARGET_ARCH64 && fixed_regs[5] == 2)
12926 fixed_regs[5] = 0;
12927 if (! TARGET_V9)
12929 int regno;
12930 for (regno = SPARC_FIRST_V9_FP_REG;
12931 regno <= SPARC_LAST_V9_FP_REG;
12932 regno++)
12933 fixed_regs[regno] = 1;
12934 /* %fcc0 is used by v8 and v9. */
12935 for (regno = SPARC_FIRST_V9_FCC_REG + 1;
12936 regno <= SPARC_LAST_V9_FCC_REG;
12937 regno++)
12938 fixed_regs[regno] = 1;
12940 if (! TARGET_FPU)
12942 int regno;
12943 for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
12944 fixed_regs[regno] = 1;
12946 /* If the user has passed -f{fixed,call-{used,saved}}-g2,
12947 then honor it. Likewise with g3 and g4. */
12948 if (fixed_regs[2] == 2)
12949 fixed_regs[2] = ! TARGET_APP_REGS;
12950 if (fixed_regs[3] == 2)
12951 fixed_regs[3] = ! TARGET_APP_REGS;
12952 if (TARGET_ARCH32 && fixed_regs[4] == 2)
12953 fixed_regs[4] = ! TARGET_APP_REGS;
12954 else if (TARGET_CM_EMBMEDANY)
12955 fixed_regs[4] = 1;
12956 else if (fixed_regs[4] == 2)
12957 fixed_regs[4] = 0;
12958 if (TARGET_FLAT)
12960 int regno;
12961 /* Disable leaf functions. */
12962 memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
12963 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
12964 leaf_reg_remap [regno] = regno;
12966 if (TARGET_VIS)
12967 global_regs[SPARC_GSR_REG] = 1;
12970 /* Implement TARGET_USE_PSEUDO_PIC_REG. */
12972 static bool
12973 sparc_use_pseudo_pic_reg (void)
12975 return !TARGET_VXWORKS_RTP && flag_pic;
12978 /* Implement TARGET_INIT_PIC_REG. */
12980 static void
12981 sparc_init_pic_reg (void)
12983 edge entry_edge;
12984 rtx_insn *seq;
12986 if (!crtl->uses_pic_offset_table)
12987 return;
12989 start_sequence ();
12990 load_got_register ();
12991 if (!TARGET_VXWORKS_RTP)
12992 emit_move_insn (pic_offset_table_rtx, global_offset_table_rtx);
12993 seq = get_insns ();
12994 end_sequence ();
12996 entry_edge = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
12997 insert_insn_on_edge (seq, entry_edge);
12998 commit_one_edge_insertion (entry_edge);
13001 /* Implement TARGET_PREFERRED_RELOAD_CLASS:
13003 - We can't load constants into FP registers.
13004 - We can't load FP constants into integer registers when soft-float,
13005 because there is no soft-float pattern with an r/F constraint.
13006 - We can't load FP constants into integer registers for TFmode unless
13007 it is 0.0L, because there is no movtf pattern with an r/F constraint.
13008 - Try and reload integer constants (symbolic or otherwise) back into
13009 registers directly, rather than having them dumped to memory. */
13011 static reg_class_t
13012 sparc_preferred_reload_class (rtx x, reg_class_t rclass)
13014 machine_mode mode = GET_MODE (x);
13015 if (CONSTANT_P (x))
13017 if (FP_REG_CLASS_P (rclass)
13018 || rclass == GENERAL_OR_FP_REGS
13019 || rclass == GENERAL_OR_EXTRA_FP_REGS
13020 || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
13021 || (mode == TFmode && ! const_zero_operand (x, mode)))
13022 return NO_REGS;
13024 if (GET_MODE_CLASS (mode) == MODE_INT)
13025 return GENERAL_REGS;
13027 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13029 if (! FP_REG_CLASS_P (rclass)
13030 || !(const_zero_operand (x, mode)
13031 || const_all_ones_operand (x, mode)))
13032 return NO_REGS;
13036 if (TARGET_VIS3
13037 && ! TARGET_ARCH64
13038 && (rclass == EXTRA_FP_REGS
13039 || rclass == GENERAL_OR_EXTRA_FP_REGS))
13041 int regno = true_regnum (x);
13043 if (SPARC_INT_REG_P (regno))
13044 return (rclass == EXTRA_FP_REGS
13045 ? FP_REGS : GENERAL_OR_FP_REGS);
13048 return rclass;
13051 /* Return true if we use LRA instead of the reload pass. */
13053 static bool
13054 sparc_lra_p (void)
13056 return TARGET_LRA;
13059 /* Output a wide multiply instruction in V8+ mode. INSN is the instruction,
13060 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
13062 const char *
13063 output_v8plus_mult (rtx_insn *insn, rtx *operands, const char *opcode)
13065 char mulstr[32];
13067 gcc_assert (! TARGET_ARCH64);
13069 if (sparc_check_64 (operands[1], insn) <= 0)
13070 output_asm_insn ("srl\t%L1, 0, %L1", operands);
13071 if (which_alternative == 1)
13072 output_asm_insn ("sllx\t%H1, 32, %H1", operands);
13073 if (GET_CODE (operands[2]) == CONST_INT)
13075 if (which_alternative == 1)
13077 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13078 sprintf (mulstr, "%s\t%%H1, %%2, %%L0", opcode);
13079 output_asm_insn (mulstr, operands);
13080 return "srlx\t%L0, 32, %H0";
13082 else
13084 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13085 output_asm_insn ("or\t%L1, %3, %3", operands);
13086 sprintf (mulstr, "%s\t%%3, %%2, %%3", opcode);
13087 output_asm_insn (mulstr, operands);
13088 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13089 return "mov\t%3, %L0";
13092 else if (rtx_equal_p (operands[1], operands[2]))
13094 if (which_alternative == 1)
13096 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13097 sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", opcode);
13098 output_asm_insn (mulstr, operands);
13099 return "srlx\t%L0, 32, %H0";
13101 else
13103 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13104 output_asm_insn ("or\t%L1, %3, %3", operands);
13105 sprintf (mulstr, "%s\t%%3, %%3, %%3", opcode);
13106 output_asm_insn (mulstr, operands);
13107 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13108 return "mov\t%3, %L0";
13111 if (sparc_check_64 (operands[2], insn) <= 0)
13112 output_asm_insn ("srl\t%L2, 0, %L2", operands);
13113 if (which_alternative == 1)
13115 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13116 output_asm_insn ("sllx\t%H2, 32, %L1", operands);
13117 output_asm_insn ("or\t%L2, %L1, %L1", operands);
13118 sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", opcode);
13119 output_asm_insn (mulstr, operands);
13120 return "srlx\t%L0, 32, %H0";
13122 else
13124 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13125 output_asm_insn ("sllx\t%H2, 32, %4", operands);
13126 output_asm_insn ("or\t%L1, %3, %3", operands);
13127 output_asm_insn ("or\t%L2, %4, %4", operands);
13128 sprintf (mulstr, "%s\t%%3, %%4, %%3", opcode);
13129 output_asm_insn (mulstr, operands);
13130 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13131 return "mov\t%3, %L0";
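/* Schematic output (illustrative) for the general register alternative
   with OPCODE "mulx":

     sllx %H1, 32, %3      ! glue the two halves of operand 1
     sllx %H2, 32, %4      ! and of operand 2
     or   %L1, %3, %3
     or   %L2, %4, %4
     mulx %3, %4, %3
     srlx %3, 32, %H0      ! split the 64-bit result back
     mov  %3, %L0  */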
13135 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
13136 all fields of TARGET to ELT by means of VIS2 BSHUFFLE insn. MODE
13137 and INNER_MODE are the modes describing TARGET. */
13139 static void
13140 vector_init_bshuffle (rtx target, rtx elt, machine_mode mode,
13141 machine_mode inner_mode)
13143 rtx t1, final_insn, sel;
13144 int bmask;
13146 t1 = gen_reg_rtx (mode);
13148 elt = convert_modes (SImode, inner_mode, elt, true);
13149 emit_move_insn (gen_lowpart(SImode, t1), elt);
13151 switch (mode)
13153 case E_V2SImode:
13154 final_insn = gen_bshufflev2si_vis (target, t1, t1);
13155 bmask = 0x45674567;
13156 break;
13157 case E_V4HImode:
13158 final_insn = gen_bshufflev4hi_vis (target, t1, t1);
13159 bmask = 0x67676767;
13160 break;
13161 case E_V8QImode:
13162 final_insn = gen_bshufflev8qi_vis (target, t1, t1);
13163 bmask = 0x77777777;
13164 break;
13165 default:
13166 gcc_unreachable ();
13169 sel = force_reg (SImode, GEN_INT (bmask));
13170 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, const0_rtx));
13171 emit_insn (final_insn);
13174 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
13175 all fields of TARGET to ELT in V8QI by means of VIS FPMERGE insn. */
13177 static void
13178 vector_init_fpmerge (rtx target, rtx elt)
13180 rtx t1, t2, t2_low, t3, t3_low;
13182 t1 = gen_reg_rtx (V4QImode);
13183 elt = convert_modes (SImode, QImode, elt, true);
13184 emit_move_insn (gen_lowpart (SImode, t1), elt);
13186 t2 = gen_reg_rtx (V8QImode);
13187 t2_low = gen_lowpart (V4QImode, t2);
13188 emit_insn (gen_fpmerge_vis (t2, t1, t1));
13190 t3 = gen_reg_rtx (V8QImode);
13191 t3_low = gen_lowpart (V4QImode, t3);
13192 emit_insn (gen_fpmerge_vis (t3, t2_low, t2_low));
13194 emit_insn (gen_fpmerge_vis (target, t3_low, t3_low));
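/* Each FPMERGE of a half with itself doubles the number of copies of
   ELT: the low word goes ...E -> ..EE -> EEEE, and the final merge
   yields EEEEEEEE in TARGET.  */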
13197 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
13198 all fields of TARGET to ELT in V4HI by means of VIS FALIGNDATA insn. */
13200 static void
13201 vector_init_faligndata (rtx target, rtx elt)
13203 rtx t1 = gen_reg_rtx (V4HImode);
13204 int i;
13206 elt = convert_modes (SImode, HImode, elt, true);
13207 emit_move_insn (gen_lowpart (SImode, t1), elt);
13209 emit_insn (gen_alignaddrsi_vis (gen_reg_rtx (SImode),
13210 force_reg (SImode, GEN_INT (6)),
13211 const0_rtx));
13213 for (i = 0; i < 4; i++)
13214 emit_insn (gen_faligndatav4hi_vis (target, t1, target));
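/* With the GSR offset set to 6, each FALIGNDATA extracts bytes 6..13
   of the t1:target concatenation, i.e. it shifts TARGET down by one
   half-word and inserts ELT at the top; four rounds fill all fields.  */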
13217 /* Emit code to initialize TARGET to values for individual fields VALS. */
13219 void
13220 sparc_expand_vector_init (rtx target, rtx vals)
13222 const machine_mode mode = GET_MODE (target);
13223 const machine_mode inner_mode = GET_MODE_INNER (mode);
13224 const int n_elts = GET_MODE_NUNITS (mode);
13225 int i, n_var = 0;
13226 bool all_same = true;
13227 rtx mem;
13229 for (i = 0; i < n_elts; i++)
13231 rtx x = XVECEXP (vals, 0, i);
13232 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
13233 n_var++;
13235 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
13236 all_same = false;
13239 if (n_var == 0)
13241 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
13242 return;
13245 if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (mode))
13247 if (GET_MODE_SIZE (inner_mode) == 4)
13249 emit_move_insn (gen_lowpart (SImode, target),
13250 gen_lowpart (SImode, XVECEXP (vals, 0, 0)));
13251 return;
13253 else if (GET_MODE_SIZE (inner_mode) == 8)
13255 emit_move_insn (gen_lowpart (DImode, target),
13256 gen_lowpart (DImode, XVECEXP (vals, 0, 0)));
13257 return;
13260 else if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (word_mode)
13261 && GET_MODE_SIZE (mode) == 2 * GET_MODE_SIZE (word_mode))
13263 emit_move_insn (gen_highpart (word_mode, target),
13264 gen_lowpart (word_mode, XVECEXP (vals, 0, 0)));
13265 emit_move_insn (gen_lowpart (word_mode, target),
13266 gen_lowpart (word_mode, XVECEXP (vals, 0, 1)));
13267 return;
13270 if (all_same && GET_MODE_SIZE (mode) == 8)
13272 if (TARGET_VIS2)
13274 vector_init_bshuffle (target, XVECEXP (vals, 0, 0), mode, inner_mode);
13275 return;
13277 if (mode == V8QImode)
13279 vector_init_fpmerge (target, XVECEXP (vals, 0, 0));
13280 return;
13282 if (mode == V4HImode)
13284 vector_init_faligndata (target, XVECEXP (vals, 0, 0));
13285 return;
13289 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
13290 for (i = 0; i < n_elts; i++)
13291 emit_move_insn (adjust_address_nv (mem, inner_mode,
13292 i * GET_MODE_SIZE (inner_mode)),
13293 XVECEXP (vals, 0, i));
13294 emit_move_insn (target, mem);
/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                        machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  sri->icode = CODE_FOR_nothing;
  sri->extra_cost = 0;

  /* We need a temporary when loading/storing a HImode/QImode value
     between memory and the FPU registers.  This can happen when combine puts
     a paradoxical subreg in a float/fix conversion insn.  */
  if (FP_REG_CLASS_P (rclass)
      && (mode == HImode || mode == QImode)
      && (GET_CODE (x) == MEM
          || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
              && true_regnum (x) == -1)))
    return GENERAL_REGS;

  /* On 32-bit we need a temporary when loading/storing a DFmode value
     between unaligned memory and the upper FPU registers.  */
  if (TARGET_ARCH32
      && rclass == EXTRA_FP_REGS
      && mode == DFmode
      && GET_CODE (x) == MEM
      && ! mem_min_alignment (x, 8))
    return FP_REGS;

  if (((TARGET_CM_MEDANY
        && symbolic_operand (x, mode))
       || (TARGET_CM_EMBMEDANY
           && text_segment_operand (x, mode)))
      && ! flag_pic)
    {
      if (in_p)
        sri->icode = direct_optab_handler (reload_in_optab, mode);
      else
        sri->icode = direct_optab_handler (reload_out_optab, mode);
      return NO_REGS;
    }

  if (TARGET_VIS3 && TARGET_ARCH32)
    {
      int regno = true_regnum (x);

      /* When using VIS3 fp<-->int register moves, on 32-bit we have
         to move 8-byte values in 4-byte pieces.  This only works via
         FP_REGS, and not via EXTRA_FP_REGS.  Therefore if we try to
         move between EXTRA_FP_REGS and GENERAL_REGS, we will need
         an FP_REGS intermediate move.  */
      if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
          || ((general_or_i64_p (rclass)
               || rclass == GENERAL_OR_FP_REGS)
              && SPARC_FP_REG_P (regno)))
        {
          sri->extra_cost = 2;
          return FP_REGS;
        }
    }

  return NO_REGS;
}
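
/* Illustrative example (not in the original source): reloading a
   paradoxical (subreg:HI ...) into %f0 hits the first test and
   returns GENERAL_REGS, so reload bounces the value through an
   integer register rather than emitting a HImode FP load, which
   does not exist on SPARC.  */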
/* Implement TARGET_SECONDARY_MEMORY_NEEDED.

   On SPARC when not VIS3 it is not possible to directly move data
   between GENERAL_REGS and FP_REGS.  */

static bool
sparc_secondary_memory_needed (machine_mode mode, reg_class_t class1,
                               reg_class_t class2)
{
  return ((FP_REG_CLASS_P (class1) != FP_REG_CLASS_P (class2))
          && (! TARGET_VIS3
              || GET_MODE_SIZE (mode) > 8
              || GET_MODE_SIZE (mode) < 4));
}
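
/* Illustrative example (not in the original source): without VIS3,
   (set (reg:SF %f0) (reg:SI %o0)) has no direct instruction, so the
   value is bounced through a stack slot; with VIS3 the 4- and 8-byte
   cases are presumably handled by the fp<-->int move patterns
   (movstouw/movwtos and friends) instead.  */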
/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.

   get_secondary_mem widens its argument to BITS_PER_WORD which loses on v9
   because the movsi and movsf patterns don't handle r/f moves.
   For v8 we copy the default definition.  */

static machine_mode
sparc_secondary_memory_needed_mode (machine_mode mode)
{
  if (TARGET_ARCH64)
    {
      if (GET_MODE_BITSIZE (mode) < 32)
        return mode_for_size (32, GET_MODE_CLASS (mode), 0).require ();
      return mode;
    }
  else
    {
      if (GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
        return mode_for_size (BITS_PER_WORD,
                              GET_MODE_CLASS (mode), 0).require ();
      return mode;
    }
}
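
/* Illustrative example (not in the original source): on 64-bit a
   QImode r<->f bounce uses an SImode (32-bit) stack slot rather than
   the DImode slot the default BITS_PER_WORD widening would pick.  */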
/* Emit code to conditionally move either OPERANDS[2] or OPERANDS[3] into
   OPERANDS[0] in MODE.  OPERANDS[1] is the operator of the condition.  */

bool
sparc_expand_conditional_move (machine_mode mode, rtx *operands)
{
  enum rtx_code rc = GET_CODE (operands[1]);
  machine_mode cmp_mode;
  rtx cc_reg, dst, cmp;

  cmp = operands[1];
  if (GET_MODE (XEXP (cmp, 0)) == DImode && !TARGET_ARCH64)
    return false;

  if (GET_MODE (XEXP (cmp, 0)) == TFmode && !TARGET_HARD_QUAD)
    cmp = sparc_emit_float_lib_cmp (XEXP (cmp, 0), XEXP (cmp, 1), rc);

  cmp_mode = GET_MODE (XEXP (cmp, 0));
  rc = GET_CODE (cmp);

  dst = operands[0];
  if (! rtx_equal_p (operands[2], dst)
      && ! rtx_equal_p (operands[3], dst))
    {
      if (reg_overlap_mentioned_p (dst, cmp))
        dst = gen_reg_rtx (mode);

      emit_move_insn (dst, operands[3]);
    }
  else if (operands[2] == dst)
    {
      operands[2] = operands[3];

      if (GET_MODE_CLASS (cmp_mode) == MODE_FLOAT)
        rc = reverse_condition_maybe_unordered (rc);
      else
        rc = reverse_condition (rc);
    }

  if (XEXP (cmp, 1) == const0_rtx
      && GET_CODE (XEXP (cmp, 0)) == REG
      && cmp_mode == DImode
      && v9_regcmp_p (rc))
    cc_reg = XEXP (cmp, 0);
  else
    cc_reg = gen_compare_reg_1 (rc, XEXP (cmp, 0), XEXP (cmp, 1));

  cmp = gen_rtx_fmt_ee (rc, GET_MODE (cc_reg), cc_reg, const0_rtx);

  emit_insn (gen_rtx_SET (dst,
                          gen_rtx_IF_THEN_ELSE (mode, cmp, operands[2], dst)));

  if (dst != operands[0])
    emit_move_insn (operands[0], dst);

  return true;
}
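
/* Illustrative example (not in the original source): after loading
   OPERANDS[3] into DST, the emitted pattern has the shape

     (set (reg:SI dst)
          (if_then_else:SI (gt (reg:CC %icc) (const_int 0))
                           (reg:SI op2)
                           (reg:SI dst)))

   which matches the V9 movcc/movr conditional-move insns.  */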
/* Emit code to conditionally move a combination of OPERANDS[1] and OPERANDS[2]
   into OPERANDS[0] in MODE, depending on the outcome of the comparison of
   OPERANDS[4] and OPERANDS[5].  OPERANDS[3] is the operator of the condition.
   FCODE is the machine code to be used for OPERANDS[3] and CCODE the machine
   code to be used for the condition mask.  */

void
sparc_expand_vcond (machine_mode mode, rtx *operands, int ccode, int fcode)
{
  rtx mask, cop0, cop1, fcmp, cmask, bshuf, gsr;
  enum rtx_code code = GET_CODE (operands[3]);

  mask = gen_reg_rtx (Pmode);
  cop0 = operands[4];
  cop1 = operands[5];
  if (code == LT || code == GE)
    {
      rtx t;

      code = swap_condition (code);
      t = cop0; cop0 = cop1; cop1 = t;
    }

  gsr = gen_rtx_REG (DImode, SPARC_GSR_REG);

  fcmp = gen_rtx_UNSPEC (Pmode,
                         gen_rtvec (1, gen_rtx_fmt_ee (code, mode, cop0, cop1)),
                         fcode);

  cmask = gen_rtx_UNSPEC (DImode,
                          gen_rtvec (2, mask, gsr),
                          ccode);

  bshuf = gen_rtx_UNSPEC (mode,
                          gen_rtvec (3, operands[1], operands[2], gsr),
                          UNSPEC_BSHUFFLE);

  emit_insn (gen_rtx_SET (mask, fcmp));
  emit_insn (gen_rtx_SET (gsr, cmask));

  emit_insn (gen_rtx_SET (operands[0], bshuf));
}
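
/* Illustrative sketch (not in the original source): for a V4HI
   greater-than vcond this expands to roughly

        fcmpgt16  %f_cop0, %f_cop1, %mask   ! per-element compare (VIS)
        cmask16   %mask                     ! build GSR.mask (VIS3)
        bshuffle  %f_op1, %f_op2, %f_op0    ! select per GSR.mask (VIS2)

   i.e. the comparison result is turned into a byte-shuffle pattern
   that picks each element from OPERANDS[1] or OPERANDS[2].  */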
/* On sparc, any mode which naturally allocates into the float
   registers should return 4 here.  */

unsigned int
sparc_regmode_natural_size (machine_mode mode)
{
  int size = UNITS_PER_WORD;

  if (TARGET_ARCH64)
    {
      enum mode_class mclass = GET_MODE_CLASS (mode);

      if (mclass == MODE_FLOAT || mclass == MODE_VECTOR_INT)
        size = 4;
    }

  return size;
}
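
/* Illustrative example (not in the original source): on 64-bit,
   DFmode reports a natural size of 4 because a double occupies a
   pair of 4-byte FP registers in GCC's register model, whereas
   DImode reports UNITS_PER_WORD (8) since it lives in a single
   integer register.  */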
/* Implement TARGET_HARD_REGNO_NREGS.

   On SPARC, ordinary registers hold 32 bits worth; this means both
   integer and floating point registers.  On v9, integer regs hold 64
   bits worth; floating point regs hold 32 bits worth (this includes the
   new fp regs as even the odd ones are included in the hard register
   count).  */

static unsigned int
sparc_hard_regno_nregs (unsigned int regno, machine_mode mode)
{
  if (regno == SPARC_GSR_REG)
    return 1;
  if (TARGET_ARCH64)
    {
      if (SPARC_INT_REG_P (regno) || regno == FRAME_POINTER_REGNUM)
        return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
      return CEIL (GET_MODE_SIZE (mode), 4);
    }
  return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
}
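
/* Illustrative example (not in the original source): on 64-bit,
   DImode needs one integer register but two FP registers
   (e.g. %f0+%f1), and TFmode needs two integer registers or four
   FP registers.  */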
/* Implement TARGET_HARD_REGNO_MODE_OK.

   ??? Because of the funny way we pass parameters we should allow certain
   ??? types of float/complex values to be in integer registers during
   ??? RTL generation.  This only matters on arch32.  */

static bool
sparc_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  return (hard_regno_mode_classes[regno] & sparc_mode_class[mode]) != 0;
}
/* Implement TARGET_MODES_TIEABLE_P.

   For V9 we have to deal with the fact that only the lower 32 floating
   point registers are 32-bit addressable.  */

static bool
sparc_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  enum mode_class mclass1, mclass2;
  unsigned short size1, size2;

  if (mode1 == mode2)
    return true;

  mclass1 = GET_MODE_CLASS (mode1);
  mclass2 = GET_MODE_CLASS (mode2);
  if (mclass1 != mclass2)
    return false;

  if (! TARGET_V9)
    return true;

  /* Classes are the same and we are V9 so we have to deal with upper
     vs. lower floating point registers.  If one of the modes is a
     4-byte mode and the other is not, we have to mark them as not
     tieable because only the lower 32 floating point registers are
     addressable 32 bits at a time.

     We can't just test explicitly for SFmode, otherwise we won't
     cover the vector mode cases properly.  */

  if (mclass1 != MODE_FLOAT && mclass1 != MODE_VECTOR_INT)
    return true;

  size1 = GET_MODE_SIZE (mode1);
  size2 = GET_MODE_SIZE (mode2);
  if ((size1 > 4 && size2 == 4)
      || (size2 > 4 && size1 == 4))
    return false;

  return true;
}
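
/* Illustrative example (not in the original source): on V9, SFmode
   and DFmode are not tieable because a DFmode value may be allocated
   to %f32-%f62, where no 4-byte (SFmode) access exists.  */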
/* Implement TARGET_CSTORE_MODE.  */

static scalar_int_mode
sparc_cstore_mode (enum insn_code icode ATTRIBUTE_UNUSED)
{
  return (TARGET_ARCH64 ? DImode : SImode);
}

/* Return the compound expression made of T1 and T2.  */

static inline tree
compound_expr (tree t1, tree t2)
{
  return build2 (COMPOUND_EXPR, void_type_node, t1, t2);
}
/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
sparc_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_FPU)
    return;

  const unsigned HOST_WIDE_INT accrued_exception_mask = 0x1f << 5;
  const unsigned HOST_WIDE_INT trap_enable_mask = 0x1f << 23;
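
  /* Added note: in the FSR, bits 5-9 form the accrued exception
     (aexc) field and bits 23-27 the trap enable mask (TEM), hence
     the two masks above; the current exception field (cexc)
     occupies bits 0-4.  */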

  /* We generate the equivalent of feholdexcept (&fenv_var):

       unsigned int fenv_var;
       __builtin_store_fsr (&fenv_var);

       unsigned int tmp1_var;
       tmp1_var = fenv_var & ~(accrued_exception_mask | trap_enable_mask);

       __builtin_load_fsr (&tmp1_var);  */

  tree fenv_var = create_tmp_var_raw (unsigned_type_node);
  TREE_ADDRESSABLE (fenv_var) = 1;
  tree fenv_addr = build_fold_addr_expr (fenv_var);
  tree stfsr = sparc_builtins[SPARC_BUILTIN_STFSR];
  tree hold_stfsr
    = build4 (TARGET_EXPR, unsigned_type_node, fenv_var,
              build_call_expr (stfsr, 1, fenv_addr), NULL_TREE, NULL_TREE);

  tree tmp1_var = create_tmp_var_raw (unsigned_type_node);
  TREE_ADDRESSABLE (tmp1_var) = 1;
  tree masked_fenv_var
    = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
              build_int_cst (unsigned_type_node,
                             ~(accrued_exception_mask | trap_enable_mask)));
  tree hold_mask
    = build4 (TARGET_EXPR, unsigned_type_node, tmp1_var, masked_fenv_var,
              NULL_TREE, NULL_TREE);

  tree tmp1_addr = build_fold_addr_expr (tmp1_var);
  tree ldfsr = sparc_builtins[SPARC_BUILTIN_LDFSR];
  tree hold_ldfsr = build_call_expr (ldfsr, 1, tmp1_addr);

  *hold = compound_expr (compound_expr (hold_stfsr, hold_mask), hold_ldfsr);

  /* We reload the value of tmp1_var to clear the exceptions:

       __builtin_load_fsr (&tmp1_var);  */

  *clear = build_call_expr (ldfsr, 1, tmp1_addr);

  /* We generate the equivalent of feupdateenv (&fenv_var):

       unsigned int tmp2_var;
       __builtin_store_fsr (&tmp2_var);

       __builtin_load_fsr (&fenv_var);

       if (SPARC_LOW_FE_EXCEPT_VALUES)
         tmp2_var >>= 5;
       __atomic_feraiseexcept ((int) tmp2_var);  */

  tree tmp2_var = create_tmp_var_raw (unsigned_type_node);
  TREE_ADDRESSABLE (tmp2_var) = 1;
  tree tmp2_addr = build_fold_addr_expr (tmp2_var);
  tree update_stfsr
    = build4 (TARGET_EXPR, unsigned_type_node, tmp2_var,
              build_call_expr (stfsr, 1, tmp2_addr), NULL_TREE, NULL_TREE);

  tree update_ldfsr = build_call_expr (ldfsr, 1, fenv_addr);

  tree atomic_feraiseexcept
    = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  tree update_call
    = build_call_expr (atomic_feraiseexcept, 1,
                       fold_convert (integer_type_node, tmp2_var));

  if (SPARC_LOW_FE_EXCEPT_VALUES)
    {
      tree shifted_tmp2_var
        = build2 (RSHIFT_EXPR, unsigned_type_node, tmp2_var,
                  build_int_cst (unsigned_type_node, 5));
      tree update_shift
        = build2 (MODIFY_EXPR, void_type_node, tmp2_var, shifted_tmp2_var);
      update_call = compound_expr (update_shift, update_call);
    }

  *update
    = compound_expr (compound_expr (update_stfsr, update_ldfsr), update_call);
}
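
/* Illustrative note (not in the original source): the middle end
   wraps an atomic compound assignment with these sequences roughly as

     HOLD;                    // save FSR, mask aexc/TEM
     loop: ... compare-and-swap attempt ...
     if (failed) { CLEAR; goto loop; }  // re-clear stale exceptions
     UPDATE;                  // restore FSR, raise deferred exceptions

   so FP exceptions raised by discarded iterations never become
   visible to the program.  */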
/* Implement TARGET_CAN_CHANGE_MODE_CLASS.  Borrowed from the PA port.

   SImode loads to floating-point registers are not zero-extended.
   The definition for LOAD_EXTEND_OP specifies that integer loads
   narrower than BITS_PER_WORD will be zero-extended.  As a result,
   we inhibit changes from SImode unless they are to a mode that is
   identical in size.

   Likewise for SFmode, since word-mode paradoxical subregs are
   problematic on big-endian architectures.  */

static bool
sparc_can_change_mode_class (machine_mode from, machine_mode to,
                             reg_class_t rclass)
{
  if (TARGET_ARCH64
      && GET_MODE_SIZE (from) == 4
      && GET_MODE_SIZE (to) != 4)
    return !reg_classes_intersect_p (rclass, FP_REGS);
  return true;
}
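
/* Illustrative example (not in the original source): on 64-bit,
   (subreg:DI (reg:SI x)) is rejected for FP_REGS because an SImode
   load into an FP register leaves the upper 32 bits undefined,
   contradicting the zero-extension that LOAD_EXTEND_OP promises
   for integer registers.  */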
/* Implement TARGET_CONSTANT_ALIGNMENT.  */

static HOST_WIDE_INT
sparc_constant_alignment (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST)
    return MAX (align, FASTEST_ALIGNMENT);
  return align;
}

#include "gt-sparc.h"