Daily bump.
[official-gcc.git] / gcc / config / nds32 / nds32-md-auxiliary.c
blob3040bde9423ae3483b013100ad98f6325ba5da3d
1 /* Auxiliary functions for output asm template or expand rtl
2 pattern of Andes NDS32 cpu for GNU compiler
3 Copyright (C) 2012-2018 Free Software Foundation, Inc.
4 Contributed by Andes Technology Corporation.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* ------------------------------------------------------------------------ */
24 #define IN_TARGET_CODE 1
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "backend.h"
30 #include "target.h"
31 #include "rtl.h"
32 #include "tree.h"
33 #include "memmodel.h"
34 #include "tm_p.h"
35 #include "optabs.h" /* For GEN_FCN. */
36 #include "recog.h"
37 #include "output.h"
38 #include "tm-constrs.h"
39 #include "expr.h"
40 #include "emit-rtl.h"
41 #include "explow.h"
42 #include "stringpool.h"
43 #include "attribs.h"
46 /* ------------------------------------------------------------------------ */
/* Map a callee-saved GPR number to its bit in the 4-bit "Enable4"
   mask ($fp/$gp/$lp/$sp).  Any other register number is a caller bug.  */
static int
nds32_regno_to_enable4 (unsigned regno)
{
  switch (regno)
    {
    case 28: /* $r28/fp */
      return 0x8;
    case 29: /* $r29/gp */
      return 0x4;
    case 30: /* $r30/lp */
      return 0x2;
    case 31: /* $r31/sp */
      return 0x1;
    default:
      gcc_unreachable ();
    }
}
/* Return the size-suffix character ('w'/'h'/'b') for an access of
   BYTE bytes.  Only 4, 2, and 1 are meaningful.  */
static char
nds32_byte_to_size (int byte)
{
  switch (byte)
    {
    case 4:
      return 'w';
    case 2:
      return 'h';
    case 1:
      return 'b';
    default:
      /* Normally it should not be here.  */
      gcc_unreachable ();
    }
}
84 static int
85 nds32_inverse_cond_code (int code)
87 switch (code)
89 case NE:
90 return EQ;
91 case EQ:
92 return NE;
93 case GT:
94 return LE;
95 case LE:
96 return GT;
97 case GE:
98 return LT;
99 case LT:
100 return GE;
101 default:
102 gcc_unreachable ();
106 static const char *
107 nds32_cond_code_str (int code)
109 switch (code)
111 case NE:
112 return "ne";
113 case EQ:
114 return "eq";
115 case GT:
116 return "gt";
117 case LE:
118 return "le";
119 case GE:
120 return "ge";
121 case LT:
122 return "lt";
123 default:
124 gcc_unreachable ();
128 static void
129 output_cond_branch (int code, const char *suffix, bool r5_p,
130 bool long_jump_p, rtx *operands)
132 char pattern[256];
133 const char *cond_code;
134 bool align_p = NDS32_ALIGN_P ();
135 const char *align = align_p ? "\t.align\t2\n" : "";
137 if (r5_p && REGNO (operands[2]) == 5 && TARGET_16_BIT)
139 /* This is special case for beqs38 and bnes38,
140 second operand 2 can't be $r5 and it's almost meanless,
141 however it may occur after copy propgation. */
142 if (code == EQ)
144 /* $r5 == $r5 always taken! */
145 if (long_jump_p)
146 snprintf (pattern, sizeof (pattern),
147 "j\t%%3");
148 else
149 snprintf (pattern, sizeof (pattern),
150 "j8\t%%3");
152 else
153 /* Don't output anything since $r5 != $r5 never taken! */
154 pattern[0] = '\0';
156 else if (long_jump_p)
158 int inverse_code = nds32_inverse_cond_code (code);
159 cond_code = nds32_cond_code_str (inverse_code);
161 /* b<cond><suffix> $r0, $r1, .L0
163 b<inverse_cond><suffix> $r0, $r1, .LCB0
164 j .L0
165 .LCB0:
169 b<cond><suffix> $r0, $r1, .L0
171 b<inverse_cond><suffix> $r0, $r1, .LCB0
172 j .L0
173 .LCB0:
175 if (r5_p && TARGET_16_BIT)
177 snprintf (pattern, sizeof (pattern),
178 "b%ss38\t %%2, .LCB%%=\n\tj\t%%3\n%s.LCB%%=:",
179 cond_code, align);
181 else
183 snprintf (pattern, sizeof (pattern),
184 "b%s%s\t%%1, %%2, .LCB%%=\n\tj\t%%3\n%s.LCB%%=:",
185 cond_code, suffix, align);
188 else
190 cond_code = nds32_cond_code_str (code);
191 if (r5_p && TARGET_16_BIT)
193 /* b<cond>s38 $r1, .L0 */
194 snprintf (pattern, sizeof (pattern),
195 "b%ss38\t %%2, %%3", cond_code);
197 else
199 /* b<cond><suffix> $r0, $r1, .L0 */
200 snprintf (pattern, sizeof (pattern),
201 "b%s%s\t%%1, %%2, %%3", cond_code, suffix);
205 output_asm_insn (pattern, operands);
208 static void
209 output_cond_branch_compare_zero (int code, const char *suffix,
210 bool long_jump_p, rtx *operands,
211 bool ta_implied_p)
213 char pattern[256];
214 const char *cond_code;
215 bool align_p = NDS32_ALIGN_P ();
216 const char *align = align_p ? "\t.align\t2\n" : "";
217 if (long_jump_p)
219 int inverse_code = nds32_inverse_cond_code (code);
220 cond_code = nds32_cond_code_str (inverse_code);
222 if (ta_implied_p && TARGET_16_BIT)
224 /* b<cond>z<suffix> .L0
226 b<inverse_cond>z<suffix> .LCB0
227 j .L0
228 .LCB0:
230 snprintf (pattern, sizeof (pattern),
231 "b%sz%s\t.LCB%%=\n\tj\t%%2\n%s.LCB%%=:",
232 cond_code, suffix, align);
234 else
236 /* b<cond>z<suffix> $r0, .L0
238 b<inverse_cond>z<suffix> $r0, .LCB0
239 j .L0
240 .LCB0:
242 snprintf (pattern, sizeof (pattern),
243 "b%sz%s\t%%1, .LCB%%=\n\tj\t%%2\n%s.LCB%%=:",
244 cond_code, suffix, align);
247 else
249 cond_code = nds32_cond_code_str (code);
250 if (ta_implied_p && TARGET_16_BIT)
252 /* b<cond>z<suffix> .L0 */
253 snprintf (pattern, sizeof (pattern),
254 "b%sz%s\t%%2", cond_code, suffix);
256 else
258 /* b<cond>z<suffix> $r0, .L0 */
259 snprintf (pattern, sizeof (pattern),
260 "b%sz%s\t%%1, %%2", cond_code, suffix);
264 output_asm_insn (pattern, operands);
267 static void
268 nds32_split_shiftrtdi3 (rtx dst, rtx src, rtx shiftamount, bool logic_shift_p)
270 rtx src_high_part;
271 rtx dst_high_part, dst_low_part;
273 dst_high_part = nds32_di_high_part_subreg (dst);
274 src_high_part = nds32_di_high_part_subreg (src);
275 dst_low_part = nds32_di_low_part_subreg (dst);
277 if (CONST_INT_P (shiftamount))
279 if (INTVAL (shiftamount) < 32)
281 if (logic_shift_p)
283 emit_insn (gen_uwext (dst_low_part, src,
284 shiftamount));
285 emit_insn (gen_lshrsi3 (dst_high_part, src_high_part,
286 shiftamount));
288 else
290 emit_insn (gen_wext (dst_low_part, src,
291 shiftamount));
292 emit_insn (gen_ashrsi3 (dst_high_part, src_high_part,
293 shiftamount));
296 else
298 rtx new_shift_amout = gen_int_mode(INTVAL (shiftamount) - 32, SImode);
300 if (logic_shift_p)
302 emit_insn (gen_lshrsi3 (dst_low_part, src_high_part,
303 new_shift_amout));
304 emit_move_insn (dst_high_part, const0_rtx);
306 else
308 emit_insn (gen_ashrsi3 (dst_low_part, src_high_part,
309 new_shift_amout));
310 emit_insn (gen_ashrsi3 (dst_high_part, src_high_part,
311 GEN_INT (31)));
315 else
317 rtx dst_low_part_l32, dst_high_part_l32;
318 rtx dst_low_part_g32, dst_high_part_g32;
319 rtx new_shift_amout, select_reg;
320 dst_low_part_l32 = gen_reg_rtx (SImode);
321 dst_high_part_l32 = gen_reg_rtx (SImode);
322 dst_low_part_g32 = gen_reg_rtx (SImode);
323 dst_high_part_g32 = gen_reg_rtx (SImode);
324 new_shift_amout = gen_reg_rtx (SImode);
325 select_reg = gen_reg_rtx (SImode);
327 emit_insn (gen_andsi3 (shiftamount, shiftamount, GEN_INT (0x3f)));
329 if (logic_shift_p)
332 if (shiftamount < 32)
333 dst_low_part = wext (src, shiftamount)
334 dst_high_part = src_high_part >> shiftamount
335 else
336 dst_low_part = src_high_part >> (shiftamount & 0x1f)
337 dst_high_part = 0
339 emit_insn (gen_uwext (dst_low_part_l32, src, shiftamount));
340 emit_insn (gen_lshrsi3 (dst_high_part_l32, src_high_part,
341 shiftamount));
343 emit_insn (gen_andsi3 (new_shift_amout, shiftamount, GEN_INT (0x1f)));
344 emit_insn (gen_lshrsi3 (dst_low_part_g32, src_high_part,
345 new_shift_amout));
346 emit_move_insn (dst_high_part_g32, const0_rtx);
348 else
351 if (shiftamount < 32)
352 dst_low_part = wext (src, shiftamount)
353 dst_high_part = src_high_part >> shiftamount
354 else
355 dst_low_part = src_high_part >> (shiftamount & 0x1f)
356 # shift 31 for sign extend
357 dst_high_part = src_high_part >> 31
359 emit_insn (gen_wext (dst_low_part_l32, src, shiftamount));
360 emit_insn (gen_ashrsi3 (dst_high_part_l32, src_high_part,
361 shiftamount));
363 emit_insn (gen_andsi3 (new_shift_amout, shiftamount, GEN_INT (0x1f)));
364 emit_insn (gen_ashrsi3 (dst_low_part_g32, src_high_part,
365 new_shift_amout));
366 emit_insn (gen_ashrsi3 (dst_high_part_g32, src_high_part,
367 GEN_INT (31)));
370 emit_insn (gen_slt_compare (select_reg, shiftamount, GEN_INT (32)));
372 emit_insn (gen_cmovnsi (dst_low_part, select_reg,
373 dst_low_part_l32, dst_low_part_g32));
374 emit_insn (gen_cmovnsi (dst_high_part, select_reg,
375 dst_high_part_l32, dst_high_part_g32));
379 /* ------------------------------------------------------------------------ */
381 /* Auxiliary function for expand RTL pattern. */
383 enum nds32_expand_result_type
384 nds32_expand_cbranch (rtx *operands)
386 rtx tmp_reg;
387 enum rtx_code code;
389 code = GET_CODE (operands[0]);
391 /* If operands[2] is (const_int 0),
392 we can use beqz,bnez,bgtz,bgez,bltz,or blez instructions.
393 So we have gcc generate original template rtx. */
394 if (GET_CODE (operands[2]) == CONST_INT)
395 if (INTVAL (operands[2]) == 0)
396 if ((code != GTU)
397 && (code != GEU)
398 && (code != LTU)
399 && (code != LEU))
400 return EXPAND_CREATE_TEMPLATE;
402 /* For other comparison, NDS32 ISA only has slt (Set-on-Less-Than)
403 behavior for the comparison, we might need to generate other
404 rtx patterns to achieve same semantic. */
405 switch (code)
407 case GT:
408 case GTU:
409 if (GET_CODE (operands[2]) == CONST_INT)
411 /* GT reg_A, const_int => !(LT reg_A, const_int + 1) */
412 if (optimize_size || optimize == 0)
413 tmp_reg = gen_rtx_REG (SImode, TA_REGNUM);
414 else
415 tmp_reg = gen_reg_rtx (SImode);
417 /* We want to plus 1 into the integer value
418 of operands[2] to create 'slt' instruction.
419 This caculation is performed on the host machine,
420 which may be 64-bit integer.
421 So the meaning of caculation result may be
422 different from the 32-bit nds32 target.
424 For example:
425 0x7fffffff + 0x1 -> 0x80000000,
426 this value is POSITIVE on 64-bit machine,
427 but the expected value on 32-bit nds32 target
428 should be NEGATIVE value.
430 Hence, instead of using GEN_INT(), we use gen_int_mode() to
431 explicitly create SImode constant rtx. */
432 enum rtx_code cmp_code;
434 rtx plus1 = gen_int_mode (INTVAL (operands[2]) + 1, SImode);
435 if (satisfies_constraint_Is15 (plus1))
437 operands[2] = plus1;
438 cmp_code = EQ;
439 if (code == GT)
441 /* GT, use slts instruction */
442 emit_insn (
443 gen_slts_compare (tmp_reg, operands[1], operands[2]));
445 else
447 /* GTU, use slt instruction */
448 emit_insn (
449 gen_slt_compare (tmp_reg, operands[1], operands[2]));
452 else
454 cmp_code = NE;
455 if (code == GT)
457 /* GT, use slts instruction */
458 emit_insn (
459 gen_slts_compare (tmp_reg, operands[2], operands[1]));
461 else
463 /* GTU, use slt instruction */
464 emit_insn (
465 gen_slt_compare (tmp_reg, operands[2], operands[1]));
469 PUT_CODE (operands[0], cmp_code);
470 operands[1] = tmp_reg;
471 operands[2] = const0_rtx;
472 emit_insn (gen_cbranchsi4 (operands[0], operands[1],
473 operands[2], operands[3]));
475 return EXPAND_DONE;
477 else
479 /* GT reg_A, reg_B => LT reg_B, reg_A */
480 if (optimize_size || optimize == 0)
481 tmp_reg = gen_rtx_REG (SImode, TA_REGNUM);
482 else
483 tmp_reg = gen_reg_rtx (SImode);
485 if (code == GT)
487 /* GT, use slts instruction */
488 emit_insn (gen_slts_compare (tmp_reg, operands[2], operands[1]));
490 else
492 /* GTU, use slt instruction */
493 emit_insn (gen_slt_compare (tmp_reg, operands[2], operands[1]));
496 PUT_CODE (operands[0], NE);
497 operands[1] = tmp_reg;
498 operands[2] = const0_rtx;
499 emit_insn (gen_cbranchsi4 (operands[0], operands[1],
500 operands[2], operands[3]));
502 return EXPAND_DONE;
505 case GE:
506 case GEU:
507 /* GE reg_A, reg_B => !(LT reg_A, reg_B) */
508 /* GE reg_A, const_int => !(LT reg_A, const_int) */
509 if (optimize_size || optimize == 0)
510 tmp_reg = gen_rtx_REG (SImode, TA_REGNUM);
511 else
512 tmp_reg = gen_reg_rtx (SImode);
514 if (code == GE)
516 /* GE, use slts instruction */
517 emit_insn (gen_slts_compare (tmp_reg, operands[1], operands[2]));
519 else
521 /* GEU, use slt instruction */
522 emit_insn (gen_slt_compare (tmp_reg, operands[1], operands[2]));
525 PUT_CODE (operands[0], EQ);
526 operands[1] = tmp_reg;
527 operands[2] = const0_rtx;
528 emit_insn (gen_cbranchsi4 (operands[0], operands[1],
529 operands[2], operands[3]));
531 return EXPAND_DONE;
533 case LT:
534 case LTU:
535 /* LT reg_A, reg_B => LT reg_A, reg_B */
536 /* LT reg_A, const_int => LT reg_A, const_int */
537 if (optimize_size || optimize == 0)
538 tmp_reg = gen_rtx_REG (SImode, TA_REGNUM);
539 else
540 tmp_reg = gen_reg_rtx (SImode);
542 if (code == LT)
544 /* LT, use slts instruction */
545 emit_insn (gen_slts_compare (tmp_reg, operands[1], operands[2]));
547 else
549 /* LTU, use slt instruction */
550 emit_insn (gen_slt_compare (tmp_reg, operands[1], operands[2]));
553 PUT_CODE (operands[0], NE);
554 operands[1] = tmp_reg;
555 operands[2] = const0_rtx;
556 emit_insn (gen_cbranchsi4 (operands[0], operands[1],
557 operands[2], operands[3]));
559 return EXPAND_DONE;
561 case LE:
562 case LEU:
563 if (GET_CODE (operands[2]) == CONST_INT)
565 /* LE reg_A, const_int => LT reg_A, const_int + 1 */
566 if (optimize_size || optimize == 0)
567 tmp_reg = gen_rtx_REG (SImode, TA_REGNUM);
568 else
569 tmp_reg = gen_reg_rtx (SImode);
571 enum rtx_code cmp_code;
572 /* Note that (le:SI X INT_MAX) is not the same as (lt:SI X INT_MIN).
573 We better have an assert here in case GCC does not properly
574 optimize it away. The INT_MAX here is 0x7fffffff for target. */
575 rtx plus1 = gen_int_mode (INTVAL (operands[2]) + 1, SImode);
576 if (satisfies_constraint_Is15 (plus1))
578 operands[2] = plus1;
579 cmp_code = NE;
580 if (code == LE)
582 /* LE, use slts instruction */
583 emit_insn (
584 gen_slts_compare (tmp_reg, operands[1], operands[2]));
586 else
588 /* LEU, use slt instruction */
589 emit_insn (
590 gen_slt_compare (tmp_reg, operands[1], operands[2]));
593 else
595 cmp_code = EQ;
596 if (code == LE)
598 /* LE, use slts instruction */
599 emit_insn (
600 gen_slts_compare (tmp_reg, operands[2], operands[1]));
602 else
604 /* LEU, use slt instruction */
605 emit_insn (
606 gen_slt_compare (tmp_reg, operands[2], operands[1]));
610 PUT_CODE (operands[0], cmp_code);
611 operands[1] = tmp_reg;
612 operands[2] = const0_rtx;
613 emit_insn (gen_cbranchsi4 (operands[0], operands[1],
614 operands[2], operands[3]));
616 return EXPAND_DONE;
618 else
620 /* LE reg_A, reg_B => !(LT reg_B, reg_A) */
621 if (optimize_size || optimize == 0)
622 tmp_reg = gen_rtx_REG (SImode, TA_REGNUM);
623 else
624 tmp_reg = gen_reg_rtx (SImode);
626 if (code == LE)
628 /* LE, use slts instruction */
629 emit_insn (gen_slts_compare (tmp_reg, operands[2], operands[1]));
631 else
633 /* LEU, use slt instruction */
634 emit_insn (gen_slt_compare (tmp_reg, operands[2], operands[1]));
637 PUT_CODE (operands[0], EQ);
638 operands[1] = tmp_reg;
639 operands[2] = const0_rtx;
640 emit_insn (gen_cbranchsi4 (operands[0], operands[1],
641 operands[2], operands[3]));
643 return EXPAND_DONE;
646 case EQ:
647 case NE:
648 /* NDS32 ISA has various form for eq/ne behavior no matter
649 what kind of the operand is.
650 So just generate original template rtx. */
652 /* Put operands[2] into register if operands[2] is a large
653 const_int or ISAv2. */
654 if (GET_CODE (operands[2]) == CONST_INT
655 && (!satisfies_constraint_Is11 (operands[2])
656 || TARGET_ISA_V2))
657 operands[2] = force_reg (SImode, operands[2]);
659 return EXPAND_CREATE_TEMPLATE;
661 default:
662 return EXPAND_FAIL;
666 enum nds32_expand_result_type
667 nds32_expand_cstore (rtx *operands)
669 rtx tmp_reg;
670 enum rtx_code code;
672 code = GET_CODE (operands[1]);
674 switch (code)
676 case EQ:
677 case NE:
678 if (GET_CODE (operands[3]) == CONST_INT)
680 /* reg_R = (reg_A == const_int_B)
681 --> xori reg_C, reg_A, const_int_B
682 slti reg_R, reg_C, const_int_1
683 reg_R = (reg_A != const_int_B)
684 --> xori reg_C, reg_A, const_int_B
685 slti reg_R, const_int0, reg_C */
686 tmp_reg = gen_reg_rtx (SImode);
688 /* If the integer value is not in the range of imm15s,
689 we need to force register first because our addsi3 pattern
690 only accept nds32_rimm15s_operand predicate. */
691 rtx new_imm = gen_int_mode (-INTVAL (operands[3]), SImode);
692 if (satisfies_constraint_Is15 (new_imm))
693 emit_insn (gen_addsi3 (tmp_reg, operands[2], new_imm));
694 else
696 if (!(satisfies_constraint_Iu15 (operands[3])
697 || (TARGET_EXT_PERF
698 && satisfies_constraint_It15 (operands[3]))))
699 operands[3] = force_reg (SImode, operands[3]);
700 emit_insn (gen_xorsi3 (tmp_reg, operands[2], operands[3]));
703 if (code == EQ)
704 emit_insn (gen_slt_eq0 (operands[0], tmp_reg));
705 else
706 emit_insn (gen_slt_compare (operands[0], const0_rtx, tmp_reg));
708 return EXPAND_DONE;
710 else
712 /* reg_R = (reg_A == reg_B)
713 --> xor reg_C, reg_A, reg_B
714 slti reg_R, reg_C, const_int_1
715 reg_R = (reg_A != reg_B)
716 --> xor reg_C, reg_A, reg_B
717 slti reg_R, const_int0, reg_C */
718 tmp_reg = gen_reg_rtx (SImode);
719 emit_insn (gen_xorsi3 (tmp_reg, operands[2], operands[3]));
720 if (code == EQ)
721 emit_insn (gen_slt_eq0 (operands[0], tmp_reg));
722 else
723 emit_insn (gen_slt_compare (operands[0], const0_rtx, tmp_reg));
725 return EXPAND_DONE;
727 case GT:
728 case GTU:
729 /* reg_R = (reg_A > reg_B) --> slt reg_R, reg_B, reg_A */
730 /* reg_R = (reg_A > const_int_B) --> slt reg_R, const_int_B, reg_A */
731 if (code == GT)
733 /* GT, use slts instruction */
734 emit_insn (gen_slts_compare (operands[0], operands[3], operands[2]));
736 else
738 /* GTU, use slt instruction */
739 emit_insn (gen_slt_compare (operands[0], operands[3], operands[2]));
742 return EXPAND_DONE;
744 case GE:
745 case GEU:
746 if (GET_CODE (operands[3]) == CONST_INT)
748 /* reg_R = (reg_A >= const_int_B)
749 --> movi reg_C, const_int_B - 1
750 slt reg_R, reg_C, reg_A */
751 tmp_reg = gen_reg_rtx (SImode);
753 emit_insn (gen_movsi (tmp_reg,
754 gen_int_mode (INTVAL (operands[3]) - 1,
755 SImode)));
756 if (code == GE)
758 /* GE, use slts instruction */
759 emit_insn (gen_slts_compare (operands[0], tmp_reg, operands[2]));
761 else
763 /* GEU, use slt instruction */
764 emit_insn (gen_slt_compare (operands[0], tmp_reg, operands[2]));
767 return EXPAND_DONE;
769 else
771 /* reg_R = (reg_A >= reg_B)
772 --> slt reg_R, reg_A, reg_B
773 xori reg_R, reg_R, const_int_1 */
774 if (code == GE)
776 /* GE, use slts instruction */
777 emit_insn (gen_slts_compare (operands[0],
778 operands[2], operands[3]));
780 else
782 /* GEU, use slt instruction */
783 emit_insn (gen_slt_compare (operands[0],
784 operands[2], operands[3]));
787 /* perform 'not' behavior */
788 emit_insn (gen_xorsi3 (operands[0], operands[0], const1_rtx));
790 return EXPAND_DONE;
793 case LT:
794 case LTU:
795 /* reg_R = (reg_A < reg_B) --> slt reg_R, reg_A, reg_B */
796 /* reg_R = (reg_A < const_int_B) --> slt reg_R, reg_A, const_int_B */
797 if (code == LT)
799 /* LT, use slts instruction */
800 emit_insn (gen_slts_compare (operands[0], operands[2], operands[3]));
802 else
804 /* LTU, use slt instruction */
805 emit_insn (gen_slt_compare (operands[0], operands[2], operands[3]));
808 return EXPAND_DONE;
810 case LE:
811 case LEU:
812 if (GET_CODE (operands[3]) == CONST_INT)
814 /* reg_R = (reg_A <= const_int_B)
815 --> movi reg_C, const_int_B + 1
816 slt reg_R, reg_A, reg_C */
817 tmp_reg = gen_reg_rtx (SImode);
819 emit_insn (gen_movsi (tmp_reg,
820 gen_int_mode (INTVAL (operands[3]) + 1,
821 SImode)));
822 if (code == LE)
824 /* LE, use slts instruction */
825 emit_insn (gen_slts_compare (operands[0], operands[2], tmp_reg));
827 else
829 /* LEU, use slt instruction */
830 emit_insn (gen_slt_compare (operands[0], operands[2], tmp_reg));
833 return EXPAND_DONE;
835 else
837 /* reg_R = (reg_A <= reg_B) --> slt reg_R, reg_B, reg_A
838 xori reg_R, reg_R, const_int_1 */
839 if (code == LE)
841 /* LE, use slts instruction */
842 emit_insn (gen_slts_compare (operands[0],
843 operands[3], operands[2]));
845 else
847 /* LEU, use slt instruction */
848 emit_insn (gen_slt_compare (operands[0],
849 operands[3], operands[2]));
852 /* perform 'not' behavior */
853 emit_insn (gen_xorsi3 (operands[0], operands[0], const1_rtx));
855 return EXPAND_DONE;
859 default:
860 gcc_unreachable ();
864 void
865 nds32_expand_float_cbranch (rtx *operands)
867 enum rtx_code code = GET_CODE (operands[0]);
868 enum rtx_code new_code = code;
869 rtx cmp_op0 = operands[1];
870 rtx cmp_op1 = operands[2];
871 rtx tmp_reg;
872 rtx tmp;
874 int reverse = 0;
876 /* Main Goal: Use compare instruction + branch instruction.
878 For example:
879 GT, GE: swap condition and swap operands and generate
880 compare instruction(LT, LE) + branch not equal instruction.
882 UNORDERED, LT, LE, EQ: no need to change and generate
883 compare instruction(UNORDERED, LT, LE, EQ) + branch not equal instruction.
885 ORDERED, NE: reverse condition and generate
886 compare instruction(EQ) + branch equal instruction. */
888 switch (code)
890 case GT:
891 case GE:
892 tmp = cmp_op0;
893 cmp_op0 = cmp_op1;
894 cmp_op1 = tmp;
895 new_code = swap_condition (new_code);
896 break;
897 case UNORDERED:
898 case LT:
899 case LE:
900 case EQ:
901 break;
902 case ORDERED:
903 case NE:
904 new_code = reverse_condition (new_code);
905 reverse = 1;
906 break;
907 case UNGT:
908 case UNGE:
909 new_code = reverse_condition_maybe_unordered (new_code);
910 reverse = 1;
911 break;
912 case UNLT:
913 case UNLE:
914 new_code = reverse_condition_maybe_unordered (new_code);
915 tmp = cmp_op0;
916 cmp_op0 = cmp_op1;
917 cmp_op1 = tmp;
918 new_code = swap_condition (new_code);
919 reverse = 1;
920 break;
921 default:
922 return;
925 tmp_reg = gen_reg_rtx (SImode);
926 emit_insn (gen_rtx_SET (tmp_reg,
927 gen_rtx_fmt_ee (new_code, SImode,
928 cmp_op0, cmp_op1)));
930 PUT_CODE (operands[0], reverse ? EQ : NE);
931 emit_insn (gen_cbranchsi4 (operands[0], tmp_reg,
932 const0_rtx, operands[3]));
935 void
936 nds32_expand_float_cstore (rtx *operands)
938 enum rtx_code code = GET_CODE (operands[1]);
939 enum rtx_code new_code = code;
940 machine_mode mode = GET_MODE (operands[2]);
942 rtx cmp_op0 = operands[2];
943 rtx cmp_op1 = operands[3];
944 rtx tmp;
946 /* Main Goal: Use compare instruction to store value.
948 For example:
949 GT, GE: swap condition and swap operands.
950 reg_R = (reg_A > reg_B) --> fcmplt reg_R, reg_B, reg_A
951 reg_R = (reg_A >= reg_B) --> fcmple reg_R, reg_B, reg_A
953 LT, LE, EQ: no need to change, it is already LT, LE, EQ.
954 reg_R = (reg_A < reg_B) --> fcmplt reg_R, reg_A, reg_B
955 reg_R = (reg_A <= reg_B) --> fcmple reg_R, reg_A, reg_B
956 reg_R = (reg_A == reg_B) --> fcmpeq reg_R, reg_A, reg_B
958 ORDERED: reverse condition and using xor insturction to achieve 'ORDERED'.
959 reg_R = (reg_A != reg_B) --> fcmpun reg_R, reg_A, reg_B
960 xor reg_R, reg_R, const1_rtx
962 NE: reverse condition and using xor insturction to achieve 'NE'.
963 reg_R = (reg_A != reg_B) --> fcmpeq reg_R, reg_A, reg_B
964 xor reg_R, reg_R, const1_rtx */
965 switch (code)
967 case GT:
968 case GE:
969 tmp = cmp_op0;
970 cmp_op0 = cmp_op1;
971 cmp_op1 =tmp;
972 new_code = swap_condition (new_code);
973 break;
974 case UNORDERED:
975 case LT:
976 case LE:
977 case EQ:
978 break;
979 case ORDERED:
980 if (mode == SFmode)
981 emit_insn (gen_cmpsf_un (operands[0], cmp_op0, cmp_op1));
982 else
983 emit_insn (gen_cmpdf_un (operands[0], cmp_op0, cmp_op1));
985 emit_insn (gen_xorsi3 (operands[0], operands[0], const1_rtx));
986 return;
987 case NE:
988 if (mode == SFmode)
989 emit_insn (gen_cmpsf_eq (operands[0], cmp_op0, cmp_op1));
990 else
991 emit_insn (gen_cmpdf_eq (operands[0], cmp_op0, cmp_op1));
993 emit_insn (gen_xorsi3 (operands[0], operands[0], const1_rtx));
994 return;
995 default:
996 return;
999 emit_insn (gen_rtx_SET (operands[0],
1000 gen_rtx_fmt_ee (new_code, SImode,
1001 cmp_op0, cmp_op1)));
1004 enum nds32_expand_result_type
1005 nds32_expand_movcc (rtx *operands)
1007 enum rtx_code code = GET_CODE (operands[1]);
1008 enum rtx_code new_code = code;
1009 machine_mode cmp0_mode = GET_MODE (XEXP (operands[1], 0));
1010 rtx cmp_op0 = XEXP (operands[1], 0);
1011 rtx cmp_op1 = XEXP (operands[1], 1);
1012 rtx tmp;
1014 if ((GET_CODE (operands[1]) == EQ || GET_CODE (operands[1]) == NE)
1015 && XEXP (operands[1], 1) == const0_rtx)
1017 /* If the operands[1] rtx is already (eq X 0) or (ne X 0),
1018 we have gcc generate original template rtx. */
1019 return EXPAND_CREATE_TEMPLATE;
1021 else if ((TARGET_FPU_SINGLE && cmp0_mode == SFmode)
1022 || (TARGET_FPU_DOUBLE && cmp0_mode == DFmode))
1024 nds32_expand_float_movcc (operands);
1026 else
1028 /* Since there is only 'slt'(Set when Less Than) instruction for
1029 comparison in Andes ISA, the major strategy we use here is to
1030 convert conditional move into 'LT + EQ' or 'LT + NE' rtx combination.
1031 We design constraints properly so that the reload phase will assist
1032 to make one source operand to use same register as result operand.
1033 Then we can use cmovz/cmovn to catch the other source operand
1034 which has different register. */
1035 int reverse = 0;
1037 /* Main Goal: Use 'LT + EQ' or 'LT + NE' to target "then" part
1038 Strategy : Reverse condition and swap comparison operands
1040 For example:
1042 a <= b ? P : Q (LE or LEU)
1043 --> a > b ? Q : P (reverse condition)
1044 --> b < a ? Q : P (swap comparison operands to achieve 'LT/LTU')
1046 a >= b ? P : Q (GE or GEU)
1047 --> a < b ? Q : P (reverse condition to achieve 'LT/LTU')
1049 a < b ? P : Q (LT or LTU)
1050 --> (NO NEED TO CHANGE, it is already 'LT/LTU')
1052 a > b ? P : Q (GT or GTU)
1053 --> b < a ? P : Q (swap comparison operands to achieve 'LT/LTU') */
1054 switch (code)
1056 case GE: case GEU: case LE: case LEU:
1057 new_code = reverse_condition (code);
1058 reverse = 1;
1059 break;
1060 case EQ:
1061 case NE:
1062 /* no need to reverse condition */
1063 break;
1064 default:
1065 return EXPAND_FAIL;
1068 /* For '>' comparison operator, we swap operands
1069 so that we can have 'LT/LTU' operator. */
1070 if (new_code == GT || new_code == GTU)
1072 tmp = cmp_op0;
1073 cmp_op0 = cmp_op1;
1074 cmp_op1 = tmp;
1076 new_code = swap_condition (new_code);
1079 /* Use a temporary register to store slt/slts result. */
1080 tmp = gen_reg_rtx (SImode);
1082 if (new_code == EQ || new_code == NE)
1084 emit_insn (gen_xorsi3 (tmp, cmp_op0, cmp_op1));
1085 /* tmp == 0 if cmp_op0 == cmp_op1. */
1086 operands[1] = gen_rtx_fmt_ee (new_code, VOIDmode, tmp, const0_rtx);
1088 else
1090 /* This emit_insn will create corresponding 'slt/slts'
1091 insturction. */
1092 if (new_code == LT)
1093 emit_insn (gen_slts_compare (tmp, cmp_op0, cmp_op1));
1094 else if (new_code == LTU)
1095 emit_insn (gen_slt_compare (tmp, cmp_op0, cmp_op1));
1096 else
1097 gcc_unreachable ();
1099 /* Change comparison semantic into (eq X 0) or (ne X 0) behavior
1100 so that cmovz or cmovn will be matched later.
1102 For reverse condition cases, we want to create a semantic that:
1103 (eq X 0) --> pick up "else" part
1104 For normal cases, we want to create a semantic that:
1105 (ne X 0) --> pick up "then" part
1107 Later we will have cmovz/cmovn instruction pattern to
1108 match corresponding behavior and output instruction. */
1109 operands[1] = gen_rtx_fmt_ee (reverse ? EQ : NE,
1110 VOIDmode, tmp, const0_rtx);
1113 return EXPAND_CREATE_TEMPLATE;
1116 void
1117 nds32_expand_float_movcc (rtx *operands)
1119 if ((GET_CODE (operands[1]) == EQ || GET_CODE (operands[1]) == NE)
1120 && GET_MODE (XEXP (operands[1], 0)) == SImode
1121 && XEXP (operands[1], 1) == const0_rtx)
1123 /* If the operands[1] rtx is already (eq X 0) or (ne X 0),
1124 we have gcc generate original template rtx. */
1125 return;
1127 else
1129 enum rtx_code code = GET_CODE (operands[1]);
1130 enum rtx_code new_code = code;
1131 machine_mode cmp0_mode = GET_MODE (XEXP (operands[1], 0));
1132 machine_mode cmp1_mode = GET_MODE (XEXP (operands[1], 1));
1133 rtx cmp_op0 = XEXP (operands[1], 0);
1134 rtx cmp_op1 = XEXP (operands[1], 1);
1135 rtx tmp;
1137 /* Compare instruction Operations: (cmp_op0 condition cmp_op1) ? 1 : 0,
1138 when result is 1, and 'reverse' be set 1 for fcmovzs instructuin. */
1139 int reverse = 0;
1141 /* Main Goal: Use cmpare instruction + conditional move instruction.
1142 Strategy : swap condition and swap comparison operands.
1144 For example:
1145 a > b ? P : Q (GT)
1146 --> a < b ? Q : P (swap condition)
1147 --> b < a ? Q : P (swap comparison operands to achieve 'GT')
1149 a >= b ? P : Q (GE)
1150 --> a <= b ? Q : P (swap condition)
1151 --> b <= a ? Q : P (swap comparison operands to achieve 'GE')
1153 a < b ? P : Q (LT)
1154 --> (NO NEED TO CHANGE, it is already 'LT')
1156 a >= b ? P : Q (LE)
1157 --> (NO NEED TO CHANGE, it is already 'LE')
1159 a == b ? P : Q (EQ)
1160 --> (NO NEED TO CHANGE, it is already 'EQ') */
1162 switch (code)
1164 case GT:
1165 case GE:
1166 tmp = cmp_op0;
1167 cmp_op0 = cmp_op1;
1168 cmp_op1 =tmp;
1169 new_code = swap_condition (new_code);
1170 break;
1171 case UNORDERED:
1172 case LT:
1173 case LE:
1174 case EQ:
1175 break;
1176 case ORDERED:
1177 case NE:
1178 reverse = 1;
1179 new_code = reverse_condition (new_code);
1180 break;
1181 case UNGT:
1182 case UNGE:
1183 new_code = reverse_condition_maybe_unordered (new_code);
1184 reverse = 1;
1185 break;
1186 case UNLT:
1187 case UNLE:
1188 new_code = reverse_condition_maybe_unordered (new_code);
1189 tmp = cmp_op0;
1190 cmp_op0 = cmp_op1;
1191 cmp_op1 = tmp;
1192 new_code = swap_condition (new_code);
1193 reverse = 1;
1194 break;
1195 default:
1196 return;
1199 /* Use a temporary register to store fcmpxxs result. */
1200 tmp = gen_reg_rtx (SImode);
1202 /* Create float compare instruction for SFmode and DFmode,
1203 other MODE using cstoresi create compare instruction. */
1204 if ((cmp0_mode == DFmode || cmp0_mode == SFmode)
1205 && (cmp1_mode == DFmode || cmp1_mode == SFmode))
1207 /* This emit_insn create corresponding float compare instruction */
1208 emit_insn (gen_rtx_SET (tmp,
1209 gen_rtx_fmt_ee (new_code, SImode,
1210 cmp_op0, cmp_op1)));
1212 else
1214 /* This emit_insn using cstoresi create corresponding
1215 compare instruction */
1216 PUT_CODE (operands[1], new_code);
1217 emit_insn (gen_cstoresi4 (tmp, operands[1],
1218 cmp_op0, cmp_op1));
1220 /* operands[1] crete corresponding condition move instruction
1221 for fcmovzs and fcmovns. */
1222 operands[1] = gen_rtx_fmt_ee (reverse ? EQ : NE,
1223 VOIDmode, tmp, const0_rtx);
1227 void
1228 nds32_emit_push_fpr_callee_saved (int base_offset)
1230 rtx fpu_insn;
1231 rtx reg, mem;
1232 unsigned int regno = cfun->machine->callee_saved_first_fpr_regno;
1233 unsigned int last_fpr = cfun->machine->callee_saved_last_fpr_regno;
1235 while (regno <= last_fpr)
1237 /* Handling two registers, using fsdi instruction. */
1238 reg = gen_rtx_REG (DFmode, regno);
1239 mem = gen_frame_mem (DFmode, plus_constant (Pmode,
1240 stack_pointer_rtx,
1241 base_offset));
1242 base_offset += 8;
1243 regno += 2;
1244 fpu_insn = emit_move_insn (mem, reg);
1245 RTX_FRAME_RELATED_P (fpu_insn) = 1;
/* Reload the callee-saved FPU registers saved by
   nds32_emit_push_fpr_callee_saved, using post-modify loads so the
   stack pointer is advanced as part of each load.  GPR_PADDING_SIZE
   extra bytes are consumed together with the last register pair so
   that $sp ends up past the whole fpr save area.  Each insn carries
   explicit CFA notes because the epilogue both restores a register
   and moves $sp in a single instruction.  */
void
nds32_emit_pop_fpr_callee_saved (int gpr_padding_size)
{
  rtx fpu_insn;
  rtx reg, mem, addr;
  rtx dwarf, adjust_sp_rtx;
  unsigned int regno = cfun->machine->callee_saved_first_fpr_regno;
  unsigned int last_fpr = cfun->machine->callee_saved_last_fpr_regno;
  int padding = 0;

  while (regno <= last_fpr)
    {
      /* Handling two registers, using fldi.bi instruction.  */
      if ((regno + 1) >= last_fpr)
	/* The final pair also skips the gpr padding bytes.  */
	padding = gpr_padding_size;

      reg = gen_rtx_REG (DFmode, (regno));
      /* Post-modify address: load from [$sp], then $sp += 8 + padding.  */
      addr = gen_rtx_POST_MODIFY (Pmode, stack_pointer_rtx,
				  gen_rtx_PLUS (Pmode, stack_pointer_rtx,
						GEN_INT (8 + padding)));
      mem = gen_frame_mem (DFmode, addr);
      regno += 2;
      fpu_insn = emit_move_insn (reg, mem);

      /* Describe the implicit stack-pointer adjustment for the
	 unwinder, since the load itself modifies $sp.  */
      adjust_sp_rtx =
	gen_rtx_SET (stack_pointer_rtx,
		     plus_constant (Pmode, stack_pointer_rtx,
				    8 + padding));

      dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, NULL_RTX);
      /* Tell gcc we adjust SP in this insn.  */
      dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, copy_rtx (adjust_sp_rtx),
			      dwarf);
      RTX_FRAME_RELATED_P (fpu_insn) = 1;
      REG_NOTES (fpu_insn) = dwarf;
    }
}
1287 void
1288 nds32_emit_v3pop_fpr_callee_saved (int base)
1290 int fpu_base_addr = base;
1291 int regno;
1292 rtx fpu_insn;
1293 rtx reg, mem;
1294 rtx dwarf;
1296 regno = cfun->machine->callee_saved_first_fpr_regno;
1297 while (regno <= cfun->machine->callee_saved_last_fpr_regno)
1299 /* Handling two registers, using fldi instruction. */
1300 reg = gen_rtx_REG (DFmode, regno);
1301 mem = gen_frame_mem (DFmode, plus_constant (Pmode,
1302 stack_pointer_rtx,
1303 fpu_base_addr));
1304 fpu_base_addr += 8;
1305 regno += 2;
1306 fpu_insn = emit_move_insn (reg, mem);
1307 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, NULL_RTX);
1308 RTX_FRAME_RELATED_P (fpu_insn) = 1;
1309 REG_NOTES (fpu_insn) = dwarf;
1313 enum nds32_expand_result_type
1314 nds32_expand_extv (rtx *operands)
1316 gcc_assert (CONST_INT_P (operands[2]) && CONST_INT_P (operands[3]));
1317 HOST_WIDE_INT width = INTVAL (operands[2]);
1318 HOST_WIDE_INT bitpos = INTVAL (operands[3]);
1319 rtx dst = operands[0];
1320 rtx src = operands[1];
1322 if (MEM_P (src)
1323 && width == 32
1324 && (bitpos % BITS_PER_UNIT) == 0
1325 && GET_MODE_BITSIZE (GET_MODE (dst)) == width)
1327 rtx newmem = adjust_address (src, GET_MODE (dst),
1328 bitpos / BITS_PER_UNIT);
1330 rtx base_addr = force_reg (Pmode, XEXP (newmem, 0));
1332 emit_insn (gen_unaligned_loadsi (dst, base_addr));
1334 return EXPAND_DONE;
1336 return EXPAND_FAIL;
1339 enum nds32_expand_result_type
1340 nds32_expand_insv (rtx *operands)
1342 gcc_assert (CONST_INT_P (operands[1]) && CONST_INT_P (operands[2]));
1343 HOST_WIDE_INT width = INTVAL (operands[1]);
1344 HOST_WIDE_INT bitpos = INTVAL (operands[2]);
1345 rtx dst = operands[0];
1346 rtx src = operands[3];
1348 if (MEM_P (dst)
1349 && width == 32
1350 && (bitpos % BITS_PER_UNIT) == 0
1351 && GET_MODE_BITSIZE (GET_MODE (src)) == width)
1353 rtx newmem = adjust_address (dst, GET_MODE (src),
1354 bitpos / BITS_PER_UNIT);
1356 rtx base_addr = force_reg (Pmode, XEXP (newmem, 0));
1358 emit_insn (gen_unaligned_storesi (base_addr, src));
1360 return EXPAND_DONE;
1362 return EXPAND_FAIL;
1365 /* ------------------------------------------------------------------------ */
1367 /* Function to generate PC relative jump table.
1368 Refer to nds32.md for more details.
1370 The following is the sample for the case that diff value
1371 can be presented in '.short' size.
1373 addi $r1, $r1, -(case_lower_bound)
1374 slti $ta, $r1, (case_number)
1375 beqz $ta, .L_skip_label
1377 la $ta, .L35 ! get jump table address
1378 lh $r1, [$ta + $r1 << 1] ! load symbol diff from jump table entry
1379 addi $ta, $r1, $ta
1380 jr5 $ta
1382 ! jump table entry
1383 L35:
1384 .short .L25-.L35
1385 .short .L26-.L35
1386 .short .L27-.L35
1387 .short .L28-.L35
1388 .short .L29-.L35
1389 .short .L30-.L35
1390 .short .L31-.L35
1391 .short .L32-.L35
1392 .short .L33-.L35
1393 .short .L34-.L35 */
1394 const char *
1395 nds32_output_casesi_pc_relative (rtx *operands)
1397 machine_mode mode;
1398 rtx diff_vec;
1400 diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[1])));
1402 gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
1404 /* Step C: "t <-- operands[1]". */
1405 if (flag_pic)
1407 output_asm_insn ("sethi\t$ta, hi20(%l1@GOTOFF)", operands);
1408 output_asm_insn ("ori\t$ta, $ta, lo12(%l1@GOTOFF)", operands);
1409 output_asm_insn ("add\t$ta, $ta, $gp", operands);
1411 else
1412 output_asm_insn ("la\t$ta, %l1", operands);
1414 /* Get the mode of each element in the difference vector. */
1415 mode = GET_MODE (diff_vec);
1417 /* Step D: "z <-- (mem (plus (operands[0] << m) t))",
1418 where m is 0, 1, or 2 to load address-diff value from table. */
1419 switch (mode)
1421 case E_QImode:
1422 output_asm_insn ("lb\t%2, [$ta + %0 << 0]", operands);
1423 break;
1424 case E_HImode:
1425 output_asm_insn ("lh\t%2, [$ta + %0 << 1]", operands);
1426 break;
1427 case E_SImode:
1428 output_asm_insn ("lw\t%2, [$ta + %0 << 2]", operands);
1429 break;
1430 default:
1431 gcc_unreachable ();
1434 /* Step E: "t <-- z + t".
1435 Add table label_ref with address-diff value to
1436 obtain target case address. */
1437 output_asm_insn ("add\t$ta, %2, $ta", operands);
1439 /* Step F: jump to target with register t. */
1440 if (TARGET_16_BIT)
1441 return "jr5\t$ta";
1442 else
1443 return "jr\t$ta";
1446 /* Function to generate normal jump table. */
1447 const char *
1448 nds32_output_casesi (rtx *operands)
1450 /* Step C: "t <-- operands[1]". */
1451 if (flag_pic)
1453 output_asm_insn ("sethi\t$ta, hi20(%l1@GOTOFF)", operands);
1454 output_asm_insn ("ori\t$ta, $ta, lo12(%l1@GOTOFF)", operands);
1455 output_asm_insn ("add\t$ta, $ta, $gp", operands);
1457 else
1458 output_asm_insn ("la\t$ta, %l1", operands);
1460 /* Step D: "z <-- (mem (plus (operands[0] << 2) t))". */
1461 output_asm_insn ("lw\t%2, [$ta + %0 << 2]", operands);
1463 /* No need to perform Step E, which is only used for
1464 pc relative jump table. */
1466 /* Step F: jump to target with register z. */
1467 if (TARGET_16_BIT)
1468 return "jr5\t%2";
1469 else
1470 return "jr\t%2";
/* Function to return memory format.
   Classify the MEM rtx OP into one of the 16-bit addressing formats
   (enum nds32_16bit_address_type) so the output routines can pick a
   16-bit instruction encoding; ADDRESS_NOT_16BIT_FORMAT means a
   32-bit instruction must be used instead.  */
enum nds32_16bit_address_type
nds32_mem_format (rtx op)
{
  machine_mode mode_test;
  int val;
  int regno;

  /* 16-bit encodings only exist when generating 16-bit ISA code.  */
  if (!TARGET_16_BIT)
    return ADDRESS_NOT_16BIT_FORMAT;

  mode_test = GET_MODE (op);

  /* Examine the address inside the MEM.  */
  op = XEXP (op, 0);

  /* 45 format.  */
  if (GET_CODE (op) == REG
      && ((mode_test == SImode) || (mode_test == SFmode)))
    return ADDRESS_REG;

  /* 333 format for QI/HImode.  */
  if (GET_CODE (op) == REG && (REGNO (op) < R8_REGNUM))
    return ADDRESS_LO_REG_IMM3U;

  /* post_inc 333 format.  */
  if ((GET_CODE (op) == POST_INC)
      && ((mode_test == SImode) || (mode_test == SFmode)))
    {
      regno = REGNO(XEXP (op, 0));

      if (regno < 8)
	return ADDRESS_POST_INC_LO_REG_IMM3U;
    }

  /* post_modify 333 format.  */
  if ((GET_CODE (op) == POST_MODIFY)
      && ((mode_test == SImode) || (mode_test == SFmode))
      && (REG_P (XEXP (XEXP (op, 1), 0)))
      && (CONST_INT_P (XEXP (XEXP (op, 1), 1))))
    {
      regno = REGNO (XEXP (XEXP (op, 1), 0));
      val = INTVAL (XEXP (XEXP (op, 1), 1));
      if (regno < 8 && val > 0 && val < 32)
	return ADDRESS_POST_MODIFY_LO_REG_IMM3U;
    }

  /* (reg + const_int) addressing: which format applies depends on the
     base register and on the offset's range and alignment.  */
  if ((GET_CODE (op) == PLUS)
      && (GET_CODE (XEXP (op, 0)) == REG)
      && (GET_CODE (XEXP (op, 1)) == CONST_INT))
    {
      val = INTVAL (XEXP (op, 1));

      regno = REGNO(XEXP (op, 0));

      /* Only low registers, r8, $fp and $sp have 16-bit base-plus-offset
	 encodings.  */
      if (regno > 8
	  && regno != SP_REGNUM
	  && regno != FP_REGNUM)
	return ADDRESS_NOT_16BIT_FORMAT;

      switch (mode_test)
	{
	case E_QImode:
	  /* 333 format.  */
	  if (val >= 0 && val < 8 && regno < 8)
	    return ADDRESS_LO_REG_IMM3U;
	  break;

	case E_HImode:
	  /* 333 format.  */
	  if (val >= 0 && val < 16 && (val % 2 == 0) && regno < 8)
	    return ADDRESS_LO_REG_IMM3U;
	  break;

	case E_SImode:
	case E_SFmode:
	case E_DFmode:
	  /* r8 imply fe format.  */
	  if ((regno == 8) &&
	      (val >= -128 && val <= -4 && (val % 4 == 0)))
	    return ADDRESS_R8_IMM7U;
	  /* fp imply 37 format.  */
	  if ((regno == FP_REGNUM) &&
	      (val >= 0 && val < 512 && (val % 4 == 0)))
	    return ADDRESS_FP_IMM7U;
	  /* sp imply 37 format.  */
	  else if ((regno == SP_REGNUM) &&
		   (val >= 0 && val < 512 && (val % 4 == 0)))
	    return ADDRESS_SP_IMM7U;
	  /* 333 format.  */
	  else if (val >= 0 && val < 32 && (val % 4 == 0) && regno < 8)
	    return ADDRESS_LO_REG_IMM3U;
	  break;

	default:
	  break;
	}
    }

  return ADDRESS_NOT_16BIT_FORMAT;
}
/* Output 16-bit store.
   BYTE is the access size in bytes (1, 2 or 4); the encoding is
   selected from the 16-bit addressing format that nds32_mem_format()
   assigns to operands[0].  Assembly is emitted via output_asm_insn(),
   so "" is returned to the caller.  */
const char *
nds32_output_16bit_store (rtx *operands, int byte)
{
  char pattern[100];
  char size;
  rtx code = XEXP (operands[0], 0);

  /* 'b', 'h' or 'w' according to BYTE.  */
  size = nds32_byte_to_size (byte);

  switch (nds32_mem_format (operands[0]))
    {
    case ADDRESS_REG:
      operands[0] = code;
      output_asm_insn ("swi450\t%1, [%0]", operands);
      break;
    case ADDRESS_LO_REG_IMM3U:
      snprintf (pattern, sizeof (pattern), "s%ci333\t%%1, %%0", size);
      output_asm_insn (pattern, operands);
      break;
    case ADDRESS_POST_INC_LO_REG_IMM3U:
      snprintf (pattern, sizeof (pattern), "swi333.bi\t%%1, %%0, 4");
      output_asm_insn (pattern, operands);
      break;
    case ADDRESS_POST_MODIFY_LO_REG_IMM3U:
      snprintf (pattern, sizeof (pattern), "swi333.bi\t%%1, %%0");
      output_asm_insn (pattern, operands);
      break;
    case ADDRESS_FP_IMM7U:
      output_asm_insn ("swi37\t%1, %0", operands);
      break;
    case ADDRESS_SP_IMM7U:
      /* Get immediate value and set back to operands[0].  */
      operands[0] = XEXP (code, 1);
      output_asm_insn ("swi37.sp\t%1, [ + (%0)]", operands);
      break;
    default:
      break;
    }

  return "";
}
/* Output 16-bit load.
   BYTE is the access size in bytes (1, 2 or 4); the encoding is
   selected from the 16-bit addressing format that nds32_mem_format()
   assigns to operands[1].  Assembly is emitted via output_asm_insn(),
   so "" is returned to the caller.  */
const char *
nds32_output_16bit_load (rtx *operands, int byte)
{
  char pattern[100];
  unsigned char size;
  rtx code = XEXP (operands[1], 0);

  /* 'b', 'h' or 'w' according to BYTE.  */
  size = nds32_byte_to_size (byte);

  switch (nds32_mem_format (operands[1]))
    {
    case ADDRESS_REG:
      operands[1] = code;
      output_asm_insn ("lwi450\t%0, [%1]", operands);
      break;
    case ADDRESS_LO_REG_IMM3U:
      snprintf (pattern, sizeof (pattern), "l%ci333\t%%0, %%1", size);
      output_asm_insn (pattern, operands);
      break;
    case ADDRESS_POST_INC_LO_REG_IMM3U:
      snprintf (pattern, sizeof (pattern), "lwi333.bi\t%%0, %%1, 4");
      output_asm_insn (pattern, operands);
      break;
    case ADDRESS_POST_MODIFY_LO_REG_IMM3U:
      snprintf (pattern, sizeof (pattern), "lwi333.bi\t%%0, %%1");
      output_asm_insn (pattern, operands);
      break;
    case ADDRESS_R8_IMM7U:
      output_asm_insn ("lwi45.fe\t%0, %e1", operands);
      break;
    case ADDRESS_FP_IMM7U:
      output_asm_insn ("lwi37\t%0, %1", operands);
      break;
    case ADDRESS_SP_IMM7U:
      /* Get immediate value and set back to operands[1].  */
      operands[1] = XEXP (code, 1);
      output_asm_insn ("lwi37.sp\t%0, [ + (%1)]", operands);
      break;
    default:
      break;
    }

  return "";
}
/* Output 32-bit store.
   BYTE is the access size in bytes (1, 2 or 4); the instruction
   variant is chosen from the shape of the address inside the MEM
   operands[0].  Assembly is emitted via output_asm_insn(), so "" is
   returned to the caller.  */
const char *
nds32_output_32bit_store (rtx *operands, int byte)
{
  char pattern[100];
  unsigned char size;
  rtx code = XEXP (operands[0], 0);

  /* 'b', 'h' or 'w' according to BYTE.  */
  size = nds32_byte_to_size (byte);

  switch (GET_CODE (code))
    {
    case REG:
      /* (mem (reg X))
	 => access location by using register,
	 use "sbi / shi / swi" */
      snprintf (pattern, sizeof (pattern), "s%ci\t%%1, %%0", size);
      break;

    case SYMBOL_REF:
    case CONST:
      /* (mem (symbol_ref X))
	 (mem (const (...)))
	 => access global variables,
	 use "sbi.gp / shi.gp / swi.gp" */
      operands[0] = XEXP (operands[0], 0);
      snprintf (pattern, sizeof (pattern), "s%ci.gp\t%%1, [ + %%0]", size);
      break;

    case POST_INC:
      /* (mem (post_inc reg))
	 => access location by using register which will be post increment,
	 use "sbi.bi / shi.bi / swi.bi" */
      snprintf (pattern, sizeof (pattern),
		"s%ci.bi\t%%1, %%0, %d", size, byte);
      break;

    case POST_DEC:
      /* (mem (post_dec reg))
	 => access location by using register which will be post decrement,
	 use "sbi.bi / shi.bi / swi.bi" */
      snprintf (pattern, sizeof (pattern),
		"s%ci.bi\t%%1, %%0, -%d", size, byte);
      break;

    case POST_MODIFY:
      switch (GET_CODE (XEXP (XEXP (code, 1), 1)))
	{
	case REG:
	case SUBREG:
	  /* (mem (post_modify (reg) (plus (reg) (reg))))
	     => access location by using register which will be
	     post modified with reg,
	     use "sb.bi/ sh.bi / sw.bi" */
	  snprintf (pattern, sizeof (pattern), "s%c.bi\t%%1, %%0", size);
	  break;
	case CONST_INT:
	  /* (mem (post_modify (reg) (plus (reg) (const_int))))
	     => access location by using register which will be
	     post modified with const_int,
	     use "sbi.bi/ shi.bi / swi.bi" */
	  snprintf (pattern, sizeof (pattern), "s%ci.bi\t%%1, %%0", size);
	  break;
	default:
	  abort ();
	}
      break;

    case PLUS:
      switch (GET_CODE (XEXP (code, 1)))
	{
	case REG:
	case SUBREG:
	  /* (mem (plus reg reg)) or (mem (plus (mult reg const_int) reg))
	     => access location by adding two registers,
	     use "sb / sh / sw" */
	  snprintf (pattern, sizeof (pattern), "s%c\t%%1, %%0", size);
	  break;
	case CONST_INT:
	  /* (mem (plus reg const_int))
	     => access location by adding one register with const_int,
	     use "sbi / shi / swi" */
	  snprintf (pattern, sizeof (pattern), "s%ci\t%%1, %%0", size);
	  break;
	default:
	  abort ();
	}
      break;

    case LO_SUM:
      /* Split the low part of the symbol out so the template can
	 print "[reg + lo12(sym)]".  */
      operands[2] = XEXP (code, 1);
      operands[0] = XEXP (code, 0);
      snprintf (pattern, sizeof (pattern),
		"s%ci\t%%1, [%%0 + lo12(%%2)]", size);
      break;

    default:
      abort ();
    }

  output_asm_insn (pattern, operands);
  return "";
}
/* Output 32-bit load.
   BYTE is the access size in bytes (1, 2 or 4); the instruction
   variant is chosen from the shape of the address inside the MEM
   operands[1].  Assembly is emitted via output_asm_insn(), so "" is
   returned to the caller.  */
const char *
nds32_output_32bit_load (rtx *operands, int byte)
{
  char pattern[100];
  unsigned char size;
  rtx code;

  code = XEXP (operands[1], 0);

  /* 'b', 'h' or 'w' according to BYTE.  */
  size = nds32_byte_to_size (byte);

  switch (GET_CODE (code))
    {
    case REG:
      /* (mem (reg X))
	 => access location by using register,
	 use "lbi / lhi / lwi" */
      snprintf (pattern, sizeof (pattern), "l%ci\t%%0, %%1", size);
      break;

    case SYMBOL_REF:
    case CONST:
      /* (mem (symbol_ref X))
	 (mem (const (...)))
	 => access global variables,
	 use "lbi.gp / lhi.gp / lwi.gp" */
      operands[1] = XEXP (operands[1], 0);
      snprintf (pattern, sizeof (pattern), "l%ci.gp\t%%0, [ + %%1]", size);
      break;

    case POST_INC:
      /* (mem (post_inc reg))
	 => access location by using register which will be post increment,
	 use "lbi.bi / lhi.bi / lwi.bi" */
      snprintf (pattern, sizeof (pattern),
		"l%ci.bi\t%%0, %%1, %d", size, byte);
      break;

    case POST_DEC:
      /* (mem (post_dec reg))
	 => access location by using register which will be post decrement,
	 use "lbi.bi / lhi.bi / lwi.bi" */
      snprintf (pattern, sizeof (pattern),
		"l%ci.bi\t%%0, %%1, -%d", size, byte);
      break;

    case POST_MODIFY:
      switch (GET_CODE (XEXP (XEXP (code, 1), 1)))
	{
	case REG:
	case SUBREG:
	  /* (mem (post_modify (reg) (plus (reg) (reg))))
	     => access location by using register which will be
	     post modified with reg,
	     use "lb.bi/ lh.bi / lw.bi" */
	  snprintf (pattern, sizeof (pattern), "l%c.bi\t%%0, %%1", size);
	  break;
	case CONST_INT:
	  /* (mem (post_modify (reg) (plus (reg) (const_int))))
	     => access location by using register which will be
	     post modified with const_int,
	     use "lbi.bi/ lhi.bi / lwi.bi" */
	  snprintf (pattern, sizeof (pattern), "l%ci.bi\t%%0, %%1", size);
	  break;
	default:
	  abort ();
	}
      break;

    case PLUS:
      switch (GET_CODE (XEXP (code, 1)))
	{
	case REG:
	case SUBREG:
	  /* (mem (plus reg reg)) or (mem (plus (mult reg const_int) reg))
	     => access location by adding two registers,
	     use "lb / lh / lw" */
	  snprintf (pattern, sizeof (pattern), "l%c\t%%0, %%1", size);
	  break;
	case CONST_INT:
	  /* (mem (plus reg const_int))
	     => access location by adding one register with const_int,
	     use "lbi / lhi / lwi" */
	  snprintf (pattern, sizeof (pattern), "l%ci\t%%0, %%1", size);
	  break;
	default:
	  abort ();
	}
      break;

    case LO_SUM:
      /* Split the low part of the symbol out so the template can
	 print "[reg + lo12(sym)]".  */
      operands[2] = XEXP (code, 1);
      operands[1] = XEXP (code, 0);
      snprintf (pattern, sizeof (pattern),
		"l%ci\t%%0, [%%1 + lo12(%%2)]", size);
      break;

    default:
      abort ();
    }

  output_asm_insn (pattern, operands);
  return "";
}
/* Output 32-bit load with signed extension.
   BYTE is the access size in bytes (1 or 2); the instruction variant
   is chosen from the shape of the address inside the MEM operands[1].
   Assembly is emitted via output_asm_insn(), so "" is returned to the
   caller.  */
const char *
nds32_output_32bit_load_s (rtx *operands, int byte)
{
  char pattern[100];
  unsigned char size;
  rtx code;

  code = XEXP (operands[1], 0);

  /* 'b' or 'h' according to BYTE.  */
  size = nds32_byte_to_size (byte);

  switch (GET_CODE (code))
    {
    case REG:
      /* (mem (reg X))
	 => access location by using register,
	 use "lbsi / lhsi" */
      snprintf (pattern, sizeof (pattern), "l%csi\t%%0, %%1", size);
      break;

    case SYMBOL_REF:
    case CONST:
      /* (mem (symbol_ref X))
	 (mem (const (...)))
	 => access global variables,
	 use "lbsi.gp / lhsi.gp" */
      operands[1] = XEXP (operands[1], 0);
      snprintf (pattern, sizeof (pattern), "l%csi.gp\t%%0, [ + %%1]", size);
      break;

    case POST_INC:
      /* (mem (post_inc reg))
	 => access location by using register which will be post increment,
	 use "lbsi.bi / lhsi.bi" */
      snprintf (pattern, sizeof (pattern),
		"l%csi.bi\t%%0, %%1, %d", size, byte);
      break;

    case POST_DEC:
      /* (mem (post_dec reg))
	 => access location by using register which will be post decrement,
	 use "lbsi.bi / lhsi.bi" */
      snprintf (pattern, sizeof (pattern),
		"l%csi.bi\t%%0, %%1, -%d", size, byte);
      break;

    case POST_MODIFY:
      switch (GET_CODE (XEXP (XEXP (code, 1), 1)))
	{
	case REG:
	case SUBREG:
	  /* (mem (post_modify (reg) (plus (reg) (reg))))
	     => access location by using register which will be
	     post modified with reg,
	     use "lbs.bi/ lhs.bi" */
	  snprintf (pattern, sizeof (pattern), "l%cs.bi\t%%0, %%1", size);
	  break;
	case CONST_INT:
	  /* (mem (post_modify (reg) (plus (reg) (const_int))))
	     => access location by using register which will be
	     post modified with const_int,
	     use "lbsi.bi/ lhsi.bi" */
	  snprintf (pattern, sizeof (pattern), "l%csi.bi\t%%0, %%1", size);
	  break;
	default:
	  abort ();
	}
      break;

    case PLUS:
      switch (GET_CODE (XEXP (code, 1)))
	{
	case REG:
	case SUBREG:
	  /* (mem (plus reg reg)) or (mem (plus (mult reg const_int) reg))
	     => access location by adding two registers,
	     use "lbs / lhs" */
	  snprintf (pattern, sizeof (pattern), "l%cs\t%%0, %%1", size);
	  break;
	case CONST_INT:
	  /* (mem (plus reg const_int))
	     => access location by adding one register with const_int,
	     use "lbsi / lhsi" */
	  snprintf (pattern, sizeof (pattern), "l%csi\t%%0, %%1", size);
	  break;
	default:
	  abort ();
	}
      break;

    case LO_SUM:
      /* Split the low part of the symbol out so the template can
	 print "[reg + lo12(sym)]".  */
      operands[2] = XEXP (code, 1);
      operands[1] = XEXP (code, 0);
      snprintf (pattern, sizeof (pattern),
		"l%csi\t%%0, [%%1 + lo12(%%2)]", size);
      break;

    default:
      abort ();
    }

  output_asm_insn (pattern, operands);
  return "";
}
/* Function to output stack push operation.
   We need to deal with normal stack push multiple or stack v3push.
   PAR_RTX is the PARALLEL describing which registers the push covers;
   it is inspected to distinguish the varargs push, the eh-data push,
   and the ordinary callee-saved push.  Returns "" because all assembly
   is emitted via output_asm_insn().  */
const char *
nds32_output_stack_push (rtx par_rtx)
{
  /* A string pattern for output_asm_insn().  */
  char pattern[100];
  /* The operands array which will be used in output_asm_insn().  */
  rtx operands[3];
  /* Pick up varargs first regno and last regno for further use.  */
  int rb_va_args = cfun->machine->va_args_first_regno;
  int re_va_args = cfun->machine->va_args_last_regno;
  int last_argument_regno = NDS32_FIRST_GPR_REGNUM
			    + NDS32_MAX_GPR_REGS_FOR_ARGS
			    - 1;
  /* Pick up first and last eh data regno for further use.  */
  int rb_eh_data = cfun->machine->eh_return_data_first_regno;
  int re_eh_data = cfun->machine->eh_return_data_last_regno;
  int first_eh_data_regno = EH_RETURN_DATA_REGNO (0);
  /* Pick up callee-saved first regno and last regno for further use.  */
  int rb_callee_saved = cfun->machine->callee_saved_first_gpr_regno;
  int re_callee_saved = cfun->machine->callee_saved_last_gpr_regno;

  /* First we need to check if we are pushing argument registers not used
     for the named arguments.  If so, we have to create 'smw.adm' (push.s)
     instruction.  */
  if (reg_mentioned_p (gen_rtx_REG (SImode, last_argument_regno), par_rtx))
    {
      /* Set operands[0] and operands[1].  */
      operands[0] = gen_rtx_REG (SImode, rb_va_args);
      operands[1] = gen_rtx_REG (SImode, re_va_args);
      /* Create assembly code pattern: "Rb, Re, { }".  */
      snprintf (pattern, sizeof (pattern), "push.s\t%s", "%0, %1, { }");
      /* We use output_asm_insn() to output assembly code by ourselves.  */
      output_asm_insn (pattern, operands);
      return "";
    }

  /* If last_argument_regno is not mentioned in par_rtx, we can confirm that
     we do not need to push argument registers for variadic function.
     But we still need to check if we need to push exception handling
     data registers.  */
  if (reg_mentioned_p (gen_rtx_REG (SImode, first_eh_data_regno), par_rtx))
    {
      /* Set operands[0] and operands[1].  */
      operands[0] = gen_rtx_REG (SImode, rb_eh_data);
      operands[1] = gen_rtx_REG (SImode, re_eh_data);
      /* Create assembly code pattern: "Rb, Re, { }".  */
      snprintf (pattern, sizeof (pattern), "push.s\t%s", "%0, %1, { }");
      /* We use output_asm_insn() to output assembly code by ourselves.  */
      output_asm_insn (pattern, operands);
      return "";
    }

  /* If we step here, we are going to do v3push or multiple push operation.  */

  /* Refer to nds32.h, where we comment when push25/pop25 are available.  */
  if (NDS32_V3PUSH_AVAILABLE_P)
    {
      /* For stack v3push:
	   operands[0]: Re
	   operands[1]: imm8u */

      /* This variable is to check if 'push25 Re,imm8u' is available.  */
      int sp_adjust;

      /* Set operands[0].  */
      operands[0] = gen_rtx_REG (SImode, re_callee_saved);

      /* Check if we can generate 'push25 Re,imm8u',
	 otherwise, generate 'push25 Re,0'.  */
      sp_adjust = cfun->machine->local_size
		  + cfun->machine->out_args_size
		  + cfun->machine->callee_saved_area_gpr_padding_bytes
		  + cfun->machine->callee_saved_fpr_regs_size;
      if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
	  && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust))
	operands[1] = GEN_INT (sp_adjust);
      else
	{
	  /* Allocate callee saved fpr space.  (SP_REGNUM here is the
	     sentinel meaning "no callee-saved fpr registers".)  */
	  if (cfun->machine->callee_saved_first_fpr_regno != SP_REGNUM)
	    {
	      sp_adjust = cfun->machine->callee_saved_area_gpr_padding_bytes
			  + cfun->machine->callee_saved_fpr_regs_size;
	      operands[1] = GEN_INT (sp_adjust);
	    }
	  else
	    {
	      operands[1] = GEN_INT (0);
	    }
	}

      /* Create assembly code pattern.  */
      snprintf (pattern, sizeof (pattern), "push25\t%%0, %%1");
    }
  else
    {
      /* For normal stack push multiple:
	   operands[0]: Rb
	   operands[1]: Re
	   operands[2]: En4 */

      /* This variable is used to check if we only need to generate En4 field.
	 As long as Rb==Re=SP_REGNUM, we set this variable to 1.  */
      int push_en4_only_p = 0;

      /* Set operands[0] and operands[1].  */
      operands[0] = gen_rtx_REG (SImode, rb_callee_saved);
      operands[1] = gen_rtx_REG (SImode, re_callee_saved);

      /* 'smw.adm $sp,[$sp],$sp,0' means push nothing.  */
      if (!cfun->machine->fp_size
	  && !cfun->machine->gp_size
	  && !cfun->machine->lp_size
	  && REGNO (operands[0]) == SP_REGNUM
	  && REGNO (operands[1]) == SP_REGNUM)
	{
	  /* No need to generate instruction.  */
	  return "";
	}
      else
	{
	  /* If Rb==Re=SP_REGNUM, we only need to generate En4 field.  */
	  if (REGNO (operands[0]) == SP_REGNUM
	      && REGNO (operands[1]) == SP_REGNUM)
	    push_en4_only_p = 1;

	  /* Create assembly code pattern.
	     We need to handle the form: "Rb, Re, { $fp $gp $lp }".  */
	  snprintf (pattern, sizeof (pattern),
		    "push.s\t%s{%s%s%s }",
		    push_en4_only_p ? "" : "%0, %1, ",
		    cfun->machine->fp_size ? " $fp" : "",
		    cfun->machine->gp_size ? " $gp" : "",
		    cfun->machine->lp_size ? " $lp" : "");
	}
    }

  /* We use output_asm_insn() to output assembly code by ourselves.  */
  output_asm_insn (pattern, operands);
  return "";
}
/* Function to output stack pop operation.
   We need to deal with normal stack pop multiple or stack v3pop.
   PAR_RTX is the PARALLEL describing which registers the pop covers;
   it is inspected to detect the eh-data pop.  Returns "" because all
   assembly is emitted via output_asm_insn().  */
const char *
nds32_output_stack_pop (rtx par_rtx ATTRIBUTE_UNUSED)
{
  /* A string pattern for output_asm_insn().  */
  char pattern[100];
  /* The operands array which will be used in output_asm_insn().  */
  rtx operands[3];
  /* Pick up first and last eh data regno for further use.  */
  int rb_eh_data = cfun->machine->eh_return_data_first_regno;
  int re_eh_data = cfun->machine->eh_return_data_last_regno;
  int first_eh_data_regno = EH_RETURN_DATA_REGNO (0);
  /* Pick up callee-saved first regno and last regno for further use.  */
  int rb_callee_saved = cfun->machine->callee_saved_first_gpr_regno;
  int re_callee_saved = cfun->machine->callee_saved_last_gpr_regno;

  /* We need to check if we need to pop exception handling
     data registers.  */
  if (reg_mentioned_p (gen_rtx_REG (SImode, first_eh_data_regno), par_rtx))
    {
      /* Set operands[0] and operands[1].  */
      operands[0] = gen_rtx_REG (SImode, rb_eh_data);
      operands[1] = gen_rtx_REG (SImode, re_eh_data);
      /* Create assembly code pattern: "Rb, Re, { }".  */
      snprintf (pattern, sizeof (pattern), "pop.s\t%s", "%0, %1, { }");
      /* We use output_asm_insn() to output assembly code by ourselves.  */
      output_asm_insn (pattern, operands);
      return "";
    }

  /* If we step here, we are going to do v3pop or multiple pop operation.  */

  /* Refer to nds32.h, where we comment when push25/pop25 are available.  */
  if (NDS32_V3PUSH_AVAILABLE_P)
    {
      /* For stack v3pop:
	   operands[0]: Re
	   operands[1]: imm8u */

      /* This variable is to check if 'pop25 Re,imm8u' is available.  */
      int sp_adjust;

      /* Set operands[0].  */
      operands[0] = gen_rtx_REG (SImode, re_callee_saved);

      /* Check if we can generate 'pop25 Re,imm8u',
	 otherwise, generate 'pop25 Re,0'.
	 We have to consider alloca issue as well.
	 If the function does call alloca(), the stack pointer is not fixed.
	 In that case, we cannot use 'pop25 Re,imm8u' directly.
	 We have to calculate stack pointer from frame pointer
	 and then use 'pop25 Re,0'.  */
      sp_adjust = cfun->machine->local_size
		  + cfun->machine->out_args_size
		  + cfun->machine->callee_saved_area_gpr_padding_bytes
		  + cfun->machine->callee_saved_fpr_regs_size;
      if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
	  && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust)
	  && !cfun->calls_alloca)
	operands[1] = GEN_INT (sp_adjust);
      else
	{
	  if (cfun->machine->callee_saved_first_fpr_regno != SP_REGNUM)
	    {
	      /* If there are fpr registers to restore, $sp currently sits
		 at the callee-saved fpr area, so we need to consider gpr
		 padding bytes and callee saved fpr size.  */
	      sp_adjust = cfun->machine->callee_saved_area_gpr_padding_bytes
			  + cfun->machine->callee_saved_fpr_regs_size;
	      operands[1] = GEN_INT (sp_adjust);
	    }
	  else
	    {
	      operands[1] = GEN_INT (0);
	    }
	}

      /* Create assembly code pattern.  */
      snprintf (pattern, sizeof (pattern), "pop25\t%%0, %%1");
    }
  else
    {
      /* For normal stack pop multiple:
	   operands[0]: Rb
	   operands[1]: Re
	   operands[2]: En4 */

      /* This variable is used to check if we only need to generate En4 field.
	 As long as Rb==Re=SP_REGNUM, we set this variable to 1.  */
      int pop_en4_only_p = 0;

      /* Set operands[0] and operands[1].  */
      operands[0] = gen_rtx_REG (SImode, rb_callee_saved);
      operands[1] = gen_rtx_REG (SImode, re_callee_saved);

      /* 'lmw.bim $sp,[$sp],$sp,0' means pop nothing.  */
      if (!cfun->machine->fp_size
	  && !cfun->machine->gp_size
	  && !cfun->machine->lp_size
	  && REGNO (operands[0]) == SP_REGNUM
	  && REGNO (operands[1]) == SP_REGNUM)
	{
	  /* No need to generate instruction.  */
	  return "";
	}
      else
	{
	  /* If Rb==Re=SP_REGNUM, we only need to generate En4 field.  */
	  if (REGNO (operands[0]) == SP_REGNUM
	      && REGNO (operands[1]) == SP_REGNUM)
	    pop_en4_only_p = 1;

	  /* Create assembly code pattern.
	     We need to handle the form: "Rb, Re, { $fp $gp $lp }".  */
	  snprintf (pattern, sizeof (pattern),
		    "pop.s\t%s{%s%s%s }",
		    pop_en4_only_p ? "" : "%0, %1, ",
		    cfun->machine->fp_size ? " $fp" : "",
		    cfun->machine->gp_size ? " $gp" : "",
		    cfun->machine->lp_size ? " $lp" : "");
	}
    }

  /* We use output_asm_insn() to output assembly code by ourselves.  */
  output_asm_insn (pattern, operands);
  return "";
}
2250 /* Function to output return operation. */
2251 const char *
2252 nds32_output_return (void)
2254 /* A string pattern for output_asm_insn(). */
2255 char pattern[100];
2256 /* The operands array which will be used in output_asm_insn(). */
2257 rtx operands[2];
2258 /* For stack v3pop:
2259 operands[0]: Re
2260 operands[1]: imm8u */
2261 int re_callee_saved = cfun->machine->callee_saved_last_gpr_regno;
2262 int sp_adjust;
2264 /* Set operands[0]. */
2265 operands[0] = gen_rtx_REG (SImode, re_callee_saved);
2267 /* Check if we can generate 'pop25 Re,imm8u',
2268 otherwise, generate 'pop25 Re,0'.
2269 We have to consider alloca issue as well.
2270 If the function does call alloca(), the stack pointer is not fixed.
2271 In that case, we cannot use 'pop25 Re,imm8u' directly.
2272 We have to caculate stack pointer from frame pointer
2273 and then use 'pop25 Re,0'. */
2274 sp_adjust = cfun->machine->local_size
2275 + cfun->machine->out_args_size
2276 + cfun->machine->callee_saved_area_gpr_padding_bytes
2277 + cfun->machine->callee_saved_fpr_regs_size;
2278 if (satisfies_constraint_Iu08 (GEN_INT (sp_adjust))
2279 && NDS32_DOUBLE_WORD_ALIGN_P (sp_adjust)
2280 && !cfun->calls_alloca)
2281 operands[1] = GEN_INT (sp_adjust);
2282 else
2283 operands[1] = GEN_INT (0);
2285 /* Create assembly code pattern. */
2286 snprintf (pattern, sizeof (pattern), "pop25\t%%0, %%1");
2287 /* We use output_asm_insn() to output assembly code by ourself. */
2288 output_asm_insn (pattern, operands);
2289 return "";
/* Output a float load instruction.
   The template is chosen from the address shape inside the MEM
   operands[1]; its "%c" placeholder is then filled with 'd' or 's'
   according to whether the destination is double or single precision.
   Returns "" because the assembly is emitted via output_asm_insn().  */
const char *
nds32_output_float_load (rtx *operands)
{
  char buff[100];
  const char *pattern;
  rtx addr, addr_op0, addr_op1;
  /* Nonzero for a DFmode (8-byte) access.  */
  int dp = GET_MODE_SIZE (GET_MODE (operands[0])) == 8;
  addr = XEXP (operands[1], 0);
  switch (GET_CODE (addr))
    {
    case REG:
      pattern = "fl%ci\t%%0, %%1";
      break;

    case PLUS:
      addr_op0 = XEXP (addr, 0);
      addr_op1 = XEXP (addr, 1);

      if (REG_P (addr_op0) && REG_P (addr_op1))
	pattern = "fl%c\t%%0, %%1";
      else if (REG_P (addr_op0) && CONST_INT_P (addr_op1))
	pattern = "fl%ci\t%%0, %%1";
      else if (GET_CODE (addr_op0) == MULT && REG_P (addr_op1)
	       && REG_P (XEXP (addr_op0, 0))
	       && CONST_INT_P (XEXP (addr_op0, 1)))
	pattern = "fl%c\t%%0, %%1";
      else
	gcc_unreachable ();
      break;

    case POST_MODIFY:
      addr_op0 = XEXP (addr, 0);
      addr_op1 = XEXP (addr, 1);

      if (REG_P (addr_op0) && GET_CODE (addr_op1) == PLUS
	  && REG_P (XEXP (addr_op1, 1)))
	pattern = "fl%c.bi\t%%0, %%1";
      else if (REG_P (addr_op0) && GET_CODE (addr_op1) == PLUS
	       && CONST_INT_P (XEXP (addr_op1, 1)))
	pattern = "fl%ci.bi\t%%0, %%1";
      else
	gcc_unreachable ();
      break;

    case POST_INC:
      if (REG_P (XEXP (addr, 0)))
	{
	  /* The post-increment step equals the access size.  */
	  if (dp)
	    pattern = "fl%ci.bi\t%%0, %%1, 8";
	  else
	    pattern = "fl%ci.bi\t%%0, %%1, 4";
	}
      else
	gcc_unreachable ();
      break;

    case POST_DEC:
      if (REG_P (XEXP (addr, 0)))
	{
	  if (dp)
	    pattern = "fl%ci.bi\t%%0, %%1, -8";
	  else
	    pattern = "fl%ci.bi\t%%0, %%1, -4";
	}
      else
	gcc_unreachable ();
      break;

    default:
      gcc_unreachable ();
    }

  /* Substitute 'd' or 's' for the %c in the selected template.  */
  sprintf (buff, pattern, dp ? 'd' : 's');
  output_asm_insn (buff, operands);
  return "";
}
/* Output a float store instruction.
   The template is chosen from the address shape inside the MEM
   operands[0]; its "%c" placeholder is then filled with 'd' or 's'
   according to whether the source is double or single precision.
   Returns "" because the assembly is emitted via output_asm_insn().  */
const char *
nds32_output_float_store (rtx *operands)
{
  char buff[100];
  const char *pattern;
  rtx addr, addr_op0, addr_op1;
  /* Nonzero for a DFmode (8-byte) access.  */
  int dp = GET_MODE_SIZE (GET_MODE (operands[0])) == 8;
  addr = XEXP (operands[0], 0);
  switch (GET_CODE (addr))
    {
    case REG:
      pattern = "fs%ci\t%%1, %%0";
      break;

    case PLUS:
      addr_op0 = XEXP (addr, 0);
      addr_op1 = XEXP (addr, 1);

      if (REG_P (addr_op0) && REG_P (addr_op1))
	pattern = "fs%c\t%%1, %%0";
      else if (REG_P (addr_op0) && CONST_INT_P (addr_op1))
	pattern = "fs%ci\t%%1, %%0";
      else if (GET_CODE (addr_op0) == MULT && REG_P (addr_op1)
	       && REG_P (XEXP (addr_op0, 0))
	       && CONST_INT_P (XEXP (addr_op0, 1)))
	pattern = "fs%c\t%%1, %%0";
      else
	gcc_unreachable ();
      break;

    case POST_MODIFY:
      addr_op0 = XEXP (addr, 0);
      addr_op1 = XEXP (addr, 1);

      if (REG_P (addr_op0) && GET_CODE (addr_op1) == PLUS
	  && REG_P (XEXP (addr_op1, 1)))
	pattern = "fs%c.bi\t%%1, %%0";
      else if (REG_P (addr_op0) && GET_CODE (addr_op1) == PLUS
	       && CONST_INT_P (XEXP (addr_op1, 1)))
	pattern = "fs%ci.bi\t%%1, %%0";
      else
	gcc_unreachable ();
      break;

    case POST_INC:
      if (REG_P (XEXP (addr, 0)))
	{
	  /* The post-increment step equals the access size.  */
	  if (dp)
	    pattern = "fs%ci.bi\t%%1, %%0, 8";
	  else
	    pattern = "fs%ci.bi\t%%1, %%0, 4";
	}
      else
	gcc_unreachable ();
      break;

    case POST_DEC:
      if (REG_P (XEXP (addr, 0)))
	{
	  if (dp)
	    pattern = "fs%ci.bi\t%%1, %%0, -8";
	  else
	    pattern = "fs%ci.bi\t%%1, %%0, -4";
	}
      else
	gcc_unreachable ();
      break;

    default:
      gcc_unreachable ();
    }

  /* Substitute 'd' or 's' for the %c in the selected template.  */
  sprintf (buff, pattern, dp ? 'd' : 's');
  output_asm_insn (buff, operands);
  return "";
}
2449 const char *
2450 nds32_output_smw_single_word (rtx *operands)
2452 char buff[100];
2453 unsigned regno;
2454 int enable4;
2455 bool update_base_p;
2456 rtx base_addr = operands[0];
2457 rtx base_reg;
2458 rtx otherops[2];
2460 if (REG_P (XEXP (base_addr, 0)))
2462 update_base_p = false;
2463 base_reg = XEXP (base_addr, 0);
2465 else
2467 update_base_p = true;
2468 base_reg = XEXP (XEXP (base_addr, 0), 0);
2471 const char *update_base = update_base_p ? "m" : "";
2473 regno = REGNO (operands[1]);
2475 otherops[0] = base_reg;
2476 otherops[1] = operands[1];
2478 if (regno >= 28)
2480 enable4 = nds32_regno_to_enable4 (regno);
2481 sprintf (buff, "smw.bi%s\t$sp, [%%0], $sp, %x", update_base, enable4);
2483 else
2485 sprintf (buff, "smw.bi%s\t%%1, [%%0], %%1", update_base);
2487 output_asm_insn (buff, otherops);
2488 return "";
2491 /* ------------------------------------------------------------------------ */
2492 const char *
2493 nds32_output_smw_double_word (rtx *operands)
2495 char buff[100];
2496 unsigned regno;
2497 int enable4;
2498 bool update_base_p;
2499 rtx base_addr = operands[0];
2500 rtx base_reg;
2501 rtx otherops[3];
2503 if (REG_P (XEXP (base_addr, 0)))
2505 update_base_p = false;
2506 base_reg = XEXP (base_addr, 0);
2508 else
2510 update_base_p = true;
2511 base_reg = XEXP (XEXP (base_addr, 0), 0);
2514 const char *update_base = update_base_p ? "m" : "";
2516 regno = REGNO (operands[1]);
2518 otherops[0] = base_reg;
2519 otherops[1] = operands[1];
2520 otherops[2] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);;
2522 if (regno >= 28)
2524 enable4 = nds32_regno_to_enable4 (regno)
2525 | nds32_regno_to_enable4 (regno + 1);
2526 sprintf (buff, "smw.bi%s\t$sp, [%%0], $sp, %x", update_base, enable4);
2528 else if (regno == 27)
2530 enable4 = nds32_regno_to_enable4 (regno + 1);
2531 sprintf (buff, "smw.bi%s\t%%1, [%%0], %%1, %x", update_base, enable4);
2533 else
2535 sprintf (buff, "smw.bi%s\t%%1, [%%0], %%2", update_base);
2537 output_asm_insn (buff, otherops);
2538 return "";
2541 const char *
2542 nds32_output_lmw_single_word (rtx *operands)
2544 char buff[100];
2545 unsigned regno;
2546 bool update_base_p;
2547 int enable4;
2548 rtx base_addr = operands[1];
2549 rtx base_reg;
2550 rtx otherops[2];
2552 if (REG_P (XEXP (base_addr, 0)))
2554 update_base_p = false;
2555 base_reg = XEXP (base_addr, 0);
2557 else
2559 update_base_p = true;
2560 base_reg = XEXP (XEXP (base_addr, 0), 0);
2563 const char *update_base = update_base_p ? "m" : "";
2565 regno = REGNO (operands[0]);
2567 otherops[0] = operands[0];
2568 otherops[1] = base_reg;
2570 if (regno >= 28)
2572 enable4 = nds32_regno_to_enable4 (regno);
2573 sprintf (buff, "lmw.bi%s\t$sp, [%%1], $sp, %x", update_base, enable4);
2575 else
2577 sprintf (buff, "lmw.bi%s\t%%0, [%%1], %%0", update_base);
2579 output_asm_insn (buff, otherops);
2580 return "";
/* Expand an unaligned load of MODE from memory operands[1] into register
   operands[0] as a series of byte loads combined with shifts and ORs.
   DImode is handled as two SImode halves; vector modes go through a
   temporary SImode register and a final convert_move.  */
void
nds32_expand_unaligned_load (rtx *operands, enum machine_mode mode)
{
  /* Initial memory offset (starts at the most significant byte on
     big-endian targets and walks downward, offset_adj = -1).  */
  int offset = WORDS_BIG_ENDIAN ? GET_MODE_SIZE (mode) - 1 : 0;
  int offset_adj = WORDS_BIG_ENDIAN ? -1 : 1;
  /* Initial register shift byte.  */
  int shift = 0;
  /* Remaining bytes after the first load-byte instruction.  */
  int width = GET_MODE_SIZE (mode) - 1;
  rtx mem[2];
  rtx reg[2];
  rtx sub_reg;
  rtx temp_reg, temp_sub_reg;
  int num_reg;

  /* Generating a series of load byte instructions.
     The first load byte instruction and the following
     load byte instructions are not the same, like:
     First:
       lbi reg0, [mem]
       zeh reg0, reg0
     Second:
       lbi temp_reg, [mem + offset]
       sll temp_reg, (8 * shift)
       ior reg0, temp_reg

       lbi temp_reg, [mem + (offset + 1)]
       sll temp_reg, (8 * (shift + 1))
       ior reg0, temp_reg  */

  temp_reg = gen_reg_rtx (SImode);
  temp_sub_reg = gen_lowpart (QImode, temp_reg);

  if (mode == DImode)
    {
      /* Load doubleword: we need two registers to access.  */
      reg[0] = nds32_di_low_part_subreg (operands[0]);
      reg[1] = nds32_di_high_part_subreg (operands[0]);
      /* A register only stores 4 bytes.  */
      width = GET_MODE_SIZE (SImode) - 1;
    }
  else
    {
      if (VECTOR_MODE_P (mode))
	reg[0] = gen_reg_rtx (SImode);
      else
	reg[0] = operands[0];
    }

  for (num_reg = (mode == DImode) ? 2 : 1; num_reg > 0; num_reg--)
    {
      sub_reg = gen_lowpart (QImode, reg[0]);
      mem[0] = gen_rtx_MEM (QImode, plus_constant (Pmode, operands[1], offset));

      /* Generating the first part instructions.
	   lbi reg0, [mem]
	   zeh reg0, reg0 */
      emit_move_insn (sub_reg, mem[0]);
      emit_insn (gen_zero_extendqisi2 (reg[0], sub_reg));

      while (width > 0)
	{
	  offset = offset + offset_adj;
	  shift++;
	  width--;

	  mem[1] = gen_rtx_MEM (QImode, plus_constant (Pmode,
						       operands[1],
						       offset));
	  /* Generating the second part instructions.
	       lbi temp_reg, [mem + offset]
	       sll temp_reg, (8 * shift)
	       ior reg0, temp_reg */
	  emit_move_insn (temp_sub_reg, mem[1]);
	  emit_insn (gen_ashlsi3 (temp_reg, temp_reg,
				  GEN_INT (shift * 8)));
	  emit_insn (gen_iorsi3 (reg[0], reg[0], temp_reg));
	}

      if (mode == DImode)
	{
	  /* Switch to the second (high-part) register and reset the
	     per-word counters for the next 4 bytes.  */
	  reg[0] = reg[1];
	  shift = 0;
	  width = GET_MODE_SIZE (SImode) - 1;
	  offset = offset + offset_adj;
	}
    }
  if (VECTOR_MODE_P (mode))
    convert_move (operands[0], reg[0], false);
}
/* Expand an unaligned store of MODE from register operands[1] into
   memory operands[0] as a series of shift-then-store-byte instructions.
   DImode is handled as two SImode halves; vector modes are first moved
   into a temporary SImode register.  */
void
nds32_expand_unaligned_store (rtx *operands, enum machine_mode mode)
{
  /* Initial memory offset (starts at the most significant byte on
     big-endian targets and walks downward, offset_adj = -1).  */
  int offset = WORDS_BIG_ENDIAN ? GET_MODE_SIZE (mode) - 1 : 0;
  int offset_adj = WORDS_BIG_ENDIAN ? -1 : 1;
  /* Initial register shift byte.  */
  int shift = 0;
  /* Remaining bytes after the first store-byte instruction.  */
  int width = GET_MODE_SIZE (mode) - 1;
  rtx mem[2];
  rtx reg[2];
  rtx sub_reg;
  rtx temp_reg, temp_sub_reg;
  int num_reg;

  /* Generating a series of store byte instructions.
     The first store byte instruction and the following
     store byte instructions are not the same, like:
     First:
       sbi reg0, [mem + 0]
     Second:
       srli temp_reg, reg0, (8 * shift)
       sbi temp_reg, [mem + offset] */

  temp_reg = gen_reg_rtx (SImode);
  temp_sub_reg = gen_lowpart (QImode, temp_reg);

  if (mode == DImode)
    {
      /* Store doubleword: we need two registers to access.  */
      reg[0] = nds32_di_low_part_subreg (operands[1]);
      reg[1] = nds32_di_high_part_subreg (operands[1]);
      /* A register only stores 4 bytes.  */
      width = GET_MODE_SIZE (SImode) - 1;
    }
  else
    {
      if (VECTOR_MODE_P (mode))
	{
	  reg[0] = gen_reg_rtx (SImode);
	  convert_move (reg[0], operands[1], false);
	}
      else
	reg[0] = operands[1];
    }

  for (num_reg = (mode == DImode) ? 2 : 1; num_reg > 0; num_reg--)
    {
      sub_reg = gen_lowpart (QImode, reg[0]);
      mem[0] = gen_rtx_MEM (QImode, plus_constant (Pmode, operands[0], offset));

      /* Generating the first part instructions.
	   sbi reg0, [mem + 0] */
      emit_move_insn (mem[0], sub_reg);

      while (width > 0)
	{
	  offset = offset + offset_adj;
	  shift++;
	  width--;

	  mem[1] = gen_rtx_MEM (QImode, plus_constant (Pmode,
						       operands[0],
						       offset));
	  /* Generating the second part instructions.
	       srli temp_reg, reg0, (8 * shift)
	       sbi temp_reg, [mem + offset] */
	  emit_insn (gen_lshrsi3 (temp_reg, reg[0],
				  GEN_INT (shift * 8)));
	  emit_move_insn (mem[1], temp_sub_reg);
	}

      if (mode == DImode)
	{
	  /* Switch to the second (high-part) register and reset the
	     per-word counters for the next 4 bytes.  */
	  reg[0] = reg[1];
	  shift = 0;
	  width = GET_MODE_SIZE (SImode) - 1;
	  offset = offset + offset_adj;
	}
    }
}
2760 /* Using multiple load/store instruction to output doubleword instruction. */
2761 const char *
2762 nds32_output_double (rtx *operands, bool load_p)
2764 char pattern[100];
2765 int reg = load_p ? 0 : 1;
2766 int mem = load_p ? 1 : 0;
2767 rtx otherops[3];
2768 rtx addr = XEXP (operands[mem], 0);
2770 otherops[0] = gen_rtx_REG (SImode, REGNO (operands[reg]));
2771 otherops[1] = gen_rtx_REG (SImode, REGNO (operands[reg]) + 1);
2773 if (GET_CODE (addr) == POST_INC)
2775 /* (mem (post_inc (reg))) */
2776 otherops[2] = XEXP (addr, 0);
2777 snprintf (pattern, sizeof (pattern),
2778 "%cmw.bim\t%%0, [%%2], %%1, 0", load_p ? 'l' : 's');
2780 else
2782 /* (mem (reg)) */
2783 otherops[2] = addr;
2784 snprintf (pattern, sizeof (pattern),
2785 "%cmw.bi\t%%0, [%%2], %%1, 0", load_p ? 'l' : 's');
2789 output_asm_insn (pattern, otherops);
2790 return "";
2793 const char *
2794 nds32_output_cbranchsi4_equality_zero (rtx_insn *insn, rtx *operands)
2796 enum rtx_code code;
2797 bool long_jump_p = false;
2799 code = GET_CODE (operands[0]);
2801 /* This zero-comparison conditional branch has two forms:
2802 32-bit instruction => beqz/bnez imm16s << 1
2803 16-bit instruction => beqzs8/bnezs8/beqz38/bnez38 imm8s << 1
2805 For 32-bit case,
2806 we assume it is always reachable. (but check range -65500 ~ 65500)
2808 For 16-bit case,
2809 it must satisfy { 255 >= (label - pc) >= -256 } condition.
2810 However, since the $pc for nds32 is at the beginning of the instruction,
2811 we should leave some length space for current insn.
2812 So we use range -250 ~ 250. */
2814 switch (get_attr_length (insn))
2816 case 8:
2817 long_jump_p = true;
2818 /* fall through */
2819 case 2:
2820 if (which_alternative == 0)
2822 /* constraint: t */
2823 /* b<cond>zs8 .L0
2825 b<inverse_cond>zs8 .LCB0
2826 j .L0
2827 .LCB0:
2829 output_cond_branch_compare_zero (code, "s8", long_jump_p,
2830 operands, true);
2831 return "";
2833 else if (which_alternative == 1)
2835 /* constraint: l */
2836 /* b<cond>z38 $r0, .L0
2838 b<inverse_cond>z38 $r0, .LCB0
2839 j .L0
2840 .LCB0:
2842 output_cond_branch_compare_zero (code, "38", long_jump_p,
2843 operands, false);
2844 return "";
2846 else
2848 /* constraint: r */
2849 /* For which_alternative==2, it should not be here. */
2850 gcc_unreachable ();
2852 case 10:
2853 /* including constraints: t, l, and r */
2854 long_jump_p = true;
2855 /* fall through */
2856 case 4:
2857 /* including constraints: t, l, and r */
2858 output_cond_branch_compare_zero (code, "", long_jump_p, operands, false);
2859 return "";
2861 default:
2862 gcc_unreachable ();
2866 const char *
2867 nds32_output_cbranchsi4_equality_reg (rtx_insn *insn, rtx *operands)
2869 enum rtx_code code;
2870 bool long_jump_p, r5_p;
2871 int insn_length;
2873 insn_length = get_attr_length (insn);
2875 long_jump_p = (insn_length == 10 || insn_length == 8) ? true : false;
2876 r5_p = (insn_length == 2 || insn_length == 8) ? true : false;
2878 code = GET_CODE (operands[0]);
2880 /* This register-comparison conditional branch has one form:
2881 32-bit instruction => beq/bne imm14s << 1
2883 For 32-bit case,
2884 we assume it is always reachable. (but check range -16350 ~ 16350). */
2886 switch (code)
2888 case EQ:
2889 case NE:
2890 output_cond_branch (code, "", r5_p, long_jump_p, operands);
2891 return "";
2893 default:
2894 gcc_unreachable ();
2898 const char *
2899 nds32_output_cbranchsi4_equality_reg_or_const_int (rtx_insn *insn,
2900 rtx *operands)
2902 enum rtx_code code;
2903 bool long_jump_p, r5_p;
2904 int insn_length;
2906 insn_length = get_attr_length (insn);
2908 long_jump_p = (insn_length == 10 || insn_length == 8) ? true : false;
2909 r5_p = (insn_length == 2 || insn_length == 8) ? true : false;
2911 code = GET_CODE (operands[0]);
2913 /* This register-comparison conditional branch has one form:
2914 32-bit instruction => beq/bne imm14s << 1
2915 32-bit instruction => beqc/bnec imm8s << 1
2917 For 32-bit case, we assume it is always reachable.
2918 (but check range -16350 ~ 16350 and -250 ~ 250). */
2920 switch (code)
2922 case EQ:
2923 case NE:
2924 if (which_alternative == 2)
2926 /* r, Is11 */
2927 /* b<cond>c */
2928 output_cond_branch (code, "c", r5_p, long_jump_p, operands);
2930 else
2932 /* r, r */
2933 /* v, r */
2934 output_cond_branch (code, "", r5_p, long_jump_p, operands);
2936 return "";
2937 default:
2938 gcc_unreachable ();
2942 const char *
2943 nds32_output_cbranchsi4_greater_less_zero (rtx_insn *insn, rtx *operands)
2945 enum rtx_code code;
2946 bool long_jump_p;
2947 int insn_length;
2949 insn_length = get_attr_length (insn);
2951 gcc_assert (insn_length == 4 || insn_length == 10);
2953 long_jump_p = (insn_length == 10) ? true : false;
2955 code = GET_CODE (operands[0]);
2957 /* This zero-greater-less-comparison conditional branch has one form:
2958 32-bit instruction => bgtz/bgez/bltz/blez imm16s << 1
2960 For 32-bit case, we assume it is always reachable.
2961 (but check range -65500 ~ 65500). */
2963 switch (code)
2965 case GT:
2966 case GE:
2967 case LT:
2968 case LE:
2969 output_cond_branch_compare_zero (code, "", long_jump_p, operands, false);
2970 break;
2971 default:
2972 gcc_unreachable ();
2974 return "";
2977 const char *
2978 nds32_output_unpkd8 (rtx output, rtx input,
2979 rtx high_idx_rtx, rtx low_idx_rtx,
2980 bool signed_p)
2982 char pattern[100];
2983 rtx output_operands[2];
2984 HOST_WIDE_INT high_idx, low_idx;
2985 high_idx = INTVAL (high_idx_rtx);
2986 low_idx = INTVAL (low_idx_rtx);
2988 gcc_assert (high_idx >= 0 && high_idx <= 3);
2989 gcc_assert (low_idx >= 0 && low_idx <= 3);
2991 /* We only have 10, 20, 30 and 31. */
2992 if ((low_idx != 0 || high_idx == 0) &&
2993 !(low_idx == 1 && high_idx == 3))
2994 return "#";
2996 char sign_char = signed_p ? 's' : 'z';
2998 sprintf (pattern,
2999 "%cunpkd8" HOST_WIDE_INT_PRINT_DEC HOST_WIDE_INT_PRINT_DEC "\t%%0, %%1",
3000 sign_char, high_idx, low_idx);
3001 output_operands[0] = output;
3002 output_operands[1] = input;
3003 output_asm_insn (pattern, output_operands);
3004 return "";
3007 /* Return true if SYMBOL_REF X binds locally. */
3009 static bool
3010 nds32_symbol_binds_local_p (const_rtx x)
3012 return (SYMBOL_REF_DECL (x)
3013 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
3014 : SYMBOL_REF_LOCAL_P (x));
3017 const char *
3018 nds32_output_call (rtx insn, rtx *operands, rtx symbol, const char *long_call,
3019 const char *call, bool align_p)
3021 char pattern[100];
3022 bool noreturn_p;
3024 if (nds32_long_call_p (symbol))
3025 strcpy (pattern, long_call);
3026 else
3027 strcpy (pattern, call);
3029 if (flag_pic && CONSTANT_P (symbol)
3030 && !nds32_symbol_binds_local_p (symbol))
3031 strcat (pattern, "@PLT");
3033 if (align_p)
3034 strcat (pattern, "\n\t.align 2");
3036 noreturn_p = find_reg_note (insn, REG_NORETURN, NULL_RTX) != NULL_RTX;
3038 if (noreturn_p)
3040 if (TARGET_16_BIT)
3041 strcat (pattern, "\n\tnop16");
3042 else
3043 strcat (pattern, "\n\tnop");
3046 output_asm_insn (pattern, operands);
3047 return "";
3050 bool
3051 nds32_need_split_sms_p (rtx in0_idx0, rtx in1_idx0,
3052 rtx in0_idx1, rtx in1_idx1)
3054 /* smds or smdrs. */
3055 if (INTVAL (in0_idx0) == INTVAL (in1_idx0)
3056 && INTVAL (in0_idx1) == INTVAL (in1_idx1)
3057 && INTVAL (in0_idx0) != INTVAL (in0_idx1))
3058 return false;
3060 /* smxds. */
3061 if (INTVAL (in0_idx0) != INTVAL (in0_idx1)
3062 && INTVAL (in1_idx0) != INTVAL (in1_idx1))
3063 return false;
3065 return true;
3068 const char *
3069 nds32_output_sms (rtx in0_idx0, rtx in1_idx0,
3070 rtx in0_idx1, rtx in1_idx1)
3072 if (nds32_need_split_sms_p (in0_idx0, in1_idx0,
3073 in0_idx1, in1_idx1))
3074 return "#";
3075 /* out = in0[in0_idx0] * in1[in1_idx0] - in0[in0_idx1] * in1[in1_idx1] */
3077 /* smds or smdrs. */
3078 if (INTVAL (in0_idx0) == INTVAL (in1_idx0)
3079 && INTVAL (in0_idx1) == INTVAL (in1_idx1)
3080 && INTVAL (in0_idx0) != INTVAL (in0_idx1))
3082 if (INTVAL (in0_idx0) == 0)
3084 if (TARGET_BIG_ENDIAN)
3085 return "smds\t%0, %1, %2";
3086 else
3087 return "smdrs\t%0, %1, %2";
3089 else
3091 if (TARGET_BIG_ENDIAN)
3092 return "smdrs\t%0, %1, %2";
3093 else
3094 return "smds\t%0, %1, %2";
3098 if (INTVAL (in0_idx0) != INTVAL (in0_idx1)
3099 && INTVAL (in1_idx0) != INTVAL (in1_idx1))
3101 if (INTVAL (in0_idx0) == 1)
3103 if (TARGET_BIG_ENDIAN)
3104 return "smxds\t%0, %2, %1";
3105 else
3106 return "smxds\t%0, %1, %2";
3108 else
3110 if (TARGET_BIG_ENDIAN)
3111 return "smxds\t%0, %1, %2";
3112 else
3113 return "smxds\t%0, %2, %1";
3117 gcc_unreachable ();
3118 return "";
3121 void
3122 nds32_split_sms (rtx out, rtx in0, rtx in1,
3123 rtx in0_idx0, rtx in1_idx0,
3124 rtx in0_idx1, rtx in1_idx1)
3126 rtx result0 = gen_reg_rtx (SImode);
3127 rtx result1 = gen_reg_rtx (SImode);
3128 emit_insn (gen_mulhisi3v (result0, in0, in1,
3129 in0_idx0, in1_idx0));
3130 emit_insn (gen_mulhisi3v (result1, in0, in1,
3131 in0_idx1, in1_idx1));
3132 emit_insn (gen_subsi3 (out, result0, result1));
/* Split a doubleword load/store instruction into two single-word
   instructions.  LOAD_P selects the direction.  On return,
   operands[2]..operands[5] hold the two (reg, mem) operand pairs in
   the order the single-word instructions must be emitted.  */
void
nds32_spilt_doubleword (rtx *operands, bool load_p)
{
  int reg = load_p ? 0 : 1;
  int mem = load_p ? 1 : 0;
  rtx reg_rtx = load_p ? operands[0] : operands[1];
  rtx mem_rtx = load_p ? operands[1] : operands[0];
  rtx low_part[2], high_part[2];
  rtx sub_mem = XEXP (mem_rtx, 0);

  /* Generate low_part and high_part register pattern.
     i.e. register pattern like:
     (reg:DI) -> (subreg:SI (reg:DI))
		 (subreg:SI (reg:DI)) */
  low_part[reg] = simplify_gen_subreg (SImode, reg_rtx, GET_MODE (reg_rtx), 0);
  high_part[reg] = simplify_gen_subreg (SImode, reg_rtx, GET_MODE (reg_rtx), 4);

  /* Generate low_part and high_part memory pattern.
     A (post_dec) memory format will generate:
       low_part:  lwi.bi reg, [mem], 4
       high_part: lwi.bi reg, [mem], -12 */
  if (GET_CODE (sub_mem) == POST_DEC)
    {
      /* Memory format is (post_dec (reg)),
	 so extract (reg) from the (post_dec (reg)) pattern.  */
      sub_mem = XEXP (sub_mem, 0);

      /* Generate low_part and high_part memory format:
	   low_part:  (post_modify ((reg) (plus (reg) (const 4)))
	   high_part: (post_modify ((reg) (plus (reg) (const -12))) */
      low_part[mem] = gen_frame_mem (SImode,
				     gen_rtx_POST_MODIFY (Pmode, sub_mem,
							  gen_rtx_PLUS (Pmode,
									sub_mem,
									GEN_INT (4))));
      high_part[mem] = gen_frame_mem (SImode,
				      gen_rtx_POST_MODIFY (Pmode, sub_mem,
							   gen_rtx_PLUS (Pmode,
									 sub_mem,
									 GEN_INT (-12))));
    }
  else if (GET_CODE (sub_mem) == POST_MODIFY)
    {
      /* Memory format is (post_modify (reg) (plus (reg) (const))),
	 so extract (reg) from the post_modify pattern.  */
      rtx post_mem = XEXP (sub_mem, 0);

      /* Extract (const) from the (post_modify (reg) (plus (reg) (const)))
	 pattern.  */
      rtx plus_op = XEXP (sub_mem, 1);
      rtx post_val = XEXP (plus_op, 1);

      /* Generate low_part and high_part memory format:
	   low_part:  (post_modify ((reg) (plus (reg) (const)))
	   high_part: ((plus (reg) (const 4))) */
      low_part[mem] = gen_frame_mem (SImode,
				     gen_rtx_POST_MODIFY (Pmode, post_mem,
							  gen_rtx_PLUS (Pmode,
									post_mem,
									post_val)));
      high_part[mem] = gen_frame_mem (SImode, plus_constant (Pmode,
							     post_mem,
							     4));
    }
  else
    {
      /* Memory format: (symbol_ref), (const), (reg + const_int).  */
      low_part[mem] = adjust_address (mem_rtx, SImode, 0);
      high_part[mem] = adjust_address (mem_rtx, SImode, 4);
    }

  /* After reload completed, we have a dependence issue between the low
     part register and the high part memory.  i.e. we cannot split a
     sequence like:
       load $r0, [%r1]
     split to
       lw  $r0, [%r0]
       lwi $r1, [%r0 + 4]
     so we swap the positions:
       lwi $r1, [%r0 + 4]
       lw  $r0, [%r0]
     For store instructions we don't have this problem.

     When the memory format is [post_modify], we also need to emit the
     high part instruction before the low part instruction.
     Example:
       load $r0, [%r2], post_val
     split to
       load $r1, [%r2 + 4]
       load $r0, [$r2], post_val.  */
  if ((load_p && reg_overlap_mentioned_p (low_part[0], high_part[1]))
      || GET_CODE (sub_mem) == POST_MODIFY)
    {
      operands[2] = high_part[0];
      operands[3] = high_part[1];
      operands[4] = low_part[0];
      operands[5] = low_part[1];
    }
  else
    {
      operands[2] = low_part[0];
      operands[3] = low_part[1];
      operands[4] = high_part[0];
      operands[5] = high_part[1];
    }
}
/* Split a 64-bit left shift DST = SRC << SHIFTAMOUNT into 32-bit
   operations.  A constant amount is resolved at expand time; a variable
   amount computes both the <32 and >=32 results and selects with
   conditional moves.  */
void
nds32_split_ashiftdi3 (rtx dst, rtx src, rtx shiftamount)
{
  rtx src_high_part, src_low_part;
  rtx dst_high_part, dst_low_part;

  dst_high_part = nds32_di_high_part_subreg (dst);
  dst_low_part = nds32_di_low_part_subreg (dst);

  src_high_part = nds32_di_high_part_subreg (src);
  src_low_part = nds32_di_low_part_subreg (src);

  /* We need to handle shifts of more than 32 bits as well.  */
  if (CONST_INT_P (shiftamount))
    {
      if (INTVAL (shiftamount) < 32)
	{
	  /* High part comes from the wext extraction at bit
	     (32 - amount); low part is a plain 32-bit shift.  */
	  rtx ext_start;
	  ext_start = gen_int_mode(32 - INTVAL (shiftamount), SImode);

	  emit_insn (gen_wext (dst_high_part, src, ext_start));
	  emit_insn (gen_ashlsi3 (dst_low_part, src_low_part, shiftamount));
	}
      else
	{
	  /* Amount >= 32: low part becomes zero, high part is the low
	     source word shifted by (amount - 32).  */
	  rtx new_shift_amout = gen_int_mode(INTVAL (shiftamount) - 32, SImode);

	  emit_insn (gen_ashlsi3 (dst_high_part, src_low_part,
				  new_shift_amout));

	  emit_move_insn (dst_low_part, GEN_INT (0));
	}
    }
  else
    {
      rtx dst_low_part_l32, dst_high_part_l32;
      rtx dst_low_part_g32, dst_high_part_g32;
      rtx new_shift_amout, select_reg;
      dst_low_part_l32 = gen_reg_rtx (SImode);
      dst_high_part_l32 = gen_reg_rtx (SImode);
      dst_low_part_g32 = gen_reg_rtx (SImode);
      dst_high_part_g32 = gen_reg_rtx (SImode);
      new_shift_amout = gen_reg_rtx (SImode);
      select_reg = gen_reg_rtx (SImode);

      rtx ext_start;
      ext_start = gen_reg_rtx (SImode);

      /*
	 if (shiftamount < 32)
	   dst_low_part = src_low_part << shiftamount
	   dst_high_part = wext (src, 32 - shiftamount)
	   # wext can't handle wext (src, 32) since it only takes rb[0:4]
	   # for the extraction position.
	   dst_high_part = shiftamount == 0 ? src_high_part : dst_high_part
	 else
	   dst_low_part = 0
	   dst_high_part = src_low_part << (shiftamount & 0x1f)
      */

      emit_insn (gen_subsi3 (ext_start,
			     gen_int_mode (32, SImode),
			     shiftamount));
      emit_insn (gen_wext (dst_high_part_l32, src, ext_start));

      /* Handle the shiftamount == 0 case.  */
      emit_insn (gen_cmovzsi (dst_high_part_l32, shiftamount,
			      src_high_part, dst_high_part_l32));

      emit_insn (gen_ashlsi3 (dst_low_part_l32, src_low_part, shiftamount));

      emit_move_insn (dst_low_part_g32, const0_rtx);
      emit_insn (gen_andsi3 (new_shift_amout, shiftamount, GEN_INT (0x1f)));
      emit_insn (gen_ashlsi3 (dst_high_part_g32, src_low_part,
			      new_shift_amout));

      /* select_reg = (shiftamount < 32); pick the matching results.  */
      emit_insn (gen_slt_compare (select_reg, shiftamount, GEN_INT (32)));

      emit_insn (gen_cmovnsi (dst_low_part, select_reg,
			      dst_low_part_l32, dst_low_part_g32));
      emit_insn (gen_cmovnsi (dst_high_part, select_reg,
			      dst_high_part_l32, dst_high_part_g32));
    }
}
/* Split a 64-bit arithmetic (sign-propagating) right shift into 32-bit
   operations.  */
void
nds32_split_ashiftrtdi3 (rtx dst, rtx src, rtx shiftamount)
{
  /* false -> arithmetic (not logical) shift.  */
  nds32_split_shiftrtdi3 (dst, src, shiftamount, false);
}
/* Split a 64-bit logical (zero-filling) right shift into 32-bit
   operations.  */
void
nds32_split_lshiftrtdi3 (rtx dst, rtx src, rtx shiftamount)
{
  /* true -> logical shift.  */
  nds32_split_shiftrtdi3 (dst, src, shiftamount, true);
}
/* Split a 64-bit rotate-right DST = SRC rotr SHIFTAMOUNT into 32-bit
   operations.  Both the amount-below-32 and amount-at-least-32 results
   are computed, then conditional moves select the correct pair.  */
void
nds32_split_rotatertdi3 (rtx dst, rtx src, rtx shiftamount)
{
  rtx dst_low_part_l32, dst_high_part_l32;
  rtx dst_low_part_g32, dst_high_part_g32;
  rtx select_reg, low5bit, low5bit_inv, minus32sa;
  rtx dst_low_part_g32_tmph;
  rtx dst_low_part_g32_tmpl;
  rtx dst_high_part_l32_tmph;
  rtx dst_high_part_l32_tmpl;

  rtx src_low_part, src_high_part;
  rtx dst_high_part, dst_low_part;

  shiftamount = force_reg (SImode, shiftamount);

  /* Only the low 6 bits of the rotate amount are significant.  */
  emit_insn (gen_andsi3 (shiftamount,
			 shiftamount,
			 gen_int_mode (0x3f, SImode)));

  dst_high_part = nds32_di_high_part_subreg (dst);
  dst_low_part = nds32_di_low_part_subreg (dst);

  src_high_part = nds32_di_high_part_subreg (src);
  src_low_part = nds32_di_low_part_subreg (src);

  dst_low_part_l32 = gen_reg_rtx (SImode);
  dst_high_part_l32 = gen_reg_rtx (SImode);
  dst_low_part_g32 = gen_reg_rtx (SImode);
  dst_high_part_g32 = gen_reg_rtx (SImode);
  low5bit = gen_reg_rtx (SImode);
  low5bit_inv = gen_reg_rtx (SImode);
  minus32sa = gen_reg_rtx (SImode);
  select_reg = gen_reg_rtx (SImode);

  dst_low_part_g32_tmph = gen_reg_rtx (SImode);
  dst_low_part_g32_tmpl = gen_reg_rtx (SImode);

  dst_high_part_l32_tmph = gen_reg_rtx (SImode);
  dst_high_part_l32_tmpl = gen_reg_rtx (SImode);

  /* select_reg = (shiftamount < 32).  */
  emit_insn (gen_slt_compare (select_reg, shiftamount, GEN_INT (32)));

  /* if shiftamount < 32
       dst_low_part = wext (src, shiftamount)
     else
       dst_low_part = ((src_high_part >> (shiftamount & 0x1f))
		       | (src_low_part << (32 - (shiftamount & 0x1f))))  */
  emit_insn (gen_andsi3 (low5bit, shiftamount, gen_int_mode (0x1f, SImode)));
  emit_insn (gen_subsi3 (low5bit_inv, gen_int_mode (32, SImode), low5bit));

  emit_insn (gen_wext (dst_low_part_l32, src, shiftamount));

  emit_insn (gen_lshrsi3 (dst_low_part_g32_tmpl, src_high_part, low5bit));
  emit_insn (gen_ashlsi3 (dst_low_part_g32_tmph, src_low_part, low5bit_inv));

  emit_insn (gen_iorsi3 (dst_low_part_g32,
			 dst_low_part_g32_tmpl,
			 dst_low_part_g32_tmph));

  emit_insn (gen_cmovnsi (dst_low_part, select_reg,
			  dst_low_part_l32, dst_low_part_g32));

  /* if shiftamount < 32
       dst_high_part = ((src_high_part >> shiftamount)
			| (src_low_part << (32 - shiftamount)))
       dst_high_part = shiftamount == 0 ? src_high_part : dst_high_part
     else
       dst_high_part = wext (src, shiftamount & 0x1f)  */

  emit_insn (gen_subsi3 (minus32sa, gen_int_mode (32, SImode), shiftamount));

  emit_insn (gen_lshrsi3 (dst_high_part_l32_tmpl, src_high_part, shiftamount));
  emit_insn (gen_ashlsi3 (dst_high_part_l32_tmph, src_low_part, minus32sa));

  emit_insn (gen_iorsi3 (dst_high_part_l32,
			 dst_high_part_l32_tmpl,
			 dst_high_part_l32_tmph));

  /* Handle the shiftamount == 0 case.  */
  emit_insn (gen_cmovzsi (dst_high_part_l32, shiftamount,
			  src_high_part, dst_high_part_l32));

  emit_insn (gen_wext (dst_high_part_g32, src, low5bit));

  emit_insn (gen_cmovnsi (dst_high_part, select_reg,
			  dst_high_part_l32, dst_high_part_g32));
}
3431 /* Return true if OP contains a symbol reference. */
3432 bool
3433 symbolic_reference_mentioned_p (rtx op)
3435 const char *fmt;
3436 int i;
3438 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3439 return true;
3441 fmt = GET_RTX_FORMAT (GET_CODE (op));
3442 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3444 if (fmt[i] == 'E')
3446 int j;
3448 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3449 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3450 return true;
3453 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3454 return true;
3457 return false;
/* Expand PIC code for @GOTOFF and @GOT.

  Example for @GOTOFF:

    la $r0, symbol@GOTOFF
      -> sethi $ta, hi20(symbol@GOTOFF)
	 ori $ta, $ta, lo12(symbol@GOTOFF)
	 add $r0, $ta, $gp

  Example for @GOT:

    la $r0, symbol@GOT
      -> sethi $ta, hi20(symbol@GOT)
	 ori $ta, $ta, lo12(symbol@GOT)
	 lw  $r0, [$ta + $gp]

   Returns the legitimized replacement for X; insns computing any needed
   intermediate values are emitted to the current sequence.  */
static rtx
nds32_legitimize_pic_address (rtx x)
{
  rtx addr = x;
  rtx reg = gen_reg_rtx (Pmode);
  rtx pat;

  if (GET_CODE (x) == LABEL_REF
      || (GET_CODE (x) == SYMBOL_REF
	  && (CONSTANT_POOL_ADDRESS_P (x)
	      || SYMBOL_REF_LOCAL_P (x))))
    {
      /* Locally-binding symbol: use the cheaper @GOTOFF form,
	 an offset added to $gp.  */
      addr = gen_rtx_UNSPEC (SImode, gen_rtvec (1, x), UNSPEC_GOTOFF);
      addr = gen_rtx_CONST (SImode, addr);
      emit_insn (gen_sethi (reg, addr));
      emit_insn (gen_lo_sum (reg, reg, addr));
      x = gen_rtx_PLUS (Pmode, reg, pic_offset_table_rtx);
    }
  else if (GET_CODE (x) == SYMBOL_REF)
    {
      /* Possibly-preemptible symbol: load its address from the GOT.  */
      addr = gen_rtx_UNSPEC (SImode, gen_rtvec (1, x), UNSPEC_GOT);
      addr = gen_rtx_CONST (SImode, addr);
      emit_insn (gen_sethi (reg, addr));
      emit_insn (gen_lo_sum (reg, reg, addr));

      x = gen_const_mem (SImode, gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
					       reg));
    }
  else if (GET_CODE (x) == CONST)
    {
      /* We don't split the constant in expand_pic_move because GOTOFF can
	 combine the addend with the symbol.  */
      addr = XEXP (x, 0);
      gcc_assert (GET_CODE (addr) == PLUS);

      rtx op0 = XEXP (addr, 0);
      rtx op1 = XEXP (addr, 1);

      if ((GET_CODE (op0) == LABEL_REF
	   || (GET_CODE (op0) == SYMBOL_REF
	       && (CONSTANT_POOL_ADDRESS_P (op0)
		   || SYMBOL_REF_LOCAL_P (op0))))
	  && GET_CODE (op1) == CONST_INT)
	{
	  /* (local symbol + offset): fold the addend into the @GOTOFF
	     relocation itself.  */
	  pat = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0), UNSPEC_GOTOFF);
	  pat = gen_rtx_PLUS (Pmode, pat, op1);
	  pat = gen_rtx_CONST (Pmode, pat);
	  emit_insn (gen_sethi (reg, pat));
	  emit_insn (gen_lo_sum (reg, reg, pat));
	  x = gen_rtx_PLUS (Pmode, reg, pic_offset_table_rtx);
	}
      else if (GET_CODE (op0) == SYMBOL_REF
	       && GET_CODE (op1) == CONST_INT)
	{
	  /* This is a constant offset from a @GOT symbol reference:
	     load the symbol address from the GOT first, then add the
	     offset (materializing it when it exceeds Is15 range).  */
	  addr = gen_rtx_UNSPEC (SImode, gen_rtvec (1, op0), UNSPEC_GOT);
	  addr = gen_rtx_CONST (SImode, addr);
	  emit_insn (gen_sethi (reg, addr));
	  emit_insn (gen_lo_sum (reg, reg, addr));
	  addr = gen_const_mem (SImode, gen_rtx_PLUS (Pmode,
						      pic_offset_table_rtx,
						      reg));
	  emit_move_insn (reg, addr);
	  if (satisfies_constraint_Is15 (op1))
	    x = gen_rtx_PLUS (Pmode, reg, op1);
	  else
	    {
	      rtx tmp_reg = gen_reg_rtx (SImode);
	      emit_insn (gen_movsi (tmp_reg, op1));
	      x = gen_rtx_PLUS (Pmode, reg, tmp_reg);
	    }
	}
      else
	{
	  /* Don't handle this pattern.  */
	  debug_rtx (x);
	  gcc_unreachable ();
	}
    }
  return x;
}
3558 void
3559 nds32_expand_pic_move (rtx *operands)
3561 rtx src;
3563 src = nds32_legitimize_pic_address (operands[1]);
3564 emit_move_insn (operands[0], src);
3567 /* Expand ICT symbol.
3568 Example for @ICT and ICT model=large:
3570 la $r0, symbol@ICT
3571 -> sethi $rt, hi20(symbol@ICT)
3572 lwi $r0, [$rt + lo12(symbol@ICT)]
3576 nds32_legitimize_ict_address (rtx x)
3578 rtx symbol = x;
3579 rtx addr = x;
3580 rtx reg = gen_reg_rtx (Pmode);
3581 gcc_assert (GET_CODE (x) == SYMBOL_REF
3582 && nds32_indirect_call_referenced_p (x));
3584 addr = gen_rtx_UNSPEC (SImode, gen_rtvec (1, symbol), UNSPEC_ICT);
3585 addr = gen_rtx_CONST (SImode, addr);
3586 emit_insn (gen_sethi (reg, addr));
3588 x = gen_const_mem (SImode, gen_rtx_LO_SUM (Pmode, reg, addr));
3590 return x;
3593 void
3594 nds32_expand_ict_move (rtx *operands)
3596 rtx src = operands[1];
3598 src = nds32_legitimize_ict_address (src);
3600 emit_move_insn (operands[0], src);
3603 /* Return true X is a indirect call symbol. */
3604 bool
3605 nds32_indirect_call_referenced_p (rtx x)
3607 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_ICT)
3608 x = XVECEXP (x, 0, 0);
3610 if (GET_CODE (x) == SYMBOL_REF)
3612 tree decl = SYMBOL_REF_DECL (x);
3614 return decl
3615 && (lookup_attribute("indirect_call",
3616 DECL_ATTRIBUTES(decl))
3617 != NULL);
3620 return false;
3623 /* Return true X is need use long call. */
3624 bool
3625 nds32_long_call_p (rtx symbol)
3627 if (nds32_indirect_call_referenced_p (symbol))
3628 return TARGET_ICT_MODEL_LARGE;
3629 else
3630 return TARGET_CMODEL_LARGE;
3633 /* Return true if X contains a thread-local symbol. */
3634 bool
3635 nds32_tls_referenced_p (rtx x)
3637 if (!targetm.have_tls)
3638 return false;
3640 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3641 x = XEXP (XEXP (x, 0), 0);
3643 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3644 return true;
3646 return false;
3649 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3650 this (thread-local) address. */
3652 nds32_legitimize_tls_address (rtx x)
3654 rtx tmp_reg;
3655 rtx tp_reg = gen_rtx_REG (Pmode, TP_REGNUM);
3656 rtx pat, insns, reg0;
3658 if (GET_CODE (x) == SYMBOL_REF)
3659 switch (SYMBOL_REF_TLS_MODEL (x))
3661 case TLS_MODEL_GLOBAL_DYNAMIC:
3662 case TLS_MODEL_LOCAL_DYNAMIC:
3663 /* Emit UNSPEC_TLS_DESC rather than expand rtl directly because spill
3664 may destroy the define-use chain anylysis to insert relax_hint. */
3665 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC)
3666 pat = gen_rtx_UNSPEC (SImode, gen_rtvec (1, x), UNSPEC_TLSGD);
3667 else
3668 pat = gen_rtx_UNSPEC (SImode, gen_rtvec (1, x), UNSPEC_TLSLD);
3670 pat = gen_rtx_CONST (SImode, pat);
3671 reg0 = gen_rtx_REG (Pmode, 0);
3672 /* If we can confirm all clobber reigsters, it doesn't have to use call
3673 instruction. */
3674 insns = emit_call_insn (gen_tls_desc (pat, GEN_INT (0)));
3675 use_reg (&CALL_INSN_FUNCTION_USAGE (insns), pic_offset_table_rtx);
3676 RTL_CONST_CALL_P (insns) = 1;
3677 tmp_reg = gen_reg_rtx (SImode);
3678 emit_move_insn (tmp_reg, reg0);
3679 x = tmp_reg;
3680 break;
3682 case TLS_MODEL_INITIAL_EXEC:
3683 pat = gen_rtx_UNSPEC (SImode, gen_rtvec (1, x), UNSPEC_TLSIE);
3684 tmp_reg = gen_reg_rtx (SImode);
3685 pat = gen_rtx_CONST (SImode, pat);
3686 emit_insn (gen_tls_ie (tmp_reg, pat, GEN_INT (0)));
3687 if (flag_pic)
3688 emit_use (pic_offset_table_rtx);
3689 x = gen_rtx_PLUS (Pmode, tmp_reg, tp_reg);
3690 break;
3692 case TLS_MODEL_LOCAL_EXEC:
3693 /* Expand symbol_ref@TPOFF':
3694 sethi $ta, hi20(symbol_ref@TPOFF)
3695 ori $ta, $ta, lo12(symbol_ref@TPOFF)
3696 add $r0, $ta, $tp */
3697 tmp_reg = gen_reg_rtx (SImode);
3698 pat = gen_rtx_UNSPEC (SImode, gen_rtvec (1, x), UNSPEC_TLSLE);
3699 pat = gen_rtx_CONST (SImode, pat);
3700 emit_insn (gen_sethi (tmp_reg, pat));
3701 emit_insn (gen_lo_sum (tmp_reg, tmp_reg, pat));
3702 x = gen_rtx_PLUS (Pmode, tmp_reg, tp_reg);
3703 break;
3705 default:
3706 gcc_unreachable ();
3708 else if (GET_CODE (x) == CONST)
3710 rtx base, addend;
3711 split_const (x, &base, &addend);
3713 if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_EXEC)
3715 /* Expand symbol_ref@TPOFF':
3716 sethi $ta, hi20(symbol_ref@TPOFF + addend)
3717 ori $ta, $ta, lo12(symbol_ref@TPOFF + addend)
3718 add $r0, $ta, $tp */
3719 tmp_reg = gen_reg_rtx (SImode);
3720 pat = gen_rtx_UNSPEC (SImode, gen_rtvec (1, base), UNSPEC_TLSLE);
3721 pat = gen_rtx_PLUS (SImode, pat, addend);
3722 pat = gen_rtx_CONST (SImode, pat);
3723 emit_insn (gen_sethi (tmp_reg, pat));
3724 emit_insn (gen_lo_sum (tmp_reg, tmp_reg, pat));
3725 x = gen_rtx_PLUS (Pmode, tmp_reg, tp_reg);
3729 return x;
3732 void
3733 nds32_expand_tls_move (rtx *operands)
3735 rtx src = operands[1];
3736 rtx base, addend;
3738 if (CONSTANT_P (src))
3739 split_const (src, &base, &addend);
3741 if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_EXEC)
3742 src = nds32_legitimize_tls_address (src);
3743 else
3745 src = nds32_legitimize_tls_address (base);
3746 if (addend != const0_rtx)
3748 src = gen_rtx_PLUS (SImode, src, addend);
3749 src = force_operand (src, operands[0]);
3753 emit_move_insn (operands[0], src);
3756 void
3757 nds32_expand_constant (machine_mode mode, HOST_WIDE_INT val,
3758 rtx target, rtx source)
3760 rtx temp = gen_reg_rtx (mode);
3761 int clear_sign_bit_copies = 0;
3762 int clear_zero_bit_copies = 0;
3763 unsigned HOST_WIDE_INT remainder = val & 0xffffffffUL;
3765 /* Count number of leading zeros. */
3766 clear_sign_bit_copies = __builtin_clz (remainder);
3767 /* Count number of trailing zeros. */
3768 clear_zero_bit_copies = __builtin_ctz (remainder);
3770 HOST_WIDE_INT sign_shift_mask = ((0xffffffffUL
3771 << (32 - clear_sign_bit_copies))
3772 & 0xffffffffUL);
3773 HOST_WIDE_INT zero_shift_mask = (1 << clear_zero_bit_copies) - 1;
3775 if (clear_sign_bit_copies > 0 && clear_sign_bit_copies < 17
3776 && (remainder | sign_shift_mask) == 0xffffffffUL)
3778 /* Transfer AND to two shifts, example:
3779 a = b & 0x7fffffff => (b << 1) >> 1 */
3780 rtx shift = GEN_INT (clear_sign_bit_copies);
3782 emit_insn (gen_ashlsi3 (temp, source, shift));
3783 emit_insn (gen_lshrsi3 (target, temp, shift));
3785 else if (clear_zero_bit_copies > 0 && clear_sign_bit_copies < 17
3786 && (remainder | zero_shift_mask) == 0xffffffffUL)
3788 /* Transfer AND to two shifts, example:
3789 a = b & 0xfff00000 => (b >> 20) << 20 */
3790 rtx shift = GEN_INT (clear_zero_bit_copies);
3792 emit_insn (gen_lshrsi3 (temp, source, shift));
3793 emit_insn (gen_ashlsi3 (target, temp, shift));
3795 else
3797 emit_move_insn (temp, GEN_INT (val));
3798 emit_move_insn (target, gen_rtx_fmt_ee (AND, mode, source, temp));
3802 /* Auxiliary functions for lwm/smw. */
3803 bool
3804 nds32_valid_smw_lwm_base_p (rtx op)
3806 rtx base_addr;
3808 if (!MEM_P (op))
3809 return false;
3811 base_addr = XEXP (op, 0);
3813 if (REG_P (base_addr))
3814 return true;
3815 else
3817 if (GET_CODE (base_addr) == POST_INC
3818 && REG_P (XEXP (base_addr, 0)))
3819 return true;
3822 return false;
3825 /* Auxiliary functions for manipulation DI mode. */
3826 rtx nds32_di_high_part_subreg(rtx reg)
3828 unsigned high_part_offset = subreg_highpart_offset (SImode, DImode);
3830 return simplify_gen_subreg (
3831 SImode, reg,
3832 DImode, high_part_offset);
3835 rtx nds32_di_low_part_subreg(rtx reg)
3837 unsigned low_part_offset = subreg_lowpart_offset (SImode, DImode);
3839 return simplify_gen_subreg (
3840 SImode, reg,
3841 DImode, low_part_offset);
3844 /* ------------------------------------------------------------------------ */
3846 /* Auxiliary function for output TLS patterns. */
3848 const char *
3849 nds32_output_tls_desc (rtx *operands)
3851 char pattern[1000];
3853 if (TARGET_RELAX_HINT)
3854 snprintf (pattern, sizeof (pattern),
3855 ".relax_hint %%1\n\tsethi $r0, hi20(%%0)\n\t"
3856 ".relax_hint %%1\n\tori $r0, $r0, lo12(%%0)\n\t"
3857 ".relax_hint %%1\n\tlw $r15, [$r0 + $gp]\n\t"
3858 ".relax_hint %%1\n\tadd $r0, $r0, $gp\n\t"
3859 ".relax_hint %%1\n\tjral $r15");
3860 else
3861 snprintf (pattern, sizeof (pattern),
3862 "sethi $r0, hi20(%%0)\n\t"
3863 "ori $r0, $r0, lo12(%%0)\n\t"
3864 "lw $r15, [$r0 + $gp]\n\t"
3865 "add $r0, $r0, $gp\n\t"
3866 "jral $r15");
3867 output_asm_insn (pattern, operands);
3868 return "";
3871 const char *
3872 nds32_output_tls_ie (rtx *operands)
3874 char pattern[1000];
3876 if (flag_pic)
3878 if (TARGET_RELAX_HINT)
3879 snprintf (pattern, sizeof (pattern),
3880 ".relax_hint %%2\n\tsethi %%0, hi20(%%1)\n\t"
3881 ".relax_hint %%2\n\tori %%0, %%0, lo12(%%1)\n\t"
3882 ".relax_hint %%2\n\tlw %%0, [%%0 + $gp]");
3883 else
3884 snprintf (pattern, sizeof (pattern),
3885 "sethi %%0, hi20(%%1)\n\t"
3886 "ori %%0, %%0, lo12(%%1)\n\t"
3887 "lw %%0, [%%0 + $gp]");
3889 else
3891 if (TARGET_RELAX_HINT)
3892 snprintf (pattern, sizeof (pattern),
3893 ".relax_hint %%2\n\tsethi %%0, hi20(%%1)\n\t"
3894 ".relax_hint %%2\n\tlwi %%0, [%%0 + lo12(%%1)]");
3895 else
3896 snprintf (pattern, sizeof (pattern),
3897 "sethi %%0, hi20(%%1)\n\t"
3898 "lwi %%0, [%%0 + lo12(%%1)]");
3900 output_asm_insn (pattern, operands);
3901 return "";