From 9c08d1fa8e7213d04f28050bb69375bdc6f6def5 Mon Sep 17 00:00:00 2001 From: erich Date: Sun, 3 Oct 1993 16:33:02 +0000 Subject: [PATCH] Major rewrite -- See ChangeLog for details git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@5564 138bc75d-0d04-0410-961f-82ee72b054a4 --- gcc/config/arm/arm.c | 1369 ++++++++++- gcc/config/arm/arm.h | 931 +++++-- gcc/config/arm/arm.md | 6208 ++++++++++++++++++++++++++++++++++++++++++----- gcc/config/arm/xm-arm.h | 25 + 4 files changed, 7557 insertions(+), 976 deletions(-) diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c index 75a91aff712..fa10ce23201 100644 --- a/gcc/config/arm/arm.c +++ b/gcc/config/arm/arm.c @@ -1,7 +1,8 @@ /* Output routines for GCC for ARM/RISCiX. - Copyright (C) 1991 Free Software Foundation, Inc. + Copyright (C) 1991, 1993 Free Software Foundation, Inc. Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl) and Martin Simmons (@harleqn.co.uk). + More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk) This file is part of GNU CC. @@ -18,7 +19,7 @@ GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ - + #include #include "assert.h" #include "config.h" @@ -43,6 +44,16 @@ extern char *output_multi_immediate (); extern char *arm_output_asm_insn (); extern void arm_increase_location (); +/* Define the information needed to generate branch insns. This is + stored from the compare operation. */ + +rtx arm_compare_op0, arm_compare_op1; +int arm_compare_fp; + +/* What type of cpu are we compiling for? */ + +enum processor_type arm_cpu; + /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we must report the mode of the memory reference from PRINT_OPERAND to PRINT_OPERAND_ADDRESS. */ @@ -54,6 +65,10 @@ int current_function_anonymous_args; /* Location counter of .text segment. 
*/ int arm_text_location = 0; +/* Set to one if we think that lr is only saved because of subroutine calls, + but all of these can be `put after' return insns */ +int lr_save_eliminated; + /* A hash table is used to store text segment labels and their associated offset from the start of the text segment. */ struct label_offset @@ -67,13 +82,38 @@ struct label_offset static struct label_offset *offset_table[LABEL_HASH_SIZE]; +/* Set to 1 when a return insn is output, this means that the epilogue + is not needed. */ + +static int return_used_this_function; + /* For an explanation of these variables, see final_prescan_insn below. */ int arm_ccfsm_state; int arm_current_cc; rtx arm_target_insn; int arm_target_label; -char *arm_condition_codes[]; +/* Return 1 if it is possible to return using a single instruction */ + +int +use_return_insn () +{ + int regno; + + if (!reload_completed ||current_function_pretend_args_size + || current_function_anonymous_args + || (get_frame_size () && !(TARGET_APCS || frame_pointer_needed))) + return 0; + + /* Can't be done if any of the FPU regs are pushed, since this also + requires an insn */ + for (regno = 20; regno < 24; regno++) + if (regs_ever_live[regno]) + return 0; + + return 1; +} + /* Return the number of mov instructions needed to get the constant VALUE into a register. */ @@ -96,38 +136,134 @@ arm_const_nmoves (value) int const_ok_for_arm (i) - int i; + HOST_WIDE_INT i; { - unsigned int mask = ~0xFF; + unsigned HOST_WIDE_INT mask = ~0xFF; do { - if ((i & mask) == 0) - return(TRUE); - mask = (mask << 2) | (mask >> (32 - 2)); + if ((i & mask & 0xffffffffu) == 0) + return(TRUE); + mask = (mask << 2) | ((mask & 0xffffffffu) >> (32 - 2)) | ~0xffffffffu; } while (mask != ~0xFF); return (FALSE); } /* const_ok_for_arm */ +/* This code has been fixed for cross compilation. 
*/ + +static int fpa_consts_inited = 0; + +char *strings_fpa[8] = { + "0.0", + "1.0", + "2.0", + "3.0", + "4.0", + "5.0", + "0.5", + "10.0" + }; + +static REAL_VALUE_TYPE values_fpa[8]; + +static void +init_fpa_table () +{ + int i; + REAL_VALUE_TYPE r; + + for (i = 0; i < 8; i++) + { + r = REAL_VALUE_ATOF (strings_fpa[i], DFmode); + values_fpa[i] = r; + } + fpa_consts_inited = 1; +} + /* Return TRUE if rtx X is a valid immediate FPU constant. */ int const_double_rtx_ok_for_fpu (x) rtx x; { - double d; - union real_extract u; - u.i[0] = CONST_DOUBLE_LOW(x); - u.i[1] = CONST_DOUBLE_HIGH(x); - d = u.d; - - return (d == 0.0 || d == 1.0 || d == 2.0 || d == 3.0 - || d == 4.0 || d == 5.0 || d == 0.5 || d == 10.0); + REAL_VALUE_TYPE r; + int i; + + if (!fpa_consts_inited) + init_fpa_table (); + + REAL_VALUE_FROM_CONST_DOUBLE (r, x); + if (REAL_VALUE_MINUS_ZERO (r)) + return 0; + for (i = 0; i < 8; i++) + if (REAL_VALUES_EQUAL (r, values_fpa[i])) + return 1; + return 0; } /* const_double_rtx_ok_for_fpu */ + +/* Return TRUE if rtx X is a valid immediate FPU constant. */ + +int +neg_const_double_rtx_ok_for_fpu (x) + rtx x; +{ + REAL_VALUE_TYPE r; + int i; + + if (!fpa_consts_inited) + init_fpa_table (); + + REAL_VALUE_FROM_CONST_DOUBLE (r, x); + r = REAL_VALUE_NEGATE (r); + if (REAL_VALUE_MINUS_ZERO (r)) + return 0; + for (i = 0; i < 8; i++) + if (REAL_VALUES_EQUAL (r, values_fpa[i])) + return 1; + return 0; +} /* neg_const_double_rtx_ok_for_fpu */ /* Predicates for `match_operand' and `match_operator'. */ +/* s_register_operand is the same as register_operand, but it doesn't accept + (SUBREG (MEM)...). */ + +int +s_register_operand (op, mode) + register rtx op; + enum machine_mode mode; +{ + if (GET_MODE (op) != mode && mode != VOIDmode) + return 0; + + if (GET_CODE (op) == SUBREG) + { + op = SUBREG_REG (op); + } + + /* We don't consider registers whose class is NO_REGS + to be a register operand. 
*/ + return (GET_CODE (op) == REG + && (REGNO (op) >= FIRST_PSEUDO_REGISTER + || REGNO_REG_CLASS (REGNO (op)) != NO_REGS)); +} + +/* Return 1 if OP is an item in memory, given that we are in reload. */ + +int +reload_memory_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + int regno = true_regnum (op); + + return (! CONSTANT_P (op) + && (regno == -1 + || (GET_CODE (op) == REG + && REGNO (op) >= FIRST_PSEUDO_REGISTER))); +} + /* Return TRUE for valid operands for the rhs of an ARM instruction. */ int @@ -135,10 +271,48 @@ arm_rhs_operand (op, mode) rtx op; enum machine_mode mode; { - return (register_operand (op, mode) + return (s_register_operand (op, mode) || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))); } /* arm_rhs_operand */ +/* Return TRUE for valid operands for the rhs of an ARM instruction, or a load. + */ + +int +arm_rhsm_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + return (s_register_operand (op, mode) + || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))) + || memory_operand (op, mode)); +} /* arm_rhs_operand */ + +/* Return TRUE for valid operands for the rhs of an ARM instruction, or if a + constant that is valid when negated. */ + +int +arm_add_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + return (s_register_operand (op, mode) + || (GET_CODE (op) == CONST_INT + && (const_ok_for_arm (INTVAL (op)) + || const_ok_for_arm (-INTVAL (op))))); +} /* arm_rhs_operand */ + +int +arm_not_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + return (s_register_operand (op, mode) + || (GET_CODE (op) == CONST_INT + && (const_ok_for_arm (INTVAL (op)) + || const_ok_for_arm (~INTVAL (op))))); +} /* arm_rhs_operand */ + /* Return TRUE for valid operands for the rhs of an FPU instruction. 
*/ int @@ -146,13 +320,26 @@ fpu_rhs_operand (op, mode) rtx op; enum machine_mode mode; { - if (register_operand (op, mode)) + if (s_register_operand (op, mode)) return(TRUE); else if (GET_CODE (op) == CONST_DOUBLE) return (const_double_rtx_ok_for_fpu (op)); else return (FALSE); } /* fpu_rhs_operand */ +int +fpu_add_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + if (s_register_operand (op, mode)) + return(TRUE); + else if (GET_CODE (op) == CONST_DOUBLE) + return const_double_rtx_ok_for_fpu (op) + || neg_const_double_rtx_ok_for_fpu (op); + return (FALSE); +} + /* Return nonzero if OP is a constant power of two. */ int @@ -169,15 +356,16 @@ power_of_two_operand (op, mode) } /* power_of_two_operand */ /* Return TRUE for a valid operand of a DImode operation. - Either: REG, CONST_DOUBLE or MEM(offsettable). - Note that this disallows MEM(REG+REG). */ + Either: REG, CONST_DOUBLE or MEM(DImode_address). + Note that this disallows MEM(REG+REG), but allows + MEM(PRE/POST_INC/DEC(REG)). */ int di_operand (op, mode) rtx op; enum machine_mode mode; { - if (register_operand (op, mode)) + if (s_register_operand (op, mode)) return (TRUE); switch (GET_CODE (op)) @@ -186,8 +374,7 @@ di_operand (op, mode) case CONST_INT: return (TRUE); case MEM: - return (memory_address_p (DImode, XEXP (op, 0)) - && offsettable_address_p (FALSE, DImode, XEXP (op, 0))); + return (memory_address_p (DImode, XEXP (op, 0))); default: return (FALSE); } @@ -200,10 +387,25 @@ index_operand (op, mode) rtx op; enum machine_mode mode; { - return (register_operand(op, mode) - || (immediate_operand (op, mode) && abs (INTVAL (op)) < 4096)); + return (s_register_operand(op, mode) + || (immediate_operand (op, mode) + && INTVAL (op) < 4096 && INTVAL (op) > -4096)); } /* index_operand */ +/* Return TRUE for valid shifts by a constant. This also accepts any + power of two on the (somewhat overly relaxed) assumption that the + shift operator in this case was a mult. 
*/ + +int +const_shift_operand (op, mode) + rtx op; + enum machine_mode mode; +{ + return (power_of_two_operand (op, mode) + || (immediate_operand (op, mode) + && (INTVAL (op) < 32 && INTVAL (op) > 0))); +} /* const_shift_operand */ + /* Return TRUE for arithmetic operators which can be combined with a multiply (shift). */ @@ -236,13 +438,384 @@ shift_operator (x, mode) { enum rtx_code code = GET_CODE (x); + if (code == MULT) + return power_of_two_operand (XEXP (x, 1)); return (code == ASHIFT || code == LSHIFT || code == ASHIFTRT || code == LSHIFTRT); } } /* shift_operator */ + +int equality_operator (x, mode) +rtx x; +enum machine_mode mode; +{ + return (GET_CODE (x) == EQ || GET_CODE (x) == NE); +} + +/* Return TRUE for SMIN SMAX UMIN UMAX operators. */ + +int +minmax_operator (x, mode) + rtx x; + enum machine_mode mode; +{ + enum rtx_code code = GET_CODE (x); + + if (GET_MODE (x) != mode) + return FALSE; + return code == SMIN || code == SMAX || code == UMIN || code == UMAX; +} /* minmax_operator */ + +/* return TRUE if x is EQ or NE */ + +/* Return TRUE if this is the condition code register, if we aren't given + a mode, accept any class CCmode register */ + +int +cc_register (x, mode) +rtx x; +enum machine_mode mode; +{ + if (mode == VOIDmode) + { + mode = GET_MODE (x); + if (GET_MODE_CLASS (mode) != MODE_CC) + return FALSE; + } + if (mode == GET_MODE (x) && GET_CODE (x) == REG && REGNO (x) == 24) + return TRUE; + return FALSE; +} + +enum rtx_code +minmax_code (x) +rtx x; +{ + enum rtx_code code = GET_CODE (x); + + if (code == SMAX) + return GE; + if (code == SMIN) + return LE; + if (code == UMIN) + return LEU; + if (code == UMAX) + return GEU; + abort (); +} + +/* Return 1 if memory locations are adjacent */ + +adjacent_mem_locations (a, b) + rtx a, b; +{ + int val0 = 0, val1 = 0; + int reg0, reg1; + + if ((GET_CODE (XEXP (a, 0)) == REG + || (GET_CODE (XEXP (a, 0)) == PLUS + && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT)) + && (GET_CODE (XEXP (b, 0)) == REG 
+ || (GET_CODE (XEXP (b, 0)) == PLUS + && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT))) + { + if (GET_CODE (XEXP (a, 0)) == PLUS) + { + reg0 = REGNO (XEXP (XEXP (a, 0), 0)); + val0 = INTVAL (XEXP (XEXP (a, 0), 1)); + } + else + reg0 = REGNO (XEXP (a, 0)); + if (GET_CODE (XEXP (b, 0)) == PLUS) + { + reg1 = REGNO (XEXP (XEXP (b, 0), 0)); + val1 = INTVAL (XEXP (XEXP (b, 0), 1)); + } + else + reg1 = REGNO (XEXP (b, 0)); + return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4); + } + return 0; +} + +/* Return 1 if OP is a load multiple operation. It is known to be + parallel and the first section will be tested. */ + +load_multiple_operation (op, mode) + rtx op; + enum machine_mode mode; +{ + int count = XVECLEN (op, 0); + int dest_regno; + rtx src_addr; + int i = 1, base = 0; + rtx elt; + + if (count <= 1 + || GET_CODE (XVECEXP (op, 0, 0)) != SET) + return 0; + + /* Check to see if this might be a write-back */ + if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS) + { + i++; + base = 1; + + /* Now check it more carefully */ + if (GET_CODE (SET_DEST (elt)) != REG + || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG + || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt)) + || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT + || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4 + || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER + || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG + || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0)) + != REGNO (SET_DEST (elt))) + return 0; + count--; + } + + /* Perform a quick check so we don't blow up below. 
*/ + if (count <= i + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM) + return 0; + + dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1))); + src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0); + + for (; i < count; i++) + { + rtx elt = XVECEXP (op, 0, i); + + if (GET_CODE (elt) != SET + || GET_CODE (SET_DEST (elt)) != REG + || GET_MODE (SET_DEST (elt)) != SImode + || REGNO (SET_DEST (elt)) != dest_regno + i - base + || GET_CODE (SET_SRC (elt)) != MEM + || GET_MODE (SET_SRC (elt)) != SImode + || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS + || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr) + || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT + || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4) + return 0; + } + + return 1; +} + +/* Return 1 if OP is a store multiple operation. It is known to be + parallel and the first section will be tested. */ + +store_multiple_operation (op, mode) + rtx op; + enum machine_mode mode; +{ + int count = XVECLEN (op, 0); + int src_regno; + rtx dest_addr; + int i = 1, base = 0; + rtx elt; + + if (count <= 1 + || GET_CODE (XVECEXP (op, 0, 0)) != SET) + return 0; + + /* Check to see if this might be a write-back */ + if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS) + { + i++; + base = 1; + + /* Now check it more carefully */ + if (GET_CODE (SET_DEST (elt)) != REG + || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG + || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt)) + || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT + || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4 + || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER + || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG + || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0)) + != REGNO (SET_DEST (elt))) + return 0; + count--; + } + + /* Perform a quick check so we don't blow up below. 
*/ + if (count <= i + || GET_CODE (XVECEXP (op, 0, i - 1)) != SET + || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM + || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG) + return 0; + + src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1))); + dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0); + + for (; i < count; i++) + { + elt = XVECEXP (op, 0, i); + + if (GET_CODE (elt) != SET + || GET_CODE (SET_SRC (elt)) != REG + || GET_MODE (SET_SRC (elt)) != SImode + || REGNO (SET_SRC (elt)) != src_regno + i - base + || GET_CODE (SET_DEST (elt)) != MEM + || GET_MODE (SET_DEST (elt)) != SImode + || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS + || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr) + || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT + || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4) + return 0; + } + + return 1; +} + +/* Routines for use in generating RTL */ + +rtx arm_gen_load_multiple (base_regno, count, from, up, write_back) + int base_regno; + int count; + rtx from; + int up; + int write_back; +{ + int i = 0, j; + rtx result; + int sign = up ? 1 : -1; + + result = gen_rtx (PARALLEL, VOIDmode, + rtvec_alloc (count + (write_back ? 2 : 0))); + if (write_back) + { + XVECEXP (result, 0, 0) + = gen_rtx (SET, GET_MODE (from), from, + plus_constant (from, count * 4 * sign)); + i = 1; + count++; + } + for (j = 0; i < count; i++, j++) + { + XVECEXP (result, 0, i) + = gen_rtx (SET, VOIDmode, gen_rtx (REG, SImode, base_regno + j), + gen_rtx (MEM, SImode, + plus_constant (from, j * 4 * sign))); + } + if (write_back) + XVECEXP (result, 0, i) = gen_rtx (CLOBBER, SImode, from); + + return result; +} + +rtx arm_gen_store_multiple (base_regno, count, to, up, write_back) + int base_regno; + int count; + rtx to; + int up; + int write_back; +{ + int i = 0, j; + rtx result; + int sign = up ? 1 : -1; + + result = gen_rtx (PARALLEL, VOIDmode, + rtvec_alloc (count + (write_back ? 
2 : 0))); + if (write_back) + { + XVECEXP (result, 0, 0) + = gen_rtx (SET, GET_MODE (to), to, + plus_constant (to, count * 4 * sign)); + i = 1; + count++; + } + for (j = 0; i < count; i++, j++) + { + XVECEXP (result, 0, i) + = gen_rtx (SET, VOIDmode, + gen_rtx (MEM, SImode, plus_constant (to, j * 4 * sign)), + gen_rtx (REG, SImode, base_regno + j)); + } + if (write_back) + XVECEXP (result, 0, i) = gen_rtx (CLOBBER, SImode, to); + + return result; +} + +/* X and Y are two things to compare using CODE. Emit the compare insn and + return the rtx for register 0 in the proper mode. FP means this is a + floating point compare: I don't think that it is needed on the arm. */ + +rtx +gen_compare_reg (code, x, y, fp) + enum rtx_code code; + rtx x, y; +{ + enum machine_mode mode = SELECT_CC_MODE (code, x, y); + rtx cc_reg = gen_rtx (REG, mode, 24); + + emit_insn (gen_rtx (SET, VOIDmode, cc_reg, + gen_rtx (COMPARE, mode, x, y))); + + return cc_reg; +} + + +/* Check to see if a branch is forwards or backwards. Return TRUE if it + is backwards. */ + +int +arm_backwards_branch (from, to) +int from, to; +{ + return (insn_addresses[to] < insn_addresses[from]); +} + +/* Check to see if a branch is within the distance that can be done using + an arithmetic expression. */ +int +short_branch (from, to) +int from, to; +{ + int delta = insn_addresses[from] + 2 - insn_addresses[to]; + + return abs (delta) < 245; /* A small margin for safety */ +} + +/* Check to see that the insn isn't the target of the conditionalizing + code */ +int +arm_insn_not_targeted (insn) +rtx insn; +{ + return insn != arm_target_insn; +} + /* Routines to output assembly language. */ +/* fp_immediate_constant + if the rtx is the correct value then return the string of the number. + In this way we can ensure that valid double constants are generated even + when cross compiling. 
*/ +char * +fp_immediate_constant (x) +rtx (x); +{ + REAL_VALUE_TYPE r; + int i; + + if (!fpa_consts_inited) + init_fpa_table (); + + REAL_VALUE_FROM_CONST_DOUBLE (r, x); + for (i = 0; i < 8; i++) + if (REAL_VALUES_EQUAL (r, values_fpa[i])) + return strings_fpa[i]; + abort (); +} + + /* Output the operands of a LDM/STM instruction to STREAM. MASK is the ARM register set mask of which only bits 0-15 are important. INSTR is the possibly suffixed base register. HAT unequals zero if a hat @@ -289,6 +862,134 @@ output_call (operands) return (""); } /* output_call */ +static int +eliminate_lr2ip (x) +rtx *x; +{ + int something_changed = 0; + rtx x0 = *x; + int code = GET_CODE (x0); + register int i, j; + register char *fmt; + + switch (code) + { + case REG: + if (REGNO (x0) == 14) + { + *x = gen_rtx (REG, SImode, 12); + return 1; + } + return 0; + default: + /* Scan through the sub-elements and change any references there */ + fmt = GET_RTX_FORMAT (code); + for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) + if (fmt[i] == 'e') + something_changed |= eliminate_lr2ip (&XEXP (x0, i)); + else if (fmt[i] == 'E') + for (j = 0; j < XVECLEN (x0, i); j++) + something_changed |= eliminate_lr2ip (&XVECEXP (x0, i, j)); + return something_changed; + } +} + +/* Output a 'call' insn that is a reference in memory. */ + +char * +output_call_mem (operands) + rtx operands[]; +{ + operands[0] = copy_rtx (operands[0]); /* Be ultra careful */ + /* Handle calls using lr by using ip (which may be clobbered in subr anyway). + */ + if (eliminate_lr2ip (&operands[0])) + arm_output_asm_insn ("mov\tip, lr", operands); + arm_output_asm_insn ("mov\tlr, pc", operands); + arm_output_asm_insn ("ldr\tpc, %0", operands); + return (""); +} /* output_call */ + + +/* Output a move from arm registers to an fpu registers. + OPERANDS[0] is an fpu register. + OPERANDS[1] is the first registers of an arm register pair. 
*/ + +char * +output_mov_long_double_fpu_from_arm (operands) + rtx operands[]; +{ + int arm_reg0 = REGNO (operands[1]); + rtx ops[3]; + + if (arm_reg0 == 12) + abort(); + ops[0] = gen_rtx (REG, SImode, arm_reg0); + ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0); + ops[2] = gen_rtx (REG, SImode, 2 + arm_reg0); + + arm_output_asm_insn ("stmfd\tsp!, {%0, %1, %2}", ops); + arm_output_asm_insn ("ldfe\t%0, [sp], #12", operands); + return (""); +} /* output_mov_long_double_fpu_from_arm */ + +/* Output a move from an fpu register to arm registers. + OPERANDS[0] is the first registers of an arm register pair. + OPERANDS[1] is an fpu register. */ + +char * +output_mov_long_double_arm_from_fpu (operands) + rtx operands[]; +{ + int arm_reg0 = REGNO (operands[0]); + rtx ops[3]; + + if (arm_reg0 == 12) + abort(); + ops[0] = gen_rtx (REG, SImode, arm_reg0); + ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0); + ops[2] = gen_rtx (REG, SImode, 2 + arm_reg0); + + arm_output_asm_insn ("stfe\t%1, [sp, #-12]!", operands); + arm_output_asm_insn ("ldmfd\tsp!, {%0, %1, %2}", ops); + return(""); +} /* output_mov_long_double_arm_from_fpu */ + +/* Output a move from arm registers to arm registers of a long double + OPERANDS[0] is the destination. + OPERANDS[1] is the source. */ +char * +output_mov_long_double_arm_from_arm (operands) +rtx operands[]; +{ + /* We have to be careful here because the two might overlap */ + int dest_start = REGNO (operands[0]); + int src_start = REGNO (operands[1]); + rtx ops[2]; + int i; + + if (dest_start < src_start) + { + for (i = 0; i < 3; i++) + { + ops[0] = gen_rtx (REG, SImode, dest_start + i); + ops[1] = gen_rtx (REG, SImode, src_start + i); + arm_output_asm_insn ("mov\t%0, %1", ops); + } + } + else + { + for (i = 2; i >= 0; i--) + { + ops[0] = gen_rtx (REG, SImode, dest_start + i); + ops[1] = gen_rtx (REG, SImode, src_start + i); + arm_output_asm_insn ("mov\t%0, %1", ops); + } + } + return ""; +} + + /* Output a move from arm registers to an fpu registers. 
OPERANDS[0] is an fpu register. OPERANDS[1] is the first registers of an arm register pair. */ @@ -371,26 +1072,51 @@ output_move_double (operands) CONST_DOUBLE_HIGH (operands[1])); operands[1] = gen_rtx (CONST_INT, VOIDmode, CONST_DOUBLE_LOW (operands[1])); - arm_output_asm_insn ("mov\t%0, %1", operands); - arm_output_asm_insn ("mov\t%0, %1", otherops); + output_mov_immediate (operands, FALSE, ""); + output_mov_immediate (otherops, FALSE, ""); } else if (code1 == CONST_INT) { otherops[1] = const0_rtx; - arm_output_asm_insn ("mov\t%0, %1", operands); - arm_output_asm_insn ("mov\t%0, %1", otherops); + /* sign extend the intval into the high-order word */ + /* Note: output_mov_immediate may clobber operands[1], so we + put this out first */ + if (INTVAL (operands[1]) < 0) + arm_output_asm_insn ("mvn\t%0, %1", otherops); + else + arm_output_asm_insn ("mov\t%0, %1", otherops); + output_mov_immediate (operands, FALSE, ""); } else if (code1 == MEM) { - if (GET_CODE (XEXP (operands[1], 0)) == REG) + switch (GET_CODE (XEXP (operands[1], 0))) { + case REG: /* Handle the simple case where address is [r, #0] more efficient. 
*/ operands[1] = XEXP (operands[1], 0); arm_output_asm_insn ("ldmia\t%1, %M0", operands); - } - else - { + break; + case PRE_INC: + operands[1] = XEXP (XEXP (operands[1], 0), 0); + arm_output_asm_insn ("add\t%1, %1, #8", operands); + arm_output_asm_insn ("ldmia\t%1, %M0", operands); + break; + case PRE_DEC: + operands[1] = XEXP (XEXP (operands[1], 0), 0); + arm_output_asm_insn ("sub\t%1, %1, #8", operands); + arm_output_asm_insn ("ldmia\t%1, %M0", operands); + break; + case POST_INC: + operands[1] = XEXP (XEXP (operands[1], 0), 0); + arm_output_asm_insn ("ldmia\t%1!, %M0", operands); + break; + case POST_DEC: + operands[1] = XEXP (XEXP (operands[1], 0), 0); + arm_output_asm_insn ("ldmia\t%1, %M0", operands); + arm_output_asm_insn ("sub\t%1, %1, #8", operands); + break; + default: otherops[1] = adj_offsettable_operand (operands[1], 4); /* Take care of overlapping base/data reg. */ if (reg_mentioned_p (operands[0], operands[1])) @@ -411,14 +1137,32 @@ output_move_double (operands) { if (REGNO (operands[1]) == 12) abort(); - - if (GET_CODE (XEXP (operands[0], 0)) == REG) - { + switch (GET_CODE (XEXP (operands[0], 0))) + { + case REG: operands[0] = XEXP (operands[0], 0); arm_output_asm_insn ("stmia\t%0, %M1", operands); - } - else - { + break; + case PRE_INC: + operands[0] = XEXP (XEXP (operands[0], 0), 0); + arm_output_asm_insn ("add\t%0, %0, #8", operands); + arm_output_asm_insn ("stmia\t%0, %M1", operands); + break; + case PRE_DEC: + operands[0] = XEXP (XEXP (operands[0], 0), 0); + arm_output_asm_insn ("sub\t%0, %0, #8", operands); + arm_output_asm_insn ("stmia\t%0, %M1", operands); + break; + case POST_INC: + operands[0] = XEXP (XEXP (operands[0], 0), 0); + arm_output_asm_insn ("stmia\t%0!, %M1", operands); + break; + case POST_DEC: + operands[0] = XEXP (XEXP (operands[0], 0), 0); + arm_output_asm_insn ("stmia\t%0, %M1", operands); + arm_output_asm_insn ("sub\t%0, %0, #8", operands); + break; + default: otherops[0] = adj_offsettable_operand (operands[0], 4); 
otherops[1] = gen_rtx (REG, SImode, 1 + REGNO (operands[1])); arm_output_asm_insn ("str\t%1, %0", operands); @@ -593,6 +1337,10 @@ shift_instr (op, shift_ptr) mnem = "lsr"; max_shift = 32; break; + case MULT: + *shift_ptr = gen_rtx (CONST_INT, VOIDmode, + int_log2 (INTVAL (*shift_ptr))); + return ("asl"); default: abort(); } @@ -719,6 +1467,21 @@ output_shifted_move (op, operands) return (arm_output_asm_insn (mnemonic, operands)); } /* output_shifted_move */ +char * +output_shift_compare (operands, neg) +rtx *operands; +int neg; +{ + char buf[80]; + + if (neg) + sprintf (buf, "cmn\t%%1, %%3, %s %%4", shift_instr (GET_CODE (operands[2]), + &operands[4])); + else + sprintf (buf, "cmp\t%%1, %%3, %s %%4", shift_instr (GET_CODE (operands[2]), + &operands[4])); + return arm_output_asm_insn (buf, operands); +} /* output_shift_compare */ /* Output a .ascii pseudo-op, keeping track of lengths. This is because /bin/as is horribly restrictive. */ @@ -768,31 +1531,211 @@ output_ascii_pseudo_op (stream, p, len) arm_increase_location (chars_so_far); } /* output_ascii_pseudo_op */ + +/* Try to determine whether a pattern really clobbers the link register. 
+ This information is useful when peepholing, so that lr need not be pushed + if we combine a call followed by a return */ + +static int +pattern_really_clobbers_lr (x) +rtx x; +{ + int i; + + switch (GET_CODE (x)) + { + case SET: + switch (GET_CODE (SET_DEST (x))) + { + case REG: + return REGNO (SET_DEST (x)) == 14; + case SUBREG: + if (GET_CODE (XEXP (SET_DEST (x), 0)) == REG) + return REGNO (XEXP (SET_DEST (x), 0)) == 14; + abort (); + default: + return 0; + } + case PARALLEL: + for (i = 0; i < XVECLEN (x, 0); i++) + if (pattern_really_clobbers_lr (XVECEXP (x, 0, i))) + return 1; + return 0; + case CLOBBER: + switch (GET_CODE (XEXP (x, 0))) + { + case REG: + return REGNO (XEXP (x, 0)) == 14; + case SUBREG: + if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG) + return REGNO (XEXP (XEXP (x, 0), 0)) == 14; + abort (); + default: + return 0; + } + case UNSPEC: + return 1; + default: + return 0; + } +} + +static int +function_really_clobbers_lr (first) +rtx first; +{ + rtx insn, next; + + for (insn = first; insn; insn = next_nonnote_insn (insn)) + { + switch (GET_CODE (insn)) + { + case BARRIER: + case NOTE: + case CODE_LABEL: + case JUMP_INSN: /* Jump insns only change the PC (and conds) */ + case INLINE_HEADER: + break; + case INSN: + if (pattern_really_clobbers_lr (PATTERN (insn))) + return 1; + break; + case CALL_INSN: + /* Don't yet know how to handle those calls that are not to a + SYMBOL_REF */ + if (GET_CODE (PATTERN (insn)) != PARALLEL) + abort (); + switch (GET_CODE (XVECEXP (PATTERN (insn), 0, 0))) + { + case CALL: + if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn), 0, 0), 0), 0)) + != SYMBOL_REF) + return 1; + break; + case SET: + if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn), + 0, 0)), 0), 0)) + != SYMBOL_REF) + return 1; + break; + default: /* Don't recognize it, be safe */ + return 1; + } + /* A call can be made (by peepholing) not to clobber lr iff it is + followed by a return. 
There may, however, be a use insn iff + we are returning the result of the call. + If we run off the end of the insn chain, then that means the + call was at the end of the function. Unfortunately we don't + have a return insn for the peephole to recognize, so we + must reject this. (Can this be fixed by adding our own insn?) */ + if ((next = next_nonnote_insn (insn)) == NULL) + return 1; + if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == USE + && (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET) + && (REGNO (SET_DEST (XVECEXP (PATTERN (insn), 0, 0))) + == REGNO (XEXP (PATTERN (next), 0)))) + if ((next = next_nonnote_insn (next)) == NULL) + return 1; + if (GET_CODE (next) == JUMP_INSN + && GET_CODE (PATTERN (next)) == RETURN) + break; + return 1; + default: + abort (); + } + } + /* We have reached the end of the chain so lr was _not_ clobbered */ + return 0; +} + +char * +output_return_instruction (operand, really_return) +rtx operand; +int really_return; +{ + char instr[100]; + int reg, live_regs = 0; + + if (current_function_calls_alloca && !really_return) + abort(); + + for (reg = 4; reg < 10; reg++) + if (regs_ever_live[reg]) + live_regs++; + + if (live_regs || (regs_ever_live[14] && !lr_save_eliminated)) + live_regs++; + + if (frame_pointer_needed) + live_regs += 4; + + if (live_regs) + { + if (lr_save_eliminated || !regs_ever_live[14]) + live_regs++; + if (frame_pointer_needed) + strcpy (instr, "ldm%d0ea\tfp, {"); + else + strcpy (instr, "ldm%d0fd\tsp!, {"); + for (reg = 4; reg < 10; reg++) + if (regs_ever_live[reg]) + { + strcat (instr, reg_names[reg]); + if (--live_regs) + strcat (instr, ", "); + } + if (frame_pointer_needed) + { + strcat (instr, reg_names[11]); + strcat (instr, ", "); + strcat (instr, reg_names[13]); + strcat (instr, ", "); + strcat (instr, really_return ? reg_names[15] : reg_names[14]); + } + else + strcat (instr, really_return ? reg_names[15] : reg_names[14]); + strcat (instr, (TARGET_6 || !really_return) ? 
"}" : "}^"); + arm_output_asm_insn (instr, &operand); + } + else if (really_return) + { + strcpy (instr, TARGET_6 ? "mov%d0\tpc, lr" : "mov%d0s\tpc, lr"); + arm_output_asm_insn (instr, &operand); + } + return_used_this_function = 1; + return ""; +} + +/* The amount of stack adjustment that happens here, in output_return and in + output_epilogue must be exactly the same as was calculated during reload, + or things will point to the wrong place. The only time we can safely + ignore this constraint is when a function has no arguments on the stack, + no stack frame requirement and no live registers execpt for `lr'. If we + can guarantee that by making all function calls into tail calls and that + lr is not clobbered in any other way, then there is no need to push lr + onto the stack. */ + void output_prologue (f, frame_size) FILE *f; int frame_size; { - int reg, live_regs_mask = 0, code_size = 0; rtx operands[3]; - /* Nonzero if the `fp' (argument pointer) register is needed. */ - int fp_needed = 0; - /* Nonzero if we must stuff some register arguments onto the stack as if they were passed there. 
*/ int store_arg_regs = 0; + return_used_this_function = 0; + lr_save_eliminated = 0; + fprintf (f, "\t@ args = %d, pretend = %d, frame = %d\n", - current_function_args_size, current_function_pretend_args_size, frame_size); - fprintf (f, "\t@ frame_pointer_needed = %d, current_function_anonymous_args = %d\n", + current_function_args_size, current_function_pretend_args_size, + frame_size); + fprintf (f, "\t@ frame_needed = %d, current_function_anonymous_args = %d\n", frame_pointer_needed, current_function_anonymous_args); - if (current_function_pretend_args_size || current_function_args_size - || frame_pointer_needed || current_function_anonymous_args || TARGET_APCS) - fp_needed = 1; - if (current_function_anonymous_args && current_function_pretend_args_size) store_arg_regs = 1; @@ -800,18 +1743,23 @@ output_prologue (f, frame_size) if (regs_ever_live[reg]) live_regs_mask |= (1 << reg); - if (fp_needed) + if (frame_pointer_needed) { live_regs_mask |= 0xD800; - /* The following statement is probably redundant now - because the frame pointer is recorded in regs_ever_live. */ - if (frame_pointer_needed) - live_regs_mask |= (1 << FRAME_POINTER_REGNUM); fputs ("\tmov\tip, sp\n", f); code_size += 4; } else if (regs_ever_live[14]) - live_regs_mask |= 0x4000; + { + if (! current_function_args_size + && !function_really_clobbers_lr (get_insns ())) + { + fprintf (f,"\t@ I don't think this function clobbers lr\n"); + lr_save_eliminated = 1; + } + else + live_regs_mask |= 0x4000; + } /* If CURRENT_FUNCTION_PRETEND_ARGS_SIZE, adjust the stack pointer to make room. 
If also STORE_ARG_REGS store the argument registers involved in @@ -827,6 +1775,7 @@ output_prologue (f, frame_size) arg_size > 0; reg--, arg_size -= 4) mask |= (1 << reg); print_multi_reg (f, "stmfd\tsp!", mask, FALSE); + code_size += 4; } else { @@ -839,6 +1788,13 @@ output_prologue (f, frame_size) if (live_regs_mask) { + /* if a di mode load/store multiple is used, and the base register + is r3, then r4 can become an ever live register without lr + doing so, in this case we need to push lr as well, or we + will fail to get a proper return. */ + + live_regs_mask |= 0x4000; + lr_save_eliminated = 0; print_multi_reg (f, "stmfd\tsp!", live_regs_mask, FALSE); code_size += 4; } @@ -850,23 +1806,17 @@ output_prologue (f, frame_size) code_size += 4; } - if (fp_needed) + if (frame_pointer_needed) { /* Make `fp' point to saved value of `pc'. */ - operands[0] = arg_pointer_rtx; + operands[0] = gen_rtx (REG, SImode, HARD_FRAME_POINTER_REGNUM); operands[1] = gen_rtx (REG, SImode, 12); operands[2] = gen_rtx (CONST_INT, VOIDmode, - (4 + current_function_pretend_args_size)); output_add_immediate (operands); } - if (frame_pointer_needed) - { - fprintf (f, "\tmov\trfp, sp\n"); - code_size += 4; - } - if (frame_size) { operands[0] = operands[1] = stack_pointer_rtx; @@ -884,36 +1834,41 @@ output_epilogue (f, frame_size) int frame_size; { int reg, live_regs_mask = 0, code_size = 0, fp_needed = 0; + /* If we need this then it will always be at lesat this much */ + int floats_offset = 24; rtx operands[3]; - if (current_function_pretend_args_size || current_function_args_size - || frame_pointer_needed || current_function_anonymous_args || TARGET_APCS) - fp_needed = 1; - - for (reg = 4; reg < 10; reg++) - if (regs_ever_live[reg]) - live_regs_mask |= (1 << reg); - - if (fp_needed) + if (use_return_insn() && return_used_this_function) { - live_regs_mask |= 0xA800; - if (frame_pointer_needed) - live_regs_mask |= (1 << FRAME_POINTER_REGNUM); + if (frame_size && !(frame_pointer_needed || 
TARGET_APCS)) + { + abort (); + } + return; } - else if (regs_ever_live[14]) - live_regs_mask |= 0x4000; - for (reg = 20; reg < 24; reg++) + for (reg = 4; reg <= 10; reg++) if (regs_ever_live[reg]) { - fprintf (f, "\tldfe\t%s, [%s], #12\n", reg_names[reg], - frame_pointer_needed ? "rfp" : "sp"); - code_size += 4; + live_regs_mask |= (1 << reg); + floats_offset += 4; } - if (fp_needed) + + if (frame_pointer_needed) { - print_multi_reg (f, "ldmea\tfp", live_regs_mask, TRUE); + for (reg = 23; reg >= 20; reg--) + if (regs_ever_live[reg]) + { + fprintf (f, "\tldfe\t%s, [fp, #-%d]\n", reg_names[reg], + floats_offset); + floats_offset += 12; + code_size += 4; + } + + live_regs_mask |= 0xA800; + print_multi_reg (f, "ldmea\tfp", live_regs_mask, + TARGET_6 ? FALSE : TRUE); code_size += 4; } else @@ -926,16 +1881,23 @@ output_epilogue (f, frame_size) output_add_immediate (operands); } + for (reg = 20; reg < 24; reg++) + if (regs_ever_live[reg]) + { + fprintf (f, "\tldfe\t%s, [sp], #12\n", reg_names[reg]); + code_size += 4; + } if (current_function_pretend_args_size == 0 && regs_ever_live[14]) { - print_multi_reg (f, "ldmfd\tsp!", - (live_regs_mask & ~0x4000) | 0x8000, TRUE); + print_multi_reg (f, "ldmfd\tsp!", live_regs_mask | 0x8000, + TARGET_6 ? FALSE : TRUE); code_size += 4; } else { - if (live_regs_mask) + if (live_regs_mask || regs_ever_live[14]) { + live_regs_mask |= 0x4000; print_multi_reg (f, "ldmfd\tsp!", live_regs_mask, FALSE); code_size += 4; } @@ -946,7 +1908,7 @@ output_epilogue (f, frame_size) current_function_pretend_args_size); output_add_immediate (operands); } - fputs ("\tmovs\tpc, lr\n", f); + fputs (TARGET_6 ? 
"\tmov\tpc, lr\n" : "\tmovs\tpc, lr\n", f); code_size += 4; } } @@ -1086,6 +2048,72 @@ arm_output_llc (operands) return (""); } /* arm_output_llc */ +/* output_load_symbol () + load a symbol that is known to be in the text segment into a register */ + +char * +output_load_symbol (operands) +rtx *operands; +{ + char *s, *name = XSTR (operands[1], 0); + struct label_offset *he; + int hash = 0; + int offset; + + if (*name != '*') + abort (); + + for (s = &name[1]; *s; s++) + hash += *s; + hash = hash % LABEL_HASH_SIZE; + he = offset_table[hash]; + while (he && strcmp (he->name, &name[1])) + he = he->cdr; + + if (!he) + abort (); + + offset = (arm_text_location + 8 - he->offset); + if (offset < 0) + abort (); + + /* If the symbol is word aligned then we might be able to reduce the + number of loads */ + if ((offset & 3) == 0) + { + arm_output_asm_insn ("sub\t%0, pc, #(8 + . -%a1) & 1023", operands); + if (offset > 0x3ff) + { + arm_output_asm_insn ("sub\t%0, %0, #(4 + . -%a1) & 261120", + operands); + if (offset > 0x3ffff) + { + arm_output_asm_insn ("sub\t%0, %0, #(. -%a1) & 66846720", + operands); + if (offset > 0x3ffffff) + arm_output_asm_insn ("sub\t%0, %0, #(. - 4 -%a1) & -67108864", + operands); + } + } + } + else + { + arm_output_asm_insn ("sub\t%0, pc, #(8 + . -%a1) & 255", operands); + if (offset > 0x0ff) + { + arm_output_asm_insn ("sub\t%0, %0, #(4 + . -%a1) & 65280", operands); + if (offset > 0x0ffff) + { + arm_output_asm_insn ("sub\t%0, %0, #(. -%a1) & 16711680", + operands); + if (offset > 0x0ffffff) + arm_output_asm_insn ("sub\t%0, %0, #(. - 4 -%a1) & -16777216", + operands); + } + } + } + return ""; +} /* Output code resembling an .lcomm directive. /bin/as doesn't have this directive hence this hack, which works by reserving some `.space' in the @@ -1131,6 +2159,10 @@ output_lcomm_directive (stream, name, size, rounded) 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached (the target insn is arm_target_insn). 
+ If the jump clobbers the conditions then we use states 2 and 4. + + A similar thing can be done with conditional return insns. + XXX In case the `target' is an unconditional branch, this conditionalising of the instructions always reduces code size, but not always execution time. But then, I want to reduce the code size to somewhere near what @@ -1185,6 +2217,15 @@ final_prescan_insn (insn, opvec, noperands) reversed if it appears to fail. */ int reverse = 0; + /* JUMP_CLOBBERS will be one implies that the conditions if a branch is + taken are clobbered, even if the rtl suggests otherwise. It also + means that we have to grub around within the jump expression to find + out what the conditions are when the jump isn't taken. */ + int jump_clobbers = 0; + + /* If we start with a return insn, we only succeed if we find another one. */ + int seeking_return = 0; + /* START_INSN will hold the insn from where we start looking. This is the first insn after the following code_label if REVERSE is true. 
*/ rtx start_insn = insn; @@ -1219,6 +2260,21 @@ final_prescan_insn (insn, opvec, noperands) else return; } + else if (GET_CODE (body) == RETURN) + { + start_insn = next_nonnote_insn (start_insn); + if (GET_CODE (start_insn) == BARRIER) + start_insn = next_nonnote_insn (start_insn); + if (GET_CODE (start_insn) == CODE_LABEL + && CODE_LABEL_NUMBER (start_insn) == arm_target_label + && LABEL_NUSES (start_insn) == 1) + { + reverse = TRUE; + seeking_return = 1; + } + else + return; + } else return; } @@ -1228,6 +2284,20 @@ final_prescan_insn (insn, opvec, noperands) if (GET_CODE (insn) != JUMP_INSN) return; + /* This jump might be paralled with a clobber of the condition codes + the jump should always come first */ + if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0) + body = XVECEXP (body, 0, 0); + +#if 0 + /* If this is a conditional return then we don't want to know */ + if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC + && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE + && (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN + || GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)) + return; +#endif + if (reverse || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE)) @@ -1235,11 +2305,17 @@ final_prescan_insn (insn, opvec, noperands) int insns_skipped = 0, fail = FALSE, succeed = FALSE; /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */ int then_not_else = TRUE; - rtx this_insn = start_insn, label; + rtx this_insn = start_insn, label = 0; + if (get_attr_conds (insn) == CONDS_JUMP_CLOB) + jump_clobbers = 1; + /* Register the insn jumped to. 
*/ if (reverse) - label = XEXP (SET_SRC (body), 0); + { + if (!seeking_return) + label = XEXP (SET_SRC (body), 0); + } else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF) label = XEXP (XEXP (SET_SRC (body), 1), 0); else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF) @@ -1247,6 +2323,13 @@ final_prescan_insn (insn, opvec, noperands) label = XEXP (XEXP (SET_SRC (body), 2), 0); then_not_else = FALSE; } + else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN) + seeking_return = 1; + else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN) + { + seeking_return = 1; + then_not_else = FALSE; + } else abort (); @@ -1272,33 +2355,54 @@ final_prescan_insn (insn, opvec, noperands) control falls in from somewhere else. */ if (this_insn == label) { - arm_ccfsm_state = 1; + if (jump_clobbers) + { + arm_ccfsm_state = 2; + this_insn = next_nonnote_insn (this_insn); + } + else + arm_ccfsm_state = 1; succeed = TRUE; } else fail = TRUE; break; - case BARRIER: /* XXX Is this case necessary? */ + case BARRIER: /* Succeed if the following insn is the target label. - Otherwise fail. */ + Otherwise fail. + If return insns are used then the last insn in a function + will be a barrier. */ this_insn = next_nonnote_insn (this_insn); - if (this_insn == label) + if (this_insn && this_insn == label) { - arm_ccfsm_state = 1; + if (jump_clobbers) + { + arm_ccfsm_state = 2; + this_insn = next_nonnote_insn (this_insn); + } + else + arm_ccfsm_state = 1; succeed = TRUE; } else fail = TRUE; break; + case CALL_INSN: + /* The arm 6xx uses full 32 bit addresses so the cc is not + preserved over calls */ + if (TARGET_6) + fail = TRUE; + break; case JUMP_INSN: /* If this is an unconditional branch to the same label, succeed. If it is to another label, do nothing. If it is conditional, fail. */ /* XXX Probably, the test for the SET and the PC are unnecessary. 
*/ - if (GET_CODE (scanbody) == SET && GET_CODE (SET_DEST (scanbody)) == PC) + if (GET_CODE (scanbody) == SET + && GET_CODE (SET_DEST (scanbody)) == PC) { if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF && XEXP (SET_SRC (scanbody), 0) == label && !reverse) @@ -1309,11 +2413,31 @@ final_prescan_insn (insn, opvec, noperands) else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE) fail = TRUE; } + else if (GET_CODE (scanbody) == RETURN + && seeking_return) + { + arm_ccfsm_state = 2; + succeed = TRUE; + } + else if (GET_CODE (scanbody) == PARALLEL) + { + switch (get_attr_conds (this_insn)) + { + case CONDS_NOCOND: + break; + default: + fail = TRUE; + break; + } + } break; case INSN: - /* Instructions affecting the condition codes make it fail. */ - if (sets_cc0_p (scanbody)) + /* Instructions using or affecting the condition codes make it + fail. */ + if ((GET_CODE (scanbody) == SET + || GET_CODE (scanbody) == PARALLEL) + && get_attr_conds (this_insn) != CONDS_NOCOND) fail = TRUE; break; @@ -1323,20 +2447,57 @@ final_prescan_insn (insn, opvec, noperands) } if (succeed) { - if (arm_ccfsm_state == 1 || reverse) + if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse)) arm_target_label = CODE_LABEL_NUMBER (label); - else if (arm_ccfsm_state == 2) - arm_target_insn = this_insn; + else if (seeking_return || arm_ccfsm_state == 2) + { + while (this_insn && GET_CODE (PATTERN (this_insn)) == USE) + { + this_insn = next_nonnote_insn (this_insn); + if (this_insn && (GET_CODE (this_insn) == BARRIER + || GET_CODE (this_insn) == CODE_LABEL)) + abort (); + } + if (!this_insn) + { + /* Oh, dear! we ran off the end.. 
give up */ + recog (PATTERN (insn), insn, NULL_PTR); + arm_ccfsm_state = 0; + return; + } + arm_target_insn = this_insn; + } else abort (); + if (jump_clobbers) + { + if (reverse) + abort (); + arm_current_cc = + get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body), + 0), 0), 1)); + if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND) + arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc); + if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE) + arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc); + } + else + { + /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from + what it was. */ + if (!reverse) + arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body), + 0)); + } - /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from what - it was. */ - if (!reverse) - arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body), 0)); if (reverse || then_not_else) arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc); } + /* restore recog_operand (getting the attributes of other insns can + destroy this array, but final.c assumes that it remains intact + accross this call; since the insn has been recognized already we + call recog direct). */ + recog (PATTERN (insn), insn, NULL_PTR); } } /* final_prescan_insn */ diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h index 3cb6a5286f3..f51b76fe932 100644 --- a/gcc/config/arm/arm.h +++ b/gcc/config/arm/arm.h @@ -2,7 +2,8 @@ Copyright (C) 1991, 1993 Free Software Foundation, Inc. Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl) and Martin Simmons (@harleqn.co.uk). - + More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk) + This file is part of GNU CC. 
GNU CC is free software; you can redistribute it and/or modify @@ -27,30 +28,45 @@ extern void output_prologue (); extern void output_epilogue (); extern char *arm_output_asm_insn (); extern char *arm_output_llc (); +extern char *arithmetic_instr (); extern char *output_add_immediate (); extern char *output_call (); +extern char *output_call_mem (); extern char *output_move_double (); extern char *output_mov_double_fpu_from_arm (); extern char *output_mov_double_arm_from_fpu (); extern char *output_mov_immediate (); extern char *output_multi_immediate (); extern char *output_shifted_move (); +extern char *output_shift_compare (); extern char *output_arithmetic_with_immediate_multiply (); +extern char *output_arithmetic_with_shift (); +extern char *output_return_instruction (); +extern char *output_load_symbol (); +extern char *fp_immediate_constant (); +extern struct rtx_def *gen_compare_reg (); +extern struct rtx_def *arm_gen_store_multiple (); +extern struct rtx_def *arm_gen_load_multiple (); + +extern char *arm_condition_codes[]; + +/* This is needed by the tail-calling peepholes */ +extern int frame_pointer_needed; + -/* Translation to find startup files. On RISCiX boxes, gcrt0.o is in - /usr/lib. */ -#define STARTFILE_SPEC \ - "%{pg:/usr/lib/gcrt0.o%s}%{!pg:%{p:mcrt0.o%s}%{!p:crt0.o%s}}" +#ifndef CPP_PREDEFINES +#define CPP_PREDEFINES "-Darm -Acpu(arm) -Amachine(arm)" +#endif -#ifdef riscos -#define CPP_PREDEFINES "-Darm -Driscos -Acpu(arm) -Amachine(arm)" -#else -#define CPP_PREDEFINES "-Darm -Driscix -Dunix -Asystem(unix) -Acpu(arm) -Amachine(arm)" +#ifndef CPP_SPEC +#define CPP_SPEC "%{m6:-D__arm6__}" #endif /* Run-time Target Specification. */ +#ifndef TARGET_VERSION #define TARGET_VERSION \ - fputs (" (ARM/RISCiX)", stderr); + fputs (" (ARM/generic)", stderr); +#endif /* Run-time compilation parameters selecting different hardware subsets. On the ARM, misuse it in a different way. 
*/ @@ -70,14 +86,50 @@ extern int target_flags; case instruction scheduling becomes very uninteresting. */ #define TARGET_FPE (target_flags & 4) +/* Nonzero if destined for an ARM6xx. Takes out bits that assume restoration + of condition flags when returning from a branch & link (ie. a function) */ +#define TARGET_6 (target_flags & 8) + +/* ARM_EXTRA_TARGET_SWITCHES is used in riscix.h to define some options which + are passed to the preprocessor and the assembler post-processor. They + aren't needed in the main pass of the compiler, but if we don't define + them in target switches cc1 complains about them. For the sake of + argument lets allocate bit 31 of target flags for such options. */ + +#ifndef ARM_EXTRA_TARGET_SWITCHES +#define ARM_EXTRA_TARGET_SWITCHES +#endif + #define TARGET_SWITCHES \ { \ {"apcs", 1}, \ {"poke-function-name", 2}, \ {"fpe", 4}, \ + {"6", 8}, \ + {"2", -8}, \ + {"3", -8}, \ + ARM_EXTRA_TARGET_SWITCHES \ {"", TARGET_DEFAULT } \ } +/* Which processor we are running on. Currently this is only used to + get the condition code clobbering attribute right when we are running on + an arm 6 */ + +enum processor_type +{ + PROCESSOR_ARM2, + PROCESSOR_ARM3, + PROCESSOR_ARM6 +}; + +/* Recast the cpu class to be the cpu attribute. */ + +/* Recast the cpu class to be the cpu attribute. */ +#define arm_cpu_attr ((enum attr_cpu)arm_cpu) + +extern enum processor_type arm_cpu; + #define TARGET_DEFAULT 0 #define TARGET_MEM_FUNCTIONS 1 @@ -88,33 +140,77 @@ extern int target_flags; - if floating point is done by emulation, forget about instruction scheduling. Note that this only saves compilation time; it doesn't matter for the final code. 
*/ -#ifdef riscos -#define TARGET_WHEN_DEBUGGING 3 -#else +#ifndef TARGET_WHEN_DEBUGGING #define TARGET_WHEN_DEBUGGING 1 #endif #define OVERRIDE_OPTIONS \ { \ - if (write_symbols != NO_DEBUG) \ - target_flags |= TARGET_WHEN_DEBUGGING; \ - else if (TARGET_POKE_FUNCTION_NAME) \ + if (write_symbols != NO_DEBUG && flag_omit_frame_pointer) \ + warning ("-g without a frame pointer may not give sensible debugging");\ + if (TARGET_POKE_FUNCTION_NAME) \ target_flags |= 1; \ if (TARGET_FPE) \ flag_schedule_insns = flag_schedule_insns_after_reload = 0; \ + arm_cpu = TARGET_6 ? PROCESSOR_ARM6: PROCESSOR_ARM2; \ } /* Omitting the frame pointer is a very good idea on the ARM, especially if not TARGET_APCS, in which case all that pushing on function entry isn't - mandatory anymore. */ + mandatory anymore. + Forcing loads to be explicit also allows cse to work better */ + #define OPTIMIZATION_OPTIONS(OPTIMIZE) \ { \ if (OPTIMIZE) \ - flag_omit_frame_pointer = 1; \ + { \ + flag_force_mem = 1; \ + flag_omit_frame_pointer = 1; \ + } \ } /* Target machine storage Layout. */ + +/* Define this macro if it is advisable to hold scalars in registers + in a wider mode than that declared by the program. In such cases, + the value is constrained to be within the bounds of the declared + type, but kept valid in the wider mode. The signedness of the + extension may differ from that of the type. */ + +/* It is far faster to zero extend chars than to sign extend them */ + +#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \ + if (GET_MODE_CLASS (MODE) == MODE_INT \ + && GET_MODE_SIZE (MODE) < 4) \ + { \ + if (MODE == QImode) \ + UNSIGNEDP = 1; \ + (MODE) = SImode; \ + } + +/* Define for XFmode extended real floating point support. + This will automatically cause REAL_ARITHMETIC to be defined. */ +/* For the ARM: + I think I have added all the code to make this work. Unfortunately, + early releases of the floating point emulation code on RISCiX used a + different format for extended precision numbers. 
On my RISCiX box there + is a bug somewhere which causes the machine to lock up when running enquire + with long doubles. There is the additional aspect that Norcroft C + treats long doubles as doubles and we ought to remain compatible. + Perhaps someone with an FPA coprocessor and not running RISCiX would like + to try this someday. */ +/* #define LONG_DOUBLE_TYPE_SIZE 96 */ + +/* Disable XFmode patterns in md file */ +#define ENABLE_XF_PATTERNS 0 + +/* Define if you don't want extended real, but do want to use the + software floating point emulator for REAL_ARITHMETIC and + decimal <-> binary conversion. */ +/* See comment above */ +#define REAL_ARITHMETIC + /* Define this if most significant bit is lowest numbered in instructions that operate on numbered bit-fields. */ #define BITS_BIG_ENDIAN 0 @@ -145,11 +241,20 @@ extern int target_flags; #define BIGGEST_ALIGNMENT 32 +/* Make strings word-aligned so strcpy from constants will be faster. */ +#define CONSTANT_ALIGNMENT(EXP, ALIGN) \ + (TREE_CODE (EXP) == STRING_CST \ + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN)) + /* Every structures size must be a multiple of 32 bits. */ #define STRUCTURE_SIZE_BOUNDARY 32 +/* Non-zero if move instructions will actually fail to work + when given unaligned data. */ #define STRICT_ALIGNMENT 1 +#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT + /* Define number of bits in most basic integer type. (If undefined, default is BITS_PER_WORD). */ /* #define INT_TYPE_SIZE */ @@ -177,10 +282,43 @@ extern int target_flags; f4-f7 S floating point variable + cc This is NOT a real register, but is used internally + to represent things that use or set the condition + codes. + sfp This isn't either. It is used during rtl generation + since the offset between the frame pointer and the + auto's isn't known until after register allocation. + afp Nor this, we only need this because of non-local + goto. Without it fp appears to be used and the + elimination code won't get rid of sfp. 
It tracks + fp exactly at all times. + *: See CONDITIONAL_REGISTER_USAGE */ -/* The number of hard registers is 16 ARM + 8 FPU. */ -#define FIRST_PSEUDO_REGISTER 24 +/* The stack backtrace structure is as follows: + fp points to here: | save code pointer | [fp] + | return link value | [fp, #-4] + | return sp value | [fp, #-8] + | return fp value | [fp, #-12] + [| saved r10 value |] + [| saved r9 value |] + [| saved r8 value |] + [| saved r7 value |] + [| saved r6 value |] + [| saved r5 value |] + [| saved r4 value |] + [| saved r3 value |] + [| saved r2 value |] + [| saved r1 value |] + [| saved r0 value |] + [| saved f7 value |] three words + [| saved f6 value |] three words + [| saved f5 value |] three words + [| saved f4 value |] three words + r0-r3 are not normally saved in a C function. */ + +/* The number of hard registers is 16 ARM + 8 FPU + 1 CC + 1 SFP. */ +#define FIRST_PSEUDO_REGISTER 27 /* 1 for registers that have pervasive standard uses and are not available for the register allocator. */ @@ -188,7 +326,8 @@ extern int target_flags; { \ 0,0,0,0,0,0,0,0, \ 0,0,1,1,0,1,0,1, \ - 0,0,0,0,0,0,0,0 \ + 0,0,0,0,0,0,0,0, \ + 1,1,1 \ } /* 1 for registers not available across function calls. @@ -196,12 +335,15 @@ extern int target_flags; registers that can be used without being saved. The latter must include the registers where values are returned and the register where structure-value addresses are passed. - Aside from that, you can include as many other registers as you like. */ + Aside from that, you can include as many other registers as you like. + The CC is not preserved over function calls on the ARM 6, so it is + easier to assume this for all. SFP is preserved, since FP is. 
*/ #define CALL_USED_REGISTERS \ { \ 1,1,1,1,0,0,0,0, \ 0,0,1,1,1,1,1,1, \ - 1,1,1,1,0,0,0,0 \ + 1,1,1,1,0,0,0,0, \ + 1,1,1 \ } /* If doing stupid life analysis, avoid a bug causing a return value r0 to be @@ -221,15 +363,19 @@ extern int target_flags; On the ARM regs are UNITS_PER_WORD bits wide; FPU regs can hold any FP mode. */ -#define HARD_REGNO_NREGS(REGNO, MODE) \ - ((REGNO) >= 16 ? 1 \ +#define HARD_REGNO_NREGS(REGNO, MODE) \ + (((REGNO) >= 16 && REGNO != FRAME_POINTER_REGNUM \ + && (REGNO) != ARG_POINTER_REGNUM) ? 1 \ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)) /* Value is 1 if hard register REGNO can hold a value of machine-mode MODE. This is TRUE for ARM regs since they can hold anything, and TRUE for FPU regs holding FP. */ -#define HARD_REGNO_MODE_OK(REGNO, MODE) \ - ((REGNO) < 16 || GET_MODE_CLASS (MODE) == MODE_FLOAT) +#define HARD_REGNO_MODE_OK(REGNO, MODE) \ + ((GET_MODE_CLASS (MODE) == MODE_CC) ? (REGNO == CC_REGNUM) : \ + ((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \ + || REGNO == ARG_POINTER_REGNUM \ + || GET_MODE_CLASS (MODE) == MODE_FLOAT)) /* Value is 1 if it is a good idea to tie two pseudo registers when one has mode MODE1 and one has mode MODE2. @@ -249,15 +395,24 @@ extern int target_flags; #define STACK_POINTER_REGNUM 13 /* Base register for access to local variables of the function. */ -#define FRAME_POINTER_REGNUM 9 +#define FRAME_POINTER_REGNUM 25 + +/* Define this to be where the real frame pointer is if it is not possible to + work out the offset between the frame pointer and the automatic variables + until after register allocation has taken place. FRAME_POINTER_REGNUM + should point to a special register that we will make sure is eliminated. */ +#define HARD_FRAME_POINTER_REGNUM 11 /* Value should be nonzero if functions must have frame pointers. Zero means the frame pointer need not be set up (and parms may be accessed - via the stack pointer) in functions that seem suitable. 
*/ -#define FRAME_POINTER_REQUIRED 0 + via the stack pointer) in functions that seem suitable. + If we have to have a frame pointer we might as well make use of it. + APCS says that the frame pointer does not need to be pushed in leaf + functions. */ +#define FRAME_POINTER_REQUIRED (TARGET_APCS && !leaf_function_p ()) /* Base register for access to arguments of the function. */ -#define ARG_POINTER_REGNUM 11 +#define ARG_POINTER_REGNUM 26 /* The native (Norcroft) Pascal compiler for the ARM passes the static chain as an invisible last argument (possible since varargs don't exist in @@ -268,14 +423,22 @@ extern int target_flags; is passed to a function. */ #define STRUCT_VALUE_REGNUM 0 +/* Internal, so that we don't need to refer to a raw number */ +#define CC_REGNUM 24 + /* The order in which register should be allocated. It is good to use ip - since no saving is required (though calls clobber it). It is quite good to - use lr since other calls may clobber it anyway. */ + since no saving is required (though calls clobber it) and it never contains + function parameters. It is quite good to use lr since other calls may + clobber it anyway. Allocate r0 through r3 in reverse order since r3 is + least likely to contain a function parameter; in addition results are + returned in r0. + */ #define REG_ALLOC_ORDER \ { \ - 0, 1, 2, 3, 12, 14, 4, 5, \ + 3, 2, 1, 0, 12, 14, 4, 5, \ 6, 7, 8, 10, 9, 11, 13, 15, \ - 16, 17, 18, 19, 20, 21, 22, 23 \ + 16, 17, 18, 19, 20, 21, 22, 23, \ + 24, 25 \ } /* Register and constant classes. */ @@ -306,18 +469,21 @@ enum reg_class of length N_REG_CLASSES. */ #define REG_CLASS_CONTENTS \ { \ - 0x000000, /* NO_REGS */ \ - 0xFF0000, /* FPU_REGS */ \ - 0x00FFFF, /* GENERAL_REGS */ \ - 0xFFFFFF /* ALL_REGS */ \ + 0x0000000, /* NO_REGS */ \ + 0x0FF0000, /* FPU_REGS */ \ + 0x200FFFF, /* GENERAL_REGS */ \ + 0x2FFFFFF /* ALL_REGS */ \ } /* The same information, inverted: Return the class number of the smallest class containing reg number REGNO. 
This could be a conditional expression or could index an array. */ -#define REGNO_REG_CLASS(REGNO) \ - ((REGNO) < 16 ? GENERAL_REGS : FPU_REGS) +#define REGNO_REG_CLASS(REGNO) \ + (((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \ + || REGNO == ARG_POINTER_REGNUM) \ + ? GENERAL_REGS : (REGNO) == CC_REGNUM \ + ? NO_REGS : FPU_REGS) /* The class value for index registers, and the one for base regs. */ #define INDEX_REG_CLASS GENERAL_REGS @@ -334,14 +500,30 @@ enum reg_class C is the letter, and VALUE is a constant value. Return 1 if VALUE is in the range specified by C. I: immediate arithmetic operand (i.e. 8 bits shifted as required). - J: valid indexing constants. */ -#define CONST_OK_FOR_LETTER_P(VALUE, C) \ - ((C) == 'I' ? const_ok_for_arm (VALUE) : \ - (C) == 'J' ? (abs (VALUE) < 4096) : 0) - -/* Constant letter 'G' for the FPU immediate constants. */ -#define CONST_DOUBLE_OK_FOR_LETTER_P(X,C) \ - ((C) == 'G' ? const_double_rtx_ok_for_fpu (X) : 0) + J: valid indexing constants. + K: as I but also (not (value)) ok. + L: as I but also (neg (value)) ok.*/ +#define CONST_OK_FOR_LETTER_P(VALUE, C) \ + ((C) == 'I' ? const_ok_for_arm (VALUE) : \ + (C) == 'J' ? ((VALUE) < 4096 && (VALUE) > -4096) : \ + (C) == 'K' ? (const_ok_for_arm (VALUE) || const_ok_for_arm (~(VALUE))) : \ + (C) == 'L' ? (const_ok_for_arm (VALUE) || const_ok_for_arm (-(VALUE))) : 0) + +/* For the ARM, `Q' means that this is a memory operand that is just + an offset from a register. + `S' means any symbol that has the SYMBOL_REF_FLAG set or a CONSTANT_POOL + address. This means that the symbol is in the text segment and can be + accessed without using a load. */ + +#define EXTRA_CONSTRAINT(OP, C) \ + ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) == REG \ + : (C) == 'S' ? CONSTANT_ADDRESS_P (OP) : 0) + +/* Constant letter 'G' for the FPU immediate constants. + 'H' means the same constant negated. */ +#define CONST_DOUBLE_OK_FOR_LETTER_P(X,C) \ + ((C) == 'G' ? 
const_double_rtx_ok_for_fpu (X) \ + : (C) == 'H' ? neg_const_double_rtx_ok_for_fpu (X) : 0) /* Given an rtx X being reloaded into a reg required to be in class CLASS, return the class of reg to actually use. @@ -349,6 +531,14 @@ enum reg_class in some cases it is preferable to use a more restrictive class. */ #define PREFERRED_RELOAD_CLASS(X, CLASS) (CLASS) +/* Return the register class of a scratch register needed to copy IN into + or out of a register in CLASS in MODE. If it can be done directly, + NO_REGS is returned. */ +#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,X) \ + (((MODE) == DFmode && (CLASS) == GENERAL_REGS \ + && true_regnum (X) == -1) ? GENERAL_REGS \ + : NO_REGS) + /* Return the maximum number of consecutive registers needed to represent mode MODE in a register of class CLASS. ARM regs are UNITS_PER_WORD bits while FPU regs can hold any FP mode */ @@ -356,11 +546,11 @@ enum reg_class ((CLASS) == FPU_REGS ? 1 \ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)) -/* Moves between FPU_REGS and GENERAL_REGS are two insns. */ +/* Moves between FPU_REGS and GENERAL_REGS are two memory insns. */ #define REGISTER_MOVE_COST(CLASS1, CLASS2) \ ((((CLASS1) == FPU_REGS && (CLASS2) != FPU_REGS) \ || ((CLASS2) == FPU_REGS && (CLASS1) != FPU_REGS)) \ - ? 4 : 2) + ? 20 : 2) /* Stack layout; function entry, exit and calling. */ @@ -462,7 +652,7 @@ enum reg_class For a library call, FNTYPE is 0. On the ARM, the offset starts at 0. */ #define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME) \ - ((CUM) = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE))) ? 4 : 0)) + ((CUM) = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) ? 4 : 0)) /* Update the data in CUM to advance over an argument of mode MODE and data type TYPE. @@ -529,15 +719,91 @@ enum reg_class /* Determine if the epilogue should be output as RTL. You should override this if you define FUNCTION_EXTRA_EPILOGUE. 
*/ -/* #define USE_RETURN_INSN use_return_insn () */ +#define USE_RETURN_INSN use_return_insn () + +/* Definitions for register eliminations. + + This is an array of structures. Each structure initializes one pair + of eliminable registers. The "from" register number is given first, + followed by "to". Eliminations of the same "from" register are listed + in order of preference. + + We have two registers that can be eliminated on the ARM. First, the + arg pointer register can often be eliminated in favor of the stack + pointer register. Secondly, the pseudo frame pointer register can always + be eliminated; it is replaced with either the stack or the real frame + pointer. */ + +#define ELIMINABLE_REGS \ +{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ + {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \ + {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ + {FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} + +/* Given FROM and TO register numbers, say whether this elimination is allowed. + Frame pointer elimination is automatically handled. + + All eliminations are permissible. Note that ARG_POINTER_REGNUM and + HARD_FRAME_POINTER_REGNUM are infact the same thing. If we need a frame + pointer, we must eliminate FRAME_POINTER_REGNUM into + HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */ +#define CAN_ELIMINATE(FROM, TO) \ + (((TO) == STACK_POINTER_REGNUM && frame_pointer_needed) ? 0 : 1) + +/* Define the offset between two registers, one to be eliminated, and the other + its replacement, at the start of a routine. 
*/ +#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ +{ \ + if ((FROM) == ARG_POINTER_REGNUM && (TO) == HARD_FRAME_POINTER_REGNUM)\ + (OFFSET) = 0; \ + else if ((FROM) == FRAME_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM)\ + (OFFSET) = (get_frame_size () + 3 & ~3); \ + else \ + { \ + int regno; \ + int offset = 12; \ + \ + for (regno = 4; regno <= 10; regno++) \ + if (regs_ever_live[regno]) \ + offset += 4; \ + for (regno = 20; regno <=23; regno++) \ + if (regs_ever_live[regno]) \ + offset += 12; \ + if ((FROM) == FRAME_POINTER_REGNUM) \ + (OFFSET) = -offset; \ + else \ + { \ + if (! regs_ever_live[HARD_FRAME_POINTER_REGNUM]) \ + offset -= 16; \ + if (regs_ever_live[14]) \ + offset += 4; \ + (OFFSET) = (get_frame_size () + 3 & ~3) + offset; \ + } \ + } \ +} +#if 0 /* Store in the variable DEPTH the initial difference between the frame pointer reg contents and the stack pointer reg contents, as of the start of the function body. This depends on the layout of the fixed parts of the stack frame and on how registers are saved. */ +#define INITIAL_FRAME_POINTER_OFFSET(DEPTH) \ +{ \ + int regno; \ + int offset = 12; \ + \ + for (regno = 0; regno < FRAME_POINTER_REGNUM; regno++) \ + if (regs_ever_live[regno]) \ + offset += 4; \ + for (regno = 20; regno < 24; regno++) \ + if (regs_ever_live[regno]) \ + offset += 12; \ + (DEPTH) = offset + (get_frame_size () + 3 & ~3); \ +} + #define INITIAL_FRAME_POINTER_OFFSET(DEPTH) \ (DEPTH) = (get_frame_size () + 3) & ~3; - +#endif /* Output assembler code for a block containing the constant parts of a trampoline, leaving space for the variable parts. @@ -600,15 +866,19 @@ enum reg_class has been allocated, which happens in local-alloc.c. On the ARM, don't allow the pc to be used. 
*/ -#define REGNO_OK_FOR_BASE_P(REGNO) \ - ((REGNO) < 15 || (unsigned) reg_renumber[(REGNO)] < 15) -#define REGNO_OK_FOR_INDEX_P(REGNO) \ +#define REGNO_OK_FOR_BASE_P(REGNO) \ + ((REGNO) < 15 || (REGNO) == FRAME_POINTER_REGNUM \ + || (REGNO) == ARG_POINTER_REGNUM \ + || (unsigned) reg_renumber[(REGNO)] < 15 \ + || (unsigned) reg_renumber[(REGNO)] == FRAME_POINTER_REGNUM \ + || (unsigned) reg_renumber[(REGNO)] == ARG_POINTER_REGNUM) +#define REGNO_OK_FOR_INDEX_P(REGNO) \ REGNO_OK_FOR_BASE_P(REGNO) /* Maximum number of registers that can appear in a valid memory address. - The addressing mode [ra,rb, rc] uses the greatest number of - registers. */ -#define MAX_REGS_PER_ADDRESS 3 + Shifts in addresses can't be by a register. */ + +#define MAX_REGS_PER_ADDRESS 2 /* Recognize any constant value that is a valid address. */ /* XXX We can address any constant, eventually... */ @@ -620,8 +890,9 @@ enum reg_class || GET_CODE(X) == CONST ) #endif -#define CONSTANT_ADDRESS_P(X) \ - (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X)) +#define CONSTANT_ADDRESS_P(X) \ + (GET_CODE (X) == SYMBOL_REF \ + && (CONSTANT_POOL_ADDRESS_P (X) || SYMBOL_REF_FLAG (X))) /* Nonzero if the constant value X is a legitimate general operand. It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. @@ -632,10 +903,20 @@ enum reg_class #define LEGITIMATE_CONSTANT_P(X) \ (GET_CODE (X) == CONST_INT \ || (GET_CODE (X) == CONST_DOUBLE \ - && const_double_rtx_ok_for_fpu (X))) -#if 0 - || GET_CODE(X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P(X)) -#endif + && (const_double_rtx_ok_for_fpu (X) \ + || neg_const_double_rtx_ok_for_fpu (X))) \ + || CONSTANT_ADDRESS_P (X)) + +/* Symbols in the text segment can be accessed without indirecting via the + constant pool; it may take an extra binary operation, but this is still + faster than indirecting via memory. 
*/ + +#define ENCODE_SECTION_INFO(decl) \ +{ \ + if (TREE_CONSTANT (decl) \ + && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST)) \ + SYMBOL_REF_FLAG (XEXP (TREE_CST_RTL (decl), 0)) = 1; \ +} /* The macros REG_OK_FOR..._P assume that the arg is a REG rtx and check its validity for a certain class. @@ -644,23 +925,36 @@ enum reg_class them unless they have been allocated suitable hard regs. The symbol REG_OK_STRICT causes the latter definition to be used. */ #ifndef REG_OK_STRICT + /* Nonzero if X is a hard reg that can be used as a base reg or if it is a pseudo reg. */ -#define REG_OK_FOR_BASE_P(X) \ - (REGNO (X) < 16 || REGNO (X) >= 24) +#define REG_OK_FOR_BASE_P(X) \ + (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \ + || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM) + /* Nonzero if X is a hard reg that can be used as an index or if it is a pseudo reg. */ #define REG_OK_FOR_INDEX_P(X) \ REG_OK_FOR_BASE_P(X) -#define REG_OK_FOR_PRE_POST_P(X) \ - (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER) + +#define REG_OK_FOR_PRE_POST_P(X) \ + (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \ + || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM) + #else + /* Nonzero if X is a hard reg that can be used as a base reg. */ #define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X)) + /* Nonzero if X is a hard reg that can be used as an index. 
*/ #define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X)) -#define REG_OK_FOR_PRE_POST_P(X) \ - (REGNO (X) < 16 || (unsigned) reg_renumber[REGNO (X)] < 16) + +#define REG_OK_FOR_PRE_POST_P(X) \ + (REGNO (X) < 16 || (unsigned) reg_renumber[REGNO (X)] < 16 \ + || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM \ + || (unsigned) reg_renumber[REGNO (X)] == FRAME_POINTER_REGNUM \ + || (unsigned) reg_renumber[REGNO (X)] == ARG_POINTER_REGNUM) + #endif /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression @@ -678,18 +972,24 @@ enum reg_class /* A C statement (sans semicolon) to jump to LABEL for legitimate index RTXs used by the macro GO_IF_LEGITIMATE_ADDRESS. Floating point indices can only be small constants. */ -#define GO_IF_LEGITIMATE_INDEX(MODE, BASE_REGNO, INDEX, LABEL) \ +#define GO_IF_LEGITIMATE_INDEX(MODE, BASE_REGNO, INDEX, LABEL) \ do \ { \ int range; \ + int code = GET_CODE (INDEX); \ \ if (GET_MODE_CLASS (MODE) == MODE_FLOAT) \ - range = 1024; \ + { \ + if (code == CONST_INT && INTVAL (INDEX) < 1024 \ + && INTVAL (INDEX) > -1024 \ + && (INTVAL (INDEX) & 3) == 0) \ + goto LABEL; \ + } \ else \ { \ - if (INDEX_REGISTER_RTX_P (INDEX)) \ + if (INDEX_REGISTER_RTX_P (INDEX) && GET_MODE_SIZE (MODE) <= 4) \ goto LABEL; \ - if (GET_MODE_SIZE (MODE) <= 4 && GET_CODE (INDEX) == MULT) \ + if (GET_MODE_SIZE (MODE) <= 4 && code == MULT) \ { \ rtx xiop0 = XEXP (INDEX, 0); \ rtx xiop1 = XEXP (INDEX, 1); \ @@ -700,20 +1000,29 @@ do \ && power_of_two_operand (xiop0, SImode)) \ goto LABEL; \ } \ - range = 4096; \ + if (GET_MODE_SIZE (MODE) <= 4 \ + && (code == LSHIFTRT || code == ASHIFTRT || code == LSHIFT \ + || code == ASHIFT || code == ROTATERT)) \ + { \ + rtx op = XEXP (INDEX, 1); \ + if (INDEX_REGISTER_RTX_P (XEXP (INDEX, 0)) \ + && GET_CODE (op) == CONST_INT && INTVAL (op) > 0 \ + && INTVAL (op) <= 31) \ + goto LABEL; \ + } \ + range = (MODE) == HImode ? 
4095 : 4096; \ + if (code == CONST_INT && INTVAL (INDEX) < range \ + && INTVAL (INDEX) > -range) \ + goto LABEL; \ } \ - \ - if (GET_CODE (INDEX) == CONST_INT && INTVAL (INDEX) < range \ - && INTVAL (INDEX) > -range) \ - goto LABEL; \ } while (0) /* Jump to LABEL if X is a valid address RTX. This must also take REG_OK_STRICT into account when deciding about valid registers, but it uses the above macros so we are in luck. Allow REG, REG+REG, REG+INDEX, INDEX+REG, REG-INDEX, and non floating SYMBOL_REF to the constant pool. - Allow REG-only and AUTINC-REG if handling TImode. Other symbol refs must - be forced though a static cell to ensure addressability. */ + Allow REG-only and AUTINC-REG if handling TImode or HImode. Other symbol + refs must be forced though a static cell to ensure addressability. */ #define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \ { \ if (BASE_REGISTER_RTX_P (X)) \ @@ -834,10 +1143,9 @@ do \ /* This is the kind of divide that is easiest to do in the general case. */ #define EASY_DIV_EXPR TRUNC_DIV_EXPR -/* 'char' is signed by default on RISCiX, unsigned on RISCOS. */ -#ifdef riscos -#define DEFAULT_SIGNED_CHAR 0 -#else +/* signed 'char' is most compatible, but RISC OS wants it unsigned. + unsigned is probably best, but may break some code. */ +#ifndef DEFAULT_SIGNED_CHAR #define DEFAULT_SIGNED_CHAR 1 #endif @@ -848,6 +1156,17 @@ do \ in one reasonably fast instruction. */ #define MOVE_MAX 4 +/* Define if operations between registers always perform the operation + on the full register even if a narrower mode is specified. */ +#define WORD_REGISTER_OPERATIONS + +/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD + will either zero-extend or sign-extend. The value of this macro should + be the code that says which one of the two operations is implicitly + done, NIL if none. */ +#define LOAD_EXTEND_OP(MODE) \ + ((MODE) == QImode ? ZERO_EXTEND : NIL) + /* Define this if zero-extension is slow (more than one real instruction). 
On the ARM, it is more than one instruction only if not fetching from memory. */ @@ -861,10 +1180,11 @@ do \ that the native compiler puts too large (> 32) immediate shift counts into a register and shifts by the register, letting the ARM decide what to do instead of doing that itself. */ -#define SHIFT_COUNT_TRUNCATED 1 - -/* We have the vprintf function. */ -#define HAVE_VPRINTF 1 +/* This is all wrong. Defining SHIFT_COUNT_TRUNCATED tells combine that + code like (X << (Y % 32)) for register X, Y is equivalent to (X << Y). + On the arm, Y in a register is used modulo 256 for the shift. Only for + rotates is modulo 32 used. */ +/* #define SHIFT_COUNT_TRUNCATED 1 */ /* XX This is not true, is it? */ /* All integers have the same format so truncation is easy. */ @@ -886,77 +1206,194 @@ do \ /* The relative costs of various types of constants. Note that cse.c defines REG = 1, SUBREG = 2, any node = (2 + sum of subnodes). */ -#define CONST_COSTS(RTX, CODE, OUTER_CODE) \ - case CONST_INT: \ - if (const_ok_for_arm (INTVAL (RTX))) \ - return (2); \ - else \ - return (5); \ - \ - case CONST: \ - case LABEL_REF: \ - case SYMBOL_REF: \ - return (6); \ - \ - case CONST_DOUBLE: \ - if (const_double_rtx_ok_for_fpu (RTX)) \ - return(2); \ - else \ - return(7); +#define CONST_COSTS(RTX, CODE, OUTER_CODE) \ + case CONST_INT: \ + if (const_ok_for_arm (INTVAL (RTX))) \ + return (OUTER_CODE) == SET ? 2 : -1; \ + else if (OUTER_CODE == AND \ + && const_ok_for_arm (~INTVAL (RTX))) \ + return -1; \ + else if ((OUTER_CODE == COMPARE \ + || OUTER_CODE == PLUS || OUTER_CODE == MINUS) \ + && const_ok_for_arm (-INTVAL (RTX))) \ + return -1; \ + else \ + return 5; \ + case CONST: \ + case LABEL_REF: \ + case SYMBOL_REF: \ + return 6; \ + case CONST_DOUBLE: \ + if (const_double_rtx_ok_for_fpu (RTX)) \ + return (OUTER_CODE) == SET ? 
2 : -1; \ + else if (((OUTER_CODE) == COMPARE || (OUTER_CODE) == PLUS) \ + && neg_const_double_rtx_ok_for_fpu (RTX)) \ + return -1; \ + return(7); + +#define RTX_COSTS(X,CODE,OUTER_CODE) \ + case MEM: \ + { \ + int num_words = (GET_MODE_SIZE (GET_MODE (X)) > UNITS_PER_WORD) ? 2 : 1;\ + return (COSTS_N_INSNS (10*num_words)); \ + } \ + case MULT: \ + if (GET_CODE (XEXP (X, 1)) == CONST_INT \ + && exact_log2 (INTVAL (XEXP (X, 1))) >= 0) \ + return rtx_cost (XEXP (X, 0), GET_CODE (X))+1; \ + return COSTS_N_INSNS (9); \ + case LSHIFT: \ + case ASHIFT: \ + case LSHIFTRT: \ + case ASHIFTRT: \ + if (GET_CODE (XEXP (X, 1)) == CONST_INT) \ + return rtx_cost (XEXP (X, 0), GET_CODE (X))+1; \ + break; \ + case MINUS: \ + { \ + enum rtx_code code = GET_CODE (XEXP (X, 1)); \ + if (code == MULT) \ + { \ + if (GET_CODE (XEXP (XEXP (X, 1), 1)) == CONST_INT \ + && exact_log2 (INTVAL (XEXP (XEXP (X, 0), 1))) >= 0) \ + return COSTS_N_INSNS (1); \ + break; \ + } \ + else if (code == ASHIFT || code == LSHIFT || code == ASHIFTRT \ + || code == LSHIFTRT) \ + return COSTS_N_INSNS (1); \ + } /* fall through */ \ + case PLUS: \ + case IOR: \ + case XOR: \ + case AND: \ + { \ + enum rtx_code code = GET_CODE (XEXP (X, 0)); \ + if (code == MULT) \ + { \ + if (GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT \ + && exact_log2 (INTVAL (XEXP (XEXP (X, 0), 1))) >= 0) \ + return COSTS_N_INSNS (1); \ + if (GET_CODE (X) == PLUS) \ + return COSTS_N_INSNS (12); \ + break; \ + } \ + else if (code == ASHIFT || code == LSHIFT || code == ASHIFTRT \ + || code == LSHIFTRT) \ + return COSTS_N_INSNS (1); \ + break; \ + } \ + case NOT: \ + return rtx_cost (XEXP (X, 0), GET_CODE (XEXP (X, 0))); \ + case IF_THEN_ELSE: \ + { \ + if (GET_CODE (XEXP(X,1)) == PC || GET_CODE (XEXP(X,2)) == PC) \ + return COSTS_N_INSNS (4); \ + return COSTS_N_INSNS (1); \ + } \ + case SIGN_EXTEND: \ + return COSTS_N_INSNS (2); \ + case ZERO_EXTEND: \ + if (GET_MODE (XEXP (X, 0)) == QImode) \ + { \ + if (GET_CODE (XEXP (X, 0)) == MEM) \ + 
return COSTS_N_INSNS (10); \ + return COSTS_N_INSNS (1); \ + } \ + break; \ + case COMPARE: \ + if (GET_CODE (XEXP (X, 1)) == REG) \ + return 4; \ + case SMIN: \ + case SMAX: \ + case UMIN: \ + case UMAX: \ + return COSTS_N_INSNS (3); \ + case ABS: \ + if (GET_MODE (X) == SImode) \ + return COSTS_N_INSNS (2); \ + return COSTS_N_INSNS (1); + +/* Moves to and from memory are quite expensive */ +#define MEMORY_MOVE_COST(MODE) 10 + +/* All address computations that can be done are free */ +#define ADDRESS_COST(x) 2 + +/* Try to generate sequences that don't involve branches, we can then use + conditional instructions */ +#define BRANCH_COST 4 -/* Condition code information. */ - -/* Store in cc_status the expressions - that the condition codes will describe - after execution of an instruction whose pattern is EXP. - Do not alter them if the instruction would not alter the cc's. */ - -/* On the ARM nothing sets the condition code implicitly---apart from DImode - operations excluding moves---but we have to watch for registers in the - condition code value being clobbered. This clobbering includes (alas) - function calls. XXX They could just be considered to clobber regs 0-3 and - 10-15 with extra work. 
*/ -#define NOTICE_UPDATE_CC(EXP, INSN) \ -{ \ - if (GET_MODE (EXP) == DImode \ - && GET_CODE (EXP) == SET \ - && GET_CODE (SET_SRC (EXP)) != REG \ - && GET_CODE (SET_SRC (EXP)) != MEM \ - && GET_CODE (SET_SRC (EXP)) != CONST_INT) \ - CC_STATUS_INIT; \ - else if (GET_CODE (EXP) == SET) \ - { \ - rtx dest = SET_DEST (EXP); \ - if (dest == cc0_rtx) \ - { \ - cc_status.flags = 0; \ - cc_status.value1 = SET_DEST (EXP); \ - cc_status.value2 = SET_SRC (EXP); \ - } \ - if (BASE_REGISTER_RTX_P (dest)) \ - { \ - if (cc_status.value1 \ - && reg_overlap_mentioned_p (dest, cc_status.value1)) \ - cc_status.value1 = 0; \ - if (cc_status.value2 \ - && reg_overlap_mentioned_p (dest, cc_status.value2)) \ - cc_status.value2 = 0; \ - } \ - } \ - else if (GET_CODE (INSN) != JUMP_INSN && GET_CODE (EXP) == PARALLEL) \ - { \ - CC_STATUS_INIT; \ - } \ -} +/* Condition code information. */ +/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE, + return the mode to be used for the comparison. + CCFPEmode should be used with floating inequalites, + CCFPmode should be used with floating equalities. + CC_NOOVmode should be used with SImode integer equalites + CCmode should be used otherwise. */ + +#define EXTRA_CC_MODES CC_NOOVmode, CCFPmode, CCFPEmode + +#define EXTRA_CC_NAMES "CC_NOOV", "CCFP", "CCFPE" + +#define SELECT_CC_MODE(OP,X,Y) \ + (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \ + ? ((OP == EQ || OP == NE) ? CCFPmode : CCFPEmode) \ + : ((GET_MODE (X) == SImode) \ + && ((OP) == EQ || (OP) == NE) \ + && (GET_CODE (X) == PLUS || GET_CODE (X) == MINUS \ + || GET_CODE (X) == AND || GET_CODE (X) == IOR \ + || GET_CODE (X) == XOR || GET_CODE (X) == MULT \ + || GET_CODE (X) == NOT || GET_CODE (X) == NEG \ + || GET_CODE (X) == LSHIFT || GET_CODE (X) == LSHIFTRT \ + || GET_CODE (X) == ASHIFT || GET_CODE (X) == ASHIFTRT \ + || GET_CODE (X) == ROTATERT || GET_CODE (X) == ZERO_EXTRACT) \ + ? CC_NOOVmode \ + : GET_MODE (X) == QImode ? 
CC_NOOVmode : CCmode)) + +#define STORE_FLAG_VALUE 1 + +/* Define the information needed to generate branch insns. This is + stored from the compare operation. Note that we can't use "rtx" here + since it hasn't been defined! */ + +extern struct rtx_def *arm_compare_op0, *arm_compare_op1; +extern int arm_compare_fp; + +/* Define the codes that are matched by predicates in arm.c */ +#define PREDICATE_CODES \ + {"s_register_operand", {SUBREG, REG}}, \ + {"arm_add_operand", {SUBREG, REG, CONST_INT}}, \ + {"fpu_add_operand", {SUBREG, REG, CONST_DOUBLE}}, \ + {"arm_rhs_operand", {SUBREG, REG, CONST_INT}}, \ + {"fpu_rhs_operand", {SUBREG, REG, CONST_DOUBLE}}, \ + {"arm_not_operand", {SUBREG, REG, CONST_INT}}, \ + {"shiftable_operator", {PLUS, MINUS, AND, IOR, XOR}}, \ + {"minmax_operator", {SMIN, SMAX, UMIN, UMAX}}, \ + {"shift_operator", {ASHIFT, LSHIFT, ASHIFTRT, LSHIFTRT, MULT}}, \ + {"di_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE, MEM}}, \ + {"load_multiple_operation", {PARALLEL}}, \ + {"store_multiple_operation", {PARALLEL}}, \ + {"equality_operator", {EQ, NE}}, \ + {"arm_rhsm_operand", {SUBREG, REG, CONST_INT, MEM}}, \ + {"const_shift_operand", {CONST_INT}}, \ + {"index_operand", {SUBREG, REG, CONST_INT}}, \ + {"cc_register", {REG}}, + /* Assembler output control */ +#ifndef ARM_OS_NAME +#define ARM_OS_NAME "(generic)" +#endif + /* The text to go at the start of the assembler file */ #define ASM_FILE_START(STREAM) \ { \ extern char *version_string; \ - \ - fprintf (STREAM,"@ Generated by gcc %s for ARM/RISCiX\n", version_string); \ + \ + fprintf (STREAM,"@ Generated by gcc %s for ARM/%s\n", version_string, \ + ARM_OS_NAME); \ fprintf (STREAM,"rfp\t.req\tr9\n"); \ fprintf (STREAM,"fp\t.req\tr11\n"); \ fprintf (STREAM,"ip\t.req\tr12\n"); \ @@ -980,18 +1417,35 @@ do \ { \ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \ "r8","rfp", "sl", "fp", "ip", "sp", "lr", "pc", \ - "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7" \ + "f0", "f1", "f2", "f3", "f4", "f5", "f6", 
"f7", \ + "cc", "sfp", "afp" \ } +/* Arm Assembler barfs on dollars */ +#define DOLLARS_IN_IDENTIFIERS 0 + +#define NO_DOLLAR_IN_LABEL + /* DBX register number for a given compiler register number */ #define DBX_REGISTER_NUMBER(REGNO) (REGNO) -/* Generate DBX debugging information. */ +/* Generate DBX debugging information. riscix.h will undefine this because + the native assembler does not support stabs. */ #define DBX_DEBUGGING_INFO 1 /* Acorn dbx moans about continuation chars, so don't use any. */ #define DBX_CONTIN_LENGTH 0 +/* Output a source filename for the debugger. RISCiX dbx insists that the + ``desc'' field is set to compiler version number >= 315 (sic). */ +#define DBX_OUTPUT_MAIN_SOURCE_FILENAME(STREAM,NAME) \ +do { \ + fprintf (STREAM, ".stabs \"%s\",%d,0,315,%s\n", (NAME), N_SO, \ + <ext_label_name[1]); \ + text_section (); \ + ASM_OUTPUT_INTERNAL_LABEL (STREAM, "Ltext", 0); \ +} while (0) + /* Output a label definition. */ #define ASM_OUTPUT_LABEL(STREAM,NAME) \ arm_asm_output_label ((STREAM), (NAME)) @@ -1021,11 +1475,14 @@ do \ char *s = (char *) alloca (11 + strlen (PREFIX)); \ extern int arm_target_label, arm_ccfsm_state; \ \ - if (arm_ccfsm_state == 3 && arm_target_label == (NUM)) \ - arm_ccfsm_state = 0; \ - strcpy (s, "*"); \ - sprintf (&s[strlen (s)], "%s%d", (PREFIX), (NUM)); \ - arm_asm_output_label (STREAM, s); \ + if (arm_ccfsm_state == 3 && arm_target_label == (NUM) \ + && !strcmp (PREFIX, "L")) \ + { \ + arm_ccfsm_state = 0; \ + } \ + strcpy (s, "*"); \ + sprintf (&s[strlen (s)], "%s%d", (PREFIX), (NUM)); \ + arm_asm_output_label (STREAM, s); \ } while (0) /* Nothing special is done about jump tables */ @@ -1057,13 +1514,30 @@ do \ , fprintf (STREAM, "\t.word\tL%d\n", VALUE)) /* Output various types of constants. 
*/ -#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \ - (arm_increase_location (sizeof (double)) \ - , fprintf (STREAM, "\t.double\t%20.20f\n", VALUE)) - -#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \ - (arm_increase_location (sizeof (float)) \ - , fprintf (STREAM, "\t.float\t%20.20f\n", VALUE)) +#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \ +do { long l[3]; \ + arm_increase_location (12); \ + REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \ + if (sizeof (int) == sizeof (long)) \ + fprintf (STREAM, "\t.long 0x%x,0x%x,0x%x\n", l[2], l[1], l[0]); \ + else \ + fprintf (STREAM, "\t.long 0x%lx,0x%lx,0x%lx\n", l[2], l[1], l[0]); \ + } while (0) + + +#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \ +do { char dstr[30]; \ + arm_increase_location (8); \ + REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \ + fprintf (STREAM, "\t.double %s\n", dstr); \ + } while (0) + +#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \ +do { char dstr[30]; \ + arm_increase_location (4); \ + REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \ + fprintf (STREAM, "\t.float %s\n", dstr); \ + } while (0); #define ASM_OUTPUT_INT(STREAM, EXP) \ (fprintf (STREAM, "\t.word\t"), \ @@ -1154,7 +1628,9 @@ do \ small-distance conditional branches and have ASM_OUTPUT_OPCODE make the instructions conditional. Suffixes like s (affect flags) and b (bytewise load/store) need to stay suffixes, so the possible condition code comes - before these suffixes. */ + before these suffixes. %d or %D may appear in the opcode if + it can take a condition; a null rtx will cause no condition to be added, + this is what we expect to happen if arm_ccfsm_state is non-zero. */ #define ASM_OUTPUT_OPCODE(STREAM, PTR) \ { \ extern int arm_ccfsm_state, arm_current_cc; \ @@ -1162,17 +1638,13 @@ do \ int i; \ \ fflush (STREAM); /* XXX for debugging only. 
*/ \ - if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2) \ - { \ - fprintf (STREAM, "@ \t"); \ - arm_ccfsm_state += 2; \ - } \ - else if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4) \ + if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4) \ { \ - for (i = 0; *(PTR) != ' ' && *(PTR) != '\t' && i < 3; i++, (PTR)++) \ + for (i = 0; *(PTR) != ' ' && *(PTR) != '\t' && *(PTR) != '%' && i < 3;\ + i++, (PTR)++) \ putc (*(PTR), STREAM); \ fprintf (STREAM, "%s", arm_condition_codes[arm_current_cc]); \ - for (; *(PTR) != ' ' && *(PTR) != '\t'; (PTR)++) \ + for (; *(PTR) != ' ' && *(PTR) != '\t' && *(PTR) != '%'; (PTR)++) \ putc (*(PTR), STREAM); \ } \ } @@ -1186,44 +1658,56 @@ do \ /* Output an operand of an instruction. If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */ #define PRINT_OPERAND(STREAM, X, CODE) \ -{ \ - if ((CODE) == 'R') \ - fputs (reg_names[REGNO (X) + 1], (STREAM)); \ - else if (GET_CODE (X) == REG) \ - { \ - if ((CODE) != 'M') \ - fputs (reg_names[REGNO (X)], (STREAM)); \ - else \ - fprintf ((STREAM), "{%s-%s}", \ - reg_names[REGNO (X)], \ - reg_names[REGNO (X) - 1 \ - + ((GET_MODE_SIZE (GET_MODE (X)) \ - + GET_MODE_SIZE (SImode) - 1) \ - / GET_MODE_SIZE (SImode))]); \ - } \ - else if (GET_CODE (X) == MEM) \ - { \ - extern int output_memory_reference_mode; \ - output_memory_reference_mode = GET_MODE (X); \ - output_address (XEXP (X, 0)); \ - } \ - else if (GET_CODE(X) == CONST_DOUBLE) \ - { \ - union real_extract u; \ - u.i[0] = CONST_DOUBLE_LOW (X); \ - u.i[1] = CONST_DOUBLE_HIGH (X); \ - fprintf(STREAM,"#%20.20f",u.d); \ - } \ - else if (GET_CODE (X) == NEG) \ - { \ - fputc ('-', (STREAM)); \ - output_operand ((X), 0); \ - } \ - else \ - { \ - fputc('#', STREAM); \ - output_addr_const(STREAM, X); \ - } \ +{ \ + if ((CODE) == 'd') \ + { \ + if (X) \ + fputs (arm_condition_codes[get_arm_condition_code (X)], \ + (STREAM)); \ + } \ + else if ((CODE) == 'D') \ + { \ + if (X) \ + fputs (arm_condition_codes[get_arm_condition_code (X) ^ 1], \ + 
(STREAM)); \ + } \ + else if ((CODE) == 'R') \ + fputs (reg_names[REGNO (X) + 1], (STREAM)); \ + else if (GET_CODE (X) == REG) \ + { \ + if ((CODE) != 'M') \ + fputs (reg_names[REGNO (X)], (STREAM)); \ + else \ + fprintf ((STREAM), "{%s-%s}", \ + reg_names[REGNO (X)], \ + reg_names[REGNO (X) - 1 \ + + ((GET_MODE_SIZE (GET_MODE (X)) \ + + GET_MODE_SIZE (SImode) - 1) \ + / GET_MODE_SIZE (SImode))]); \ + } \ + else if (GET_CODE (X) == MEM) \ + { \ + extern int output_memory_reference_mode; \ + output_memory_reference_mode = GET_MODE (X); \ + output_address (XEXP (X, 0)); \ + } \ + else if (GET_CODE(X) == CONST_DOUBLE) \ + { \ + union real_extract u; \ + u.i[0] = CONST_DOUBLE_LOW (X); \ + u.i[1] = CONST_DOUBLE_HIGH (X); \ + fprintf(STREAM,"#%s", fp_immediate_constant(X)); \ + } \ + else if (GET_CODE (X) == NEG) \ + { \ + fputc ('-', (STREAM)); \ + output_operand ((X), 0); \ + } \ + else \ + { \ + fputc('#', STREAM); \ + output_addr_const(STREAM, X); \ + } \ } /* Output the address of an operand. */ @@ -1275,10 +1759,25 @@ do \ } \ else \ abort(); \ - fprintf (STREAM, "[%s, %s%s, asl#%d]", base_reg_name, \ + fprintf (STREAM, "[%s, %s%s, asl #%d]", base_reg_name, \ is_minus ? "-" : "", reg_names[REGNO (index)], \ shift); \ break; \ + case ASHIFTRT: \ + case LSHIFTRT: \ + case ASHIFT: \ + case LSHIFT: \ + case ROTATERT: \ + { \ + char *shift_type = shift_instr (GET_CODE (index), \ + &XEXP (index, 1)); \ + shift = INTVAL (XEXP (index, 1)); \ + index = XEXP (index, 0); \ + fprintf (STREAM, "[%s, %s%s, %s #%d]", base_reg_name, \ + is_minus ? "-" : "", reg_names[REGNO (index)], \ + shift_type, shift); \ + break; \ + } \ \ default: \ abort(); \ diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md index 342e61793a5..6a54de4636c 100644 --- a/gcc/config/arm/arm.md +++ b/gcc/config/arm/arm.md @@ -1,7 +1,8 @@ ;;- Machine description Acorn RISC Machine for GNU compiler -;; Copyright (C) 1991 Free Software Foundation, Inc. 
+;; Copyright (C) 1991, 1993 Free Software Foundation, Inc. ;; Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl) ;; and Martin Simmons (@harleqn.co.uk). +;; More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk) ;; This file is part of GNU CC. @@ -26,66 +27,490 @@ ;; to be able to (correctly) output instructions for loading a value from a ;; function's constant pool, since different instructions are needed when the ;; constant pool is more than 4095 bytes away from the PC. + +;; There are patterns in this file to support XFmode arithmetic. +;; Unfortunately RISCiX doesn't work well with these so they are disabled. +;; (See arm.h) + +;; UNSPEC Usage: +;; 0 `sin' operation: operand 0 is the result, operand 1 the parameter, +;; the mode is MODE_FLOAT +;; 1 `cos' operation: operand 0 is the result, operand 1 the parameter, +;; the mode is MODE_FLOAT +;; Attributes + +; condition codes: this one is used by final_prescan_insn to speed up +; conditionalizing instructions. It saves having to scan the rtl to see if +; it uses or alters the condition codes. + +; USE means that the condition codes are used by the insn in the process of +; outputting code, this means (at present) that we can't use the insn in +; inlined branches + +; SET means that the purpose of the insn is to set the condition codes in a +; well defined manner. + +; CLOB means that the condition codes are altered in an undefined manner, if +; they are altered at all + +; JUMP_CLOB is used when the conditions are not defined if a branch is taken, +; but are if the branch wasn't taken; the effect is to limit the branch +; elimination scanning. 
+ +; NOCOND means that the condition codes are niether altered nor affect the +; output of this insn + +(define_attr "conds" "use,set,clob,jump_clob,nocond" + (const_string "nocond")) + +; CPU attribute is used to determine whether condition codes are clobbered +; by a call insn: on the arm6 they are if in 32-bit addressing mode; on the +; arm2 and arm3 the condition codes are restored by the return. + +(define_attr "cpu" "arm2,arm3,arm6" (const (symbol_ref "arm_cpu_attr"))) + +; LENGTH, all instructions are 4 bytes +(define_attr "length" "" (const_int 1)) + +; An assembler sequence may clobber the condition codes without us knowing +(define_asm_attributes + [(set_attr "conds" "clob") + (set_attr "length" "1")]) + +; TYPE attribute is used to detect floating point instructions which, if +; running on a co-processor can run in parallel with other, basic instructions +; If write-buffer scheduling is enabled then it can also be used in the +; scheduling of writes. + +; Classification of each insn +; normal any data instruction that doesn't hit memory or fp regs +; block blockage insn, this blocks all functional units +; float a floating point arithmetic operation (subject to expansion) +; float_em a floating point arithmetic operation that is normally emulated +; f_load a floating point load from memory +; f_store a floating point store to memory +; f_mem_r a transfer of a floating point register to a real reg via mem +; r_mem_f the reverse of f_mem_r +; f_2_r fast transfer float to arm (no memory needed) +; r_2_f fast transfer arm to float +; call a subroutine call +; load any load from memory +; store1 store 1 word to memory from arm registers +; store2 store 2 words +; store3 store 3 words +; store4 store 4 words +; +(define_attr "type" + "normal,block,float,float_em,f_load,f_store,f_mem_r,r_mem_f,f_2_r,r_2_f,call,load,store1,store2,store3,store4" + (const_string "normal")) + +(define_attr "write_conflict" "no,yes" + (if_then_else (eq_attr "type" + 
"block,float_em,f_load,f_store,f_mem_r,r_mem_f,call,load") + (const_string "yes") + (const_string "no"))) + +; The write buffer on some of the arm6 processors is hard to model exactly. +; There is room in the buffer for up to two addresses and up to eight words +; of memory, but the two needn't be split evenly. When writing the two +; addresses are fully pipelined. However, a read from memory that is not +; currently in the cache will block until the writes have completed. +; It is normally the case that FCLK and MCLK will be in the ratio 2:1, so +; writes will take 2 FCLK cycles per word, if FCLK and MCLK are asynchronous +; (they aren't allowed to be at present) then there is a startup cost of 1MCLK +; cycle to add as well. + +;; (define_function_unit {name} {num-units} {n-users} {test} +;; {ready-delay} {issue-delay} [{conflict-list}]) +;; This is not well tuned, but I don't have all the details. +(define_function_unit "fpa" 1 1 (eq_attr "type" "float") 5 0) + +(define_function_unit "write_buf" 1 2 (eq_attr "type" "store1") 3 3 + [(eq_attr "write_conflict" "yes")]) +(define_function_unit "write_buf" 1 2 (eq_attr "type" "store2") 5 5 + [(eq_attr "write_conflict" "yes")]) +(define_function_unit "write_buf" 1 2 (eq_attr "type" "store3") 7 7 + [(eq_attr "write_conflict" "yes")]) +(define_function_unit "write_buf" 1 2 (eq_attr "type" "store4") 9 9 + [(eq_attr "write_conflict" "yes")]) +(define_function_unit "write_buf" 1 2 (eq_attr "type" "r_mem_f") 3 3 + [(eq_attr "write_conflict" "yes")]) + +;; Note: For DImode insns, there is normally no reason why operands should +;; not be in the same register, what we don't want is for something being +;; written to partially overlap something that is an input. + ;; Addition insns. 
(define_insn "adddi3" - [(set (match_operand:DI 0 "di_operand" "=&r") - (plus:DI (match_operand:DI 1 "di_operand" "%r") - (match_operand:DI 2 "di_operand" "r")))] + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (plus:DI (match_operand:DI 1 "s_register_operand" "%0,0") + (match_operand:DI 2 "s_register_operand" "r,0"))) + (clobber (reg:CC 24))] "" "* arm_output_asm_insn (\"adds\\t%0, %1, %2\", operands); return (arm_output_asm_insn (\"adc\\t%R0, %R1, %R2\", operands)); -") +" +[(set_attr "conds" "clob") + (set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (plus:DI (sign_extend:DI + (match_operand:SI 1 "s_register_operand" "r,r")) + (match_operand:DI 2 "s_register_operand" "r,0"))) + (clobber (reg:CC 24))] + "" + "* + arm_output_asm_insn (\"adds\\t%0, %2, %1\", operands); + return (arm_output_asm_insn (\"adc\\t%R0, %R2, %1, asr #31\", operands)); +" +[(set_attr "conds" "clob") + (set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (plus:DI (zero_extend:DI + (match_operand:SI 1 "s_register_operand" "r,r")) + (match_operand:DI 2 "s_register_operand" "r,0"))) + (clobber (reg:CC 24))] + "" + "* + arm_output_asm_insn (\"adds\\t%0, %2, %1\", operands); + return (arm_output_asm_insn (\"adc\\t%R0, %R2, #0\", operands)); +" +[(set_attr "conds" "clob") + (set_attr "length" "2")]) (define_insn "addsi3" - [(set (match_operand:SI 0 "register_operand" "=r,r") - (plus:SI (match_operand:SI 1 "register_operand" "r,r") - (match_operand:SI 2 "general_operand" "r,n")))] + [(set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_add_operand" "rL")))] "" "* - switch (which_alternative) + if (GET_CODE (operands[2]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[2]))) { - case 0: - return (arm_output_asm_insn (\"add\\t%0, %1, %2\", operands)); - case 1: - return (output_add_immediate 
(operands)); + operands[2] = gen_rtx (CONST_INT, VOIDmode, -INTVAL (operands[2])); + return arm_output_asm_insn (\"sub\\t%0, %1, %2\", operands); } + return arm_output_asm_insn (\"add\\t%0, %1, %2\", operands); ") -(define_insn "addsf3" - [(set (match_operand:SF 0 "register_operand" "=f") - (plus:SF (match_operand:SF 1 "register_operand" "f") - (match_operand:SF 2 "fpu_rhs_operand" "fG")))] +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_add_operand" "rL")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_dup 1) (match_dup 2)))] + "" + "* + if (GET_CODE (operands[2]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[2]))) + { + operands[2] = gen_rtx (CONST_INT, VOIDmode, -INTVAL (operands[2])); + return arm_output_asm_insn (\"subs\\t%0, %1, %2\", operands); + } + return (arm_output_asm_insn (\"adds\\t%0, %1, %2\", operands)); +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC 24) + (compare:CC (match_operand:SI 1 "s_register_operand" "r") + (neg:SI (match_operand:SI 2 "arm_add_operand" "rL")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_dup 1) (match_dup 2)))] + "" + "* + if (GET_CODE (operands[2]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[2]))) + { + operands[2] = gen_rtx (CONST_INT, VOIDmode, -INTVAL (operands[2])); + return arm_output_asm_insn (\"subs\\t%0, %1, %2\", operands); + } + return (arm_output_asm_insn (\"adds\\t%0, %1, %2\", operands)); +" +[(set_attr "conds" "set")]) + +(define_insn "incscc" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (plus:SI (match_operator:SI 2 "comparison_operator" + [(reg 24) (const_int 0)]) + (match_operand:SI 1 "s_register_operand" "0,?r")))] "" "* - return (arm_output_asm_insn (\"adfs\\t%0, %1, %2\", operands)); + if (which_alternative == 1) + arm_output_asm_insn (\"mov%D2\\t%0, %1\", operands); + return 
arm_output_asm_insn (\"add%d2\\t%0, %1, #1\", operands); +" +[(set_attr "conds" "use") + (set_attr "length" "*,2")]) + +; If a constant is too big to fit in a single instruction then the constant +; will be pre-loaded into a register taking at least two insns, we might be +; able to merge it with an add, but it depends on the exact value. + +(define_split + [(set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "immediate_operand" "n")))] + "!(const_ok_for_arm (INTVAL (operands[2])) + || const_ok_for_arm (-INTVAL (operands[2])))" + [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2))) + (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 3)))] + " +{ + unsigned int val = (unsigned) INTVAL (operands[2]); + int i; + unsigned int temp; + + /* this code is similar to the approach followed in movsi, but it must + generate exactly two insns */ + + for (i = 30; i >= 0; i -= 2) + { + if (val & (3 << i)) + { + i -= 6; + if (i < 0) i = 0; + if (const_ok_for_arm (temp = (val & ~(255 << i)))) + { + val &= 255 << i; + break; + } + /* we might be able to do this as (larger number - small number) */ + temp = ((val >> i) & 255) + 1; + if (temp > 255 && i < 24) + { + i += 2; + temp = ((val >> i) & 255) + 1; + } + if (const_ok_for_arm ((temp << i) - val)) + { + i = temp << i; + temp = (unsigned) - (int) (i - val); + val = i; + break; + } + FAIL; + } + } + /* if we got here, we have found a way of doing it in two instructions. 
+ the two constants are in val and temp */ + operands[2] = GEN_INT ((int)val); + operands[3] = GEN_INT ((int)temp); +} ") +(define_insn "addsf3" + [(set (match_operand:SF 0 "s_register_operand" "=f,f") + (plus:SF (match_operand:SF 1 "s_register_operand" "f,f") + (match_operand:SF 2 "fpu_add_operand" "fG,H")))] + "" + "* +{ + REAL_VALUE_TYPE r; + + switch (which_alternative) + { + case 0: + return arm_output_asm_insn (\"adfs\\t%0, %1, %2\", operands); + case 1: + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[2]); + r = REAL_VALUE_NEGATE (r); + operands[2] = CONST_DOUBLE_FROM_REAL_VALUE (r, GET_MODE (operands[2])); + return arm_output_asm_insn (\"sufs\\t%0, %1, %2\", operands); + } +} +" +[(set_attr "type" "float")]) + (define_insn "adddf3" - [(set (match_operand:DF 0 "register_operand" "=f") - (plus:DF (match_operand:DF 1 "register_operand" "f") - (match_operand:DF 2 "fpu_rhs_operand" "fG")))] + [(set (match_operand:DF 0 "s_register_operand" "=f,f") + (plus:DF (match_operand:DF 1 "s_register_operand" "f,f") + (match_operand:DF 2 "fpu_add_operand" "fG,H")))] + "" + "* +{ + REAL_VALUE_TYPE r; + + switch (which_alternative) + { + case 0: + return (arm_output_asm_insn (\"adfd\\t%0, %1, %2\", operands)); + case 1: + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[2]); + r = REAL_VALUE_NEGATE (r); + operands[2] = CONST_DOUBLE_FROM_REAL_VALUE (r, GET_MODE (operands[2])); + return arm_output_asm_insn (\"sufd\\t%0, %1, %2\", operands); + } +} +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f,f") + (plus:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f,f")) + (match_operand:DF 2 "fpu_add_operand" "fG,H")))] + "" + "* +{ + REAL_VALUE_TYPE r; + + switch (which_alternative) + { + case 0: + return (arm_output_asm_insn (\"adfd\\t%0, %1, %2\", operands)); + case 1: + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[2]); + r = REAL_VALUE_NEGATE (r); + operands[2] = CONST_DOUBLE_FROM_REAL_VALUE (r, GET_MODE (operands[2])); + 
return arm_output_asm_insn (\"sufd\\t%0, %1, %2\", operands); + } +} +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (plus:DF (match_operand:DF 1 "s_register_operand" "f") + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] "" "* return (arm_output_asm_insn (\"adfd\\t%0, %1, %2\", operands)); -") +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (plus:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] + "" + "* + return (arm_output_asm_insn (\"adfd\\t%0, %1, %2\", operands)); +" +[(set_attr "type" "float")]) + +(define_insn "addxf3" + [(set (match_operand:XF 0 "s_register_operand" "=f,f") + (plus:XF (match_operand:XF 1 "s_register_operand" "f,f") + (match_operand:XF 2 "fpu_add_operand" "fG,H")))] + "ENABLE_XF_PATTERNS" + "* +{ + REAL_VALUE_TYPE r; + + switch (which_alternative) + { + case 0: + return (arm_output_asm_insn (\"adfe\\t%0, %1, %2\", operands)); + case 1: + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[2]); + r = REAL_VALUE_NEGATE (r); + operands[2] = CONST_DOUBLE_FROM_REAL_VALUE (r, GET_MODE (operands[2])); + return arm_output_asm_insn (\"sufe\\t%0, %1, %2\", operands); + } +} +" +[(set_attr "type" "float")]) (define_insn "subdi3" - [(set (match_operand:DI 0 "di_operand" "=&r") - (minus:DI (match_operand:DI 1 "di_operand" "%r") - (match_operand:DI 2 "di_operand" "r")))] + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r") + (minus:DI (match_operand:DI 1 "s_register_operand" "0,r,0") + (match_operand:DI 2 "s_register_operand" "r,0,0"))) + (clobber (reg:CC 24))] "" "* arm_output_asm_insn (\"subs\\t%0, %1, %2\", operands); return (arm_output_asm_insn (\"sbc\\t%R0, %R1, %R2\", operands)); -") +" +[(set_attr "conds" "clob") + (set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + 
(minus:DI (match_operand:DI 1 "s_register_operand" "?r,0") + (zero_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")))) + (clobber (reg:CC 24))] + "" + "* + arm_output_asm_insn (\"subs\\t%0, %1, %2\", operands); + return (arm_output_asm_insn (\"sbc\\t%R0, %R1, #0\", operands)); +" +[(set_attr "conds" "clob") + (set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (minus:DI (match_operand:DI 1 "s_register_operand" "r,0") + (sign_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")))) + (clobber (reg:CC 24))] + "" + "* + arm_output_asm_insn (\"subs\\t%0, %1, %2\", operands); + return (arm_output_asm_insn (\"sbc\\t%R0, %R1, %2, asr #31\", operands)); +" +[(set_attr "conds" "clob") + (set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (minus:DI (zero_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "?r,0"))) + (clobber (reg:CC 24))] + "" + "* + arm_output_asm_insn (\"rsbs\\t%0, %1, %2\", operands); + return (arm_output_asm_insn (\"rsc\\t%R0, %R1, #0\", operands)); +" +[(set_attr "conds" "clob") + (set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (minus:DI (sign_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "?r,0"))) + (clobber (reg:CC 24))] + "" + "* + arm_output_asm_insn (\"rsbs\\t%0, %1, %2\", operands); + return (arm_output_asm_insn (\"rsc\\t%R0, %R1, %2, asr #31\", operands)); +" +[(set_attr "conds" "clob") + (set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=r") + (minus:DI (zero_extend:DI + (match_operand:SI 1 "s_register_operand" "r")) + (zero_extend:DI + (match_operand:SI 2 "s_register_operand" "r")))) + (clobber (reg:CC 24))] + "" + "* + arm_output_asm_insn (\"subs\\t%0, %1, %2\", operands); + return (arm_output_asm_insn 
(\"rsc\\t%R0, %1, %1 @ extend carry\", + operands)); +" +[(set_attr "conds" "clob") + (set_attr "length" "2")]) (define_insn "subsi3" - [(set (match_operand:SI 0 "register_operand" "=r,r,r") - (minus:SI (match_operand:SI 1 "arm_rhs_operand" "r,r,I") - (match_operand:SI 2 "general_operand" "r,n,r")))] + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (minus:SI (match_operand:SI 1 "arm_rhs_operand" "r,I") + (match_operand:SI 2 "arm_rhs_operand" "rI,r")))] "" "* switch (which_alternative) @@ -93,15 +518,45 @@ case 0: return (arm_output_asm_insn (\"sub\\t%0, %1, %2\", operands)); case 1: - operands[2] = gen_rtx (CONST_INT, VOIDmode, -INTVAL (operands[2])); - return (output_add_immediate (operands)); - case 2: return (arm_output_asm_insn (\"rsb\\t%0, %2, %1\", operands)); } ") +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (minus:SI (match_operand:SI 1 "arm_rhs_operand" "r,I") + (match_operand:SI 2 "arm_rhs_operand" "rI,r")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r,r") + (minus:SI (match_dup 1) (match_dup 2)))] + "" + "* + switch (which_alternative) + { + case 0: + return arm_output_asm_insn (\"subs\\t%0, %1, %2\", operands); + case 1: + return arm_output_asm_insn (\"rsbs\\t%0, %2, %1\", operands); + } +" +[(set_attr "conds" "set")]) + +(define_insn "decscc" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r") + (match_operator:SI 2 "comparison_operator" + [(reg 24) (const_int 0)])))] + "" + "* + if (which_alternative == 1) + arm_output_asm_insn (\"mov%D2\\t%0, %1\", operands); + return arm_output_asm_insn (\"sub%d2\\t%0, %1, #1\", operands); +" +[(set_attr "conds" "use") + (set_attr "length" "*,2")]) + (define_insn "subsf3" - [(set (match_operand:SF 0 "register_operand" "=f,f") + [(set (match_operand:SF 0 "s_register_operand" "=f,f") (minus:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G") (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))] "" @@ -113,88 
+568,233 @@ case 1: return (arm_output_asm_insn (\"rsfs\\t%0, %2, %1\", operands)); } -") +" +[(set_attr "type" "float")]) (define_insn "subdf3" - [(set (match_operand:DF 0 "register_operand" "=f,f") + [(set (match_operand:DF 0 "s_register_operand" "=f,f") (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G") - (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))] + (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))] "" "* switch (which_alternative) { case 0: return (arm_output_asm_insn (\"sufd\\t%0, %1, %2\", operands)); - case 2: + case 1: return (arm_output_asm_insn (\"rsfd\\t%0, %2, %1\", operands)); } -") +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (minus:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (match_operand:DF 2 "fpu_rhs_operand" "fG")))] + "" + "* + return arm_output_asm_insn (\"sufd\\t%0, %1, %2\", operands); +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f,f") + (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G") + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f,f"))))] + "" + "* + switch (which_alternative) + { + case 0: + return (arm_output_asm_insn (\"sufd\\t%0, %1, %2\", operands)); + case 1: + return (arm_output_asm_insn (\"rsfd\\t%0, %2, %1\", operands)); + } +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (minus:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] + "" + "* + return arm_output_asm_insn (\"sufd\\t%0, %1, %2\", operands); +" +[(set_attr "type" "float")]) + +(define_insn "subxf3" + [(set (match_operand:XF 0 "s_register_operand" "=f,f") + (minus:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G") + (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))] + "ENABLE_XF_PATTERNS" + "* + switch (which_alternative) + { + case 0: + return 
(arm_output_asm_insn (\"sufe\\t%0, %1, %2\", operands)); + case 1: + return (arm_output_asm_insn (\"rsfe\\t%0, %2, %1\", operands)); + } +" +[(set_attr "type" "float")]) ;; Multiplication insns -;; The `&' is too strict, but at least generates correct code. +;; Use `&' and then `0' to prevent the operands 0 and 1 being the same (define_insn "mulsi3" - [(set (match_operand:SI 0 "register_operand" "=&r") - (mult:SI (match_operand:SI 1 "register_operand" "%r") - (match_operand:SI 2 "register_operand" "r")))] + [(set (match_operand:SI 0 "s_register_operand" "=&r,&r") + (mult:SI (match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 1 "s_register_operand" "%?r,0")))] "" "* - if (REGNO (operands[0]) == REGNO (operands[1])) - return (arm_output_asm_insn (\"mul\\t%0, %2, %1\", operands)); - else - return (arm_output_asm_insn (\"mul\\t%0, %1, %2\", operands)); + return (arm_output_asm_insn (\"mul\\t%0, %2, %1\", operands)); ") +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (mult:SI + (match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 1 "s_register_operand" "%?r,0")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=&r,&r") + (mult:SI (match_dup 2) (match_dup 1)))] + "" + "* + return (arm_output_asm_insn (\"muls\\t%0, %2, %1\", operands)); +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (mult:SI + (match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 1 "s_register_operand" "%?r,0")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=&r,&r"))] + "" + "* + return (arm_output_asm_insn (\"muls\\t%0, %2, %1\", operands)); +" +[(set_attr "conds" "set")]) + ;; Unnamed templates to match MLA instruction. 
(define_insn "" - [(set (match_operand:SI 0 "register_operand" "=&r") + [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r") (plus:SI - (mult:SI (match_operand:SI 1 "register_operand" "%r") - (match_operand:SI 2 "register_operand" "r")) - (match_operand:SI 3 "register_operand" "r")))] + (mult:SI (match_operand:SI 2 "s_register_operand" "r,r,r,r") + (match_operand:SI 1 "s_register_operand" "%r,0,r,0")) + (match_operand:SI 3 "s_register_operand" "?r,r,0,0")))] "" "* - if (REGNO (operands[0]) == REGNO (operands[1])) - return (arm_output_asm_insn (\"mla\\t%0, %2, %1, %3\", operands)); - else - return (arm_output_asm_insn (\"mla\\t%0, %1, %2, %3\", operands)); + return (arm_output_asm_insn (\"mla\\t%0, %2, %1, %3\", operands)); ") (define_insn "" - [(set (match_operand:SI 0 "register_operand" "=&r") - (plus:SI - (match_operand:SI 3 "register_operand" "r") - (mult:SI (match_operand:SI 1 "register_operand" "%r") - (match_operand:SI 2 "register_operand" "r"))))] + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (plus:SI + (mult:SI + (match_operand:SI 2 "s_register_operand" "r,r,r,r") + (match_operand:SI 1 "s_register_operand" "%r,0,r,0")) + (match_operand:SI 3 "s_register_operand" "?r,r,0,0")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r") + (plus:SI (mult:SI (match_dup 2) (match_dup 1)) + (match_dup 3)))] "" "* - if (REGNO (operands[0]) == REGNO (operands[1])) - return (arm_output_asm_insn (\"mla\\t%0, %2, %1, %3\", operands)); - else - return (arm_output_asm_insn (\"mla\\t%0, %1, %2, %3\", operands)); -") + return (arm_output_asm_insn (\"mlas\\t%0, %2, %1, %3\", operands)); +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (plus:SI + (mult:SI + (match_operand:SI 2 "s_register_operand" "r,r,r,r") + (match_operand:SI 1 "s_register_operand" "%r,0,r,0")) + (match_operand:SI 3 "s_register_operand" "?r,r,0,0")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=&r,&r,&r,&r"))] + "" + "* + 
return (arm_output_asm_insn (\"mlas\\t%0, %2, %1, %3\", operands)); +" +[(set_attr "conds" "set")]) (define_insn "mulsf3" - [(set (match_operand:SF 0 "register_operand" "=f") - (mult:SF (match_operand:SF 1 "register_operand" "f") + [(set (match_operand:SF 0 "s_register_operand" "=f") + (mult:SF (match_operand:SF 1 "s_register_operand" "f") (match_operand:SF 2 "fpu_rhs_operand" "fG")))] "" - "*return (arm_output_asm_insn (\"mufs\\t%0, %1, %2\", operands));") + "* + return (arm_output_asm_insn (\"fmls\\t%0, %1, %2\", operands)); +" +[(set_attr "type" "float")]) (define_insn "muldf3" - [(set (match_operand:DF 0 "register_operand" "=f") - (mult:DF (match_operand:DF 1 "register_operand" "f") + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mult:DF (match_operand:DF 1 "s_register_operand" "f") (match_operand:DF 2 "fpu_rhs_operand" "fG")))] "" "* return (arm_output_asm_insn (\"mufd\\t%0, %1, %2\", operands)); -") +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mult:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (match_operand:DF 2 "fpu_rhs_operand" "fG")))] + "" + "* + return (arm_output_asm_insn (\"mufd\\t%0, %1, %2\", operands)); +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mult:DF (match_operand:DF 1 "s_register_operand" "f") + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] + "" + "* + return (arm_output_asm_insn (\"mufd\\t%0, %1, %2\", operands)); +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mult:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] + "" + "* + return (arm_output_asm_insn (\"mufd\\t%0, %1, %2\", operands)); +" +[(set_attr "type" "float")]) + +(define_insn "mulxf3" + [(set (match_operand:XF 0 "s_register_operand" "=f") + 
(mult:XF (match_operand:XF 1 "s_register_operand" "f") + (match_operand:XF 2 "fpu_rhs_operand" "fG")))] + "ENABLE_XF_PATTERNS" + "* + return (arm_output_asm_insn (\"mufe\\t%0, %1, %2\", operands)); +" +[(set_attr "type" "float")]) ;; Division insns (define_insn "divsf3" - [(set (match_operand:SF 0 "register_operand" "=f,f") + [(set (match_operand:SF 0 "s_register_operand" "=f,f") (div:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G") (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))] "" @@ -202,14 +802,15 @@ switch (which_alternative) { case 0: - return (arm_output_asm_insn (\"dvfs\\t%0, %1, %2\", operands)); + return (arm_output_asm_insn (\"fdvs\\t%0, %1, %2\", operands)); case 1: - return (arm_output_asm_insn (\"rdfs\\t%0, %2, %1\", operands)); + return (arm_output_asm_insn (\"frds\\t%0, %2, %1\", operands)); } -") +" +[(set_attr "type" "float")]) (define_insn "divdf3" - [(set (match_operand:DF 0 "register_operand" "=f,f") + [(set (match_operand:DF 0 "s_register_operand" "=f,f") (div:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G") (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))] "" @@ -221,117 +822,651 @@ case 1: return (arm_output_asm_insn (\"rdfd\\t%0, %2, %1\", operands)); } -") +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (div:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (match_operand:DF 2 "fpu_rhs_operand" "fG")))] + "" + "* + return (arm_output_asm_insn (\"dvfd\\t%0, %1, %2\", operands)); +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (div:DF (match_operand:DF 1 "fpu_rhs_operand" "fG") + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] + "" + "* + return (arm_output_asm_insn (\"rdfd\\t%0, %2, %1\", operands)); +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (div:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + 
(float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] + "" + "* + return (arm_output_asm_insn (\"dvfd\\t%0, %1, %2\", operands)); +" +[(set_attr "type" "float")]) + +(define_insn "divxf3" + [(set (match_operand:XF 0 "s_register_operand" "=f,f") + (div:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G") + (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))] + "ENABLE_XF_PATTERNS" + "* + switch (which_alternative) + { + case 0: + return (arm_output_asm_insn (\"dvfe\\t%0, %1, %2\", operands)); + case 1: + return (arm_output_asm_insn (\"rdfe\\t%0, %2, %1\", operands)); + } +" +[(set_attr "type" "float")]) ;; Modulo insns (define_insn "modsf3" - [(set (match_operand:SF 0 "register_operand" "=f") - (mod:SF (match_operand:SF 1 "register_operand" "f") + [(set (match_operand:SF 0 "s_register_operand" "=f") + (mod:SF (match_operand:SF 1 "s_register_operand" "f") (match_operand:SF 2 "fpu_rhs_operand" "fG")))] "" "* return (arm_output_asm_insn (\"rmfs\\t%0, %1, %2\", operands)); -") +" +[(set_attr "type" "float")]) (define_insn "moddf3" - [(set (match_operand:DF 0 "register_operand" "=f") - (mod:DF (match_operand:DF 1 "register_operand" "f") + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mod:DF (match_operand:DF 1 "s_register_operand" "f") (match_operand:DF 2 "fpu_rhs_operand" "fG")))] "" "* return (arm_output_asm_insn (\"rmfd\\t%0, %1, %2\", operands)); -") +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mod:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (match_operand:DF 2 "fpu_rhs_operand" "fG")))] + "" + "* + return (arm_output_asm_insn (\"rmfd\\t%0, %1, %2\", operands)); +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mod:DF (match_operand:DF 1 "s_register_operand" "f") + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] + "" + "* + return (arm_output_asm_insn (\"rmfd\\t%0, %1, %2\", 
operands)); +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (mod:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f")) + (float_extend:DF + (match_operand:SF 2 "s_register_operand" "f"))))] + "" + "* + return (arm_output_asm_insn (\"rmfd\\t%0, %1, %2\", operands)); +" +[(set_attr "type" "float")]) + +(define_insn "modxf3" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (mod:XF (match_operand:XF 1 "s_register_operand" "f") + (match_operand:XF 2 "fpu_rhs_operand" "fG")))] + "ENABLE_XF_PATTERNS" + "* + return (arm_output_asm_insn (\"rmfe\\t%0, %1, %2\", operands)); +" +[(set_attr "type" "float")]) ;; Boolean and,ior,xor insns (define_insn "anddi3" - [(set (match_operand:DI 0 "di_operand" "=&r") - (and:DI (match_operand:DI 1 "di_operand" "%r") - (match_operand:DI 2 "di_operand" "r")))] + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (and:DI (match_operand:DI 1 "s_register_operand" "%0,0") + (match_operand:DI 2 "s_register_operand" "r,0")))] "" "* arm_output_asm_insn (\"and\\t%0, %1, %2\", operands); return (arm_output_asm_insn (\"and\\t%R0, %R1, %R2\", operands)); -") +" +[(set_attr "length" "2")]) -(define_insn "andsi3" - [(set (match_operand:SI 0 "register_operand" "=r") - (and:SI (match_operand:SI 1 "register_operand" "r") - (match_operand:SI 2 "arm_rhs_operand" "rI")))] +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (and:DI (zero_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "?r,0")))] "" "* - return (arm_output_asm_insn (\"and\\t%0, %1, %2\", operands)); -") + arm_output_asm_insn (\"and\\t%0, %1, %2\", operands); + return arm_output_asm_insn (\"mov\\t%R0, #0\", operands); +" +[(set_attr "length" "2")]) -(define_insn "andcbsi3" - [(set (match_operand:SI 0 "register_operand" "=r") - (and:SI (match_operand:SI 1 "register_operand" "r") - (not:SI (match_operand:SI 2 "arm_rhs_operand" 
"rI"))))] +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (and:DI (sign_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "?r,0")))] "" "* - return (arm_output_asm_insn (\"bic\\t%0, %1, %2\", operands)); -") + arm_output_asm_insn (\"and\\t%0, %1, %2\", operands); + return arm_output_asm_insn (\"and\\t%R0, %R1, %2, asr #31\", operands); +" +[(set_attr "length" "2")]) -(define_insn "iordi3" - [(set (match_operand:DI 0 "di_operand" "=&r") - (ior:DI (match_operand:DI 1 "di_operand" "%r") - (match_operand:DI 2 "di_operand" "r")))] +(define_insn "andsi3" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (and:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_not_operand" "rK")))] "" "* - arm_output_asm_insn (\"orr\\t%0, %1, %2\", operands); - return (arm_output_asm_insn (\"orr\\t%R0, %R1, %R2\", operands)); + if (GET_CODE (operands[2]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[2]))) + { + operands[2] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[2])); + return arm_output_asm_insn (\"bic\\t%0, %1, %2\", operands); + } + return arm_output_asm_insn (\"and\\t%0, %1, %2\", operands); ") -(define_insn "iorsi3" - [(set (match_operand:SI 0 "register_operand" "=r,r") - (ior:SI (match_operand:SI 1 "register_operand" "r,r") - (match_operand:SI 2 "nonmemory_operand" "r,n")))] +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (and:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_not_operand" "rK")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (and:SI (match_dup 1) (match_dup 2)))] "" "* - switch (which_alternative) + if (GET_CODE (operands[2]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[2]))) { - case 0: - return (arm_output_asm_insn (\"orr\\t%0, %1, %2\", operands)); - case 1: - return (output_multi_immediate (operands, - \"orr\\t%0, %1, %2\", \"orr\\t%0, %0, %2\", - 2, INTVAL 
(operands[2]))); + operands[2] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[2])); + return arm_output_asm_insn (\"bics\\t%0, %1, %2\", operands); } + return arm_output_asm_insn (\"ands\\t%0, %1, %2\", operands); +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (and:SI (match_operand:SI 0 "s_register_operand" "r") + (match_operand:SI 1 "arm_rhs_operand" "rI")) + (const_int 0)))] + "" + "* + return arm_output_asm_insn (\"tst\\t%0, %1\", operands); +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (and:SI (match_operand:SI 0 "s_register_operand" "r") + (match_operand:SI 1 "immediate_operand" "K")) + (const_int 0))) + (clobber (match_scratch:SI 3 "=r"))] + "const_ok_for_arm (~INTVAL (operands[1]))" + "* + operands[1] = GEN_INT (~INTVAL (operands[1])); + return arm_output_asm_insn (\"bics\\t%3, %0, %1\", operands); +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (zero_extract:SI + (match_operand:SI 0 "s_register_operand" "r") + (match_operand:SI 1 "immediate_operand" "n") + (match_operand:SI 2 "immediate_operand" "n")) + (const_int 0)))] + "INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 32 + && INTVAL (operands[1]) > 0 + && INTVAL (operands[1]) + (INTVAL (operands[2]) & 1) <= 8 + && INTVAL (operands[1]) + INTVAL (operands[2]) <= 32" + "* +{ + unsigned int mask = 0; + int cnt = INTVAL (operands[1]); + + while (cnt--) + mask = (mask << 1) | 1; + operands[1] = gen_rtx (CONST_INT, VOIDmode, mask << INTVAL (operands[2])); + return arm_output_asm_insn (\"tst\\t%0, %1\", operands); +} +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (zero_extract:SI + (match_operand:QI 0 "memory_operand" "m") + (match_operand 1 "immediate_operand" "n") + (match_operand 2 "immediate_operand" "n")) + (const_int 0))) + (clobber (match_scratch:QI 3 "=r"))] + "INTVAL (operands[2]) >= 0 && INTVAL 
(operands[2]) < 8 + && INTVAL (operands[1]) > 0 && INTVAL (operands[1]) <= 8" + "* +{ + unsigned int mask = 0; + int cnt = INTVAL (operands[1]); + + while (cnt--) + mask = (mask << 1) | 1; + operands[1] = gen_rtx (CONST_INT, VOIDmode, mask << INTVAL (operands[2])); + arm_output_asm_insn (\"ldrb\\t%3, %0\", operands); + return arm_output_asm_insn (\"tst\\t%3, %1\", operands); +} +" +[(set_attr "conds" "set") + (set_attr "length" "2")]) + +;; constants for op 2 will never be given to these patterns. +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (and:DI (not:DI (match_operand:DI 2 "s_register_operand" "r,0")) + (match_operand:DI 1 "s_register_operand" "0,r")))] + "" + "* + arm_output_asm_insn (\"bic\\t%0, %1, %2\", operands); + return arm_output_asm_insn (\"bic\\t%R0, %R1, %R2\", operands); +" +[(set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (and:DI (not:DI (zero_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r"))) + (match_operand:DI 1 "s_register_operand" "?r,0")))] + "" + "* + arm_output_asm_insn (\"bic\\t%0, %1, %2\", operands); + if (REGNO (operands[1]) != REGNO (operands[0])) + return arm_output_asm_insn (\"mov\\t%R0, %R1\", operands); + return \"\"; +" +[(set_attr "length" "2,1")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (and:DI (not:DI (sign_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r"))) + (match_operand:DI 1 "s_register_operand" "?r,0")))] + "" + "* + arm_output_asm_insn (\"bic\\t%0, %1, %2\", operands); + return arm_output_asm_insn (\"bic\\t%R0, %R1, %2, asr #31\", operands); +" +[(set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r")) + (match_operand:SI 1 "s_register_operand" "r")))] + "" + "* + return (arm_output_asm_insn (\"bic\\t%0, %1, %2\", operands)); ") +(define_insn "" + [(set (reg:CC_NOOV 
24) + (compare:CC_NOOV (and:SI + (not:SI (match_operand:SI 2 "s_register_operand" "r")) + (match_operand:SI 1 "s_register_operand" "r")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (and:SI (not:SI (match_dup 2)) (match_dup 1)))] + "" + "* + return (arm_output_asm_insn (\"bics\\t%0, %1, %2\", operands)); +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (and:SI + (not:SI (match_operand:SI 2 "s_register_operand" "r")) + (match_operand:SI 1 "s_register_operand" "r")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=r"))] + "" + "* + return (arm_output_asm_insn (\"bics\\t%0, %1, %2\", operands)); +" +[(set_attr "conds" "set")]) + +(define_insn "iordi3" + [(set (match_operand:DI 0 "s_register_operand" "=&r") + (ior:DI (match_operand:DI 1 "s_register_operand" "%0") + (match_operand:DI 2 "s_register_operand" "r")))] + "" + "* + arm_output_asm_insn (\"orr\\t%0, %1, %2\", operands); + return (arm_output_asm_insn (\"orr\\t%R0, %R1, %R2\", operands)); +" +[(set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (ior:DI (zero_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "?r,0")))] + "" + "* + arm_output_asm_insn (\"orr\\t%0, %1, %2\", operands); + if (REGNO (operands[0]) != REGNO (operands[1])) + return (arm_output_asm_insn (\"mov\\t%R0, %R1\", operands)); + return \"\"; +" +[(set_attr "length" "2,1")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (ior:DI (sign_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "?r,0")))] + "" + "* + arm_output_asm_insn (\"orr\\t%0, %1, %2\", operands); + return (arm_output_asm_insn (\"orr\\t%R0, %R1, %2, asr #31\", operands)); +" +[(set_attr "length" "2")]) + +(define_insn "iorsi3" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (ior:SI (match_operand:SI 1 
"s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rI")))] + "" + "* + return (arm_output_asm_insn (\"orr\\t%0, %1, %2\", operands)); +") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r") + (match_operand:SI 2 "arm_rhs_operand" "rI")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (ior:SI (match_dup 1) (match_dup 2)))] + "" + "* + return arm_output_asm_insn (\"orrs\\t%0, %1, %2\", operands); +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r") + (match_operand:SI 2 "arm_rhs_operand" "rI")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=r"))] + "" + "* + return arm_output_asm_insn (\"orrs\\t%0, %1, %2\", operands); +" +[(set_attr "conds" "set")]) + +(define_insn "xordi3" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (xor:DI (match_operand:DI 1 "s_register_operand" "%0,0") + (match_operand:DI 2 "s_register_operand" "r,0")))] + "" + "* + arm_output_asm_insn (\"eor\\t%0, %1, %2\", operands); + return arm_output_asm_insn (\"eor\\t%R0, %R1, %R2\", operands); +" +[(set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (xor:DI (zero_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "?r,0")))] + "" + "* + arm_output_asm_insn (\"eor\\t%0, %1, %2\", operands); + if (REGNO (operands[0]) != REGNO (operands[1])) + return arm_output_asm_insn (\"mov\\t%R0, %R1\", operands); + return \"\"; +" +[(set_attr "length" "2,1")]) + +(define_insn "" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (xor:DI (sign_extend:DI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:DI 1 "s_register_operand" "?r,0")))] + "" + "* + arm_output_asm_insn (\"eor\\t%0, %1, %2\", operands); + return arm_output_asm_insn (\"eor\\t%R0, %R1, %2, asr #31\", 
operands); +" +[(set_attr "length" "2")]) + (define_insn "xorsi3" - [(set (match_operand:SI 0 "register_operand" "=r,r") - (xor:SI (match_operand:SI 1 "register_operand" "r,r") - (match_operand:SI 2 "nonmemory_operand" "r,n")))] + [(set (match_operand:SI 0 "s_register_operand" "=r") + (xor:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rI")))] "" "* - switch (which_alternative) - { - case 0: - return (arm_output_asm_insn (\"eor\\t%0, %1, %2\", operands)); - case 1: - return (output_multi_immediate (operands, - \"eor\\t%0, %1, %2\", \"eor\\t%0, %0, %2\", - 2, INTVAL (operands[2]))); - } + return (arm_output_asm_insn (\"eor\\t%0, %1, %2\", operands)); ") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (xor:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rI")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (xor:SI (match_dup 1) (match_dup 2)))] + "" + "* + return arm_output_asm_insn (\"eors\\t%0, %1, %2\", operands); +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (xor:SI (match_operand:SI 0 "s_register_operand" "r") + (match_operand:SI 1 "arm_rhs_operand" "rI")) + (const_int 0)))] + "" + "* + return arm_output_asm_insn (\"teq\\t%0, %1\", operands); +" +[(set_attr "conds" "set")]) + +;; by splitting (IOR (AND (NOT A) (NOT B)) C) as D = AND (IOR A B) (NOT C), +;; (NOT D) we can sometimes merge the final NOT into one of the following +;; insns + +(define_split + [(set (match_operand:SI 0 "s_register_operand" "=r") + (ior:SI (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "r")) + (not:SI (match_operand:SI 2 "arm_rhs_operand" "rI"))) + (match_operand:SI 3 "arm_rhs_operand" "rI"))) + (clobber (match_operand:SI 4 "s_register_operand" "=r"))] + "" + [(set (match_dup 4) (and:SI (ior:SI (match_dup 1) (match_dup 2)) + (not:SI (match_dup 3)))) + (set (match_dup 0) (not:SI (match_dup 4)))] + "" +) + 
+(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r") + (and:SI (ior:SI (match_operand:SI 1 "s_register_operand" "r,r,0") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")) + (not:SI (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI"))))] + "" + "* + arm_output_asm_insn (\"orr\\t%0, %1, %2\", operands); + return arm_output_asm_insn (\"bic\\t%0, %0, %3\", operands); +" +[(set_attr "length" "2")]) + + + +;; Minimum and maximum insns + +(define_insn "smaxsi3" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r") + (smax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))) + (clobber (reg:CC 24))] + "" + "* + arm_output_asm_insn (\"cmp\\t%1, %2\", operands); + if (which_alternative != 0) + arm_output_asm_insn (\"movge\\t%0, %1\", operands); + if (which_alternative != 1) + return arm_output_asm_insn (\"movlt\\t%0, %2\", operands); + return \"\"; +" +[(set_attr "conds" "clob") + (set_attr "length" "2,2,3")]) + +(define_insn "sminsi3" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r") + (smin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))) + (clobber (reg:CC 24))] + "" + "* + arm_output_asm_insn (\"cmp\\t%1, %2\", operands); + if (which_alternative != 0) + arm_output_asm_insn (\"movle\\t%0, %1\", operands); + if (which_alternative != 1) + return arm_output_asm_insn (\"movgt\\t%0, %2\", operands); + return \"\"; +" +[(set_attr "conds" "clob") + (set_attr "length" "2,2,3")]) + +(define_insn "umaxsi3" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r") + (umax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))) + (clobber (reg:CC 24))] + "" + "* + arm_output_asm_insn (\"cmp\\t%1, %2\", operands); + if (which_alternative != 0) + arm_output_asm_insn (\"movcs\\t%0, %1\", operands); + if (which_alternative != 1) + return arm_output_asm_insn (\"movcc\\t%0, %2\", 
operands); + return \"\"; +" +[(set_attr "conds" "clob") + (set_attr "length" "2,2,3")]) + +(define_insn "uminsi3" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r") + (umin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))) + (clobber (reg:CC 24))] + "" + "* + arm_output_asm_insn (\"cmp\\t%1, %2\", operands); + if (which_alternative != 0) + arm_output_asm_insn (\"movcc\\t%0, %1\", operands); + if (which_alternative != 1) + return arm_output_asm_insn (\"movcs\\t%0, %2\", operands); + return \"\"; +" +[(set_attr "conds" "clob") + (set_attr "length" "2,2,3")]) + +(define_insn "" + [(set (match_operand:SI 0 "memory_operand" "=m") + (match_operator:SI 3 "minmax_operator" + [(match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "s_register_operand" "r")])) + (clobber (reg:CC 24))] + "" + "* + operands[3] = gen_rtx (minmax_code (operands[3]), SImode, operands[1], + operands[2]); + arm_output_asm_insn (\"cmp\\t%1, %2\", operands); + arm_output_asm_insn (\"str%d3\\t%1, %0\", operands); + return arm_output_asm_insn (\"str%D3\\t%2, %0\", operands); +" +[(set_attr "conds" "clob") + (set_attr "length" "3") + (set_attr "type" "store1")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (match_operator:SI 4 "shiftable_operator" + [(match_operator:SI 5 "minmax_operator" + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_rhs_operand" "rI,rI")]) + (match_operand:SI 1 "s_register_operand" "0,?r")])) + (clobber (reg:CC 24))] + "" + "* +{ + char buf[100]; + enum rtx_code code = GET_CODE (operands[4]); + char *inst = arithmetic_instr (operands[4], TRUE); + + operands[5] = gen_rtx (minmax_code (operands[5]), SImode, operands[2], + operands[3]); + arm_output_asm_insn (\"cmp\\t%2, %3\", operands); + sprintf (buf, \"%s%%d5\\t%%0, %%1, %%2\", inst); + arm_output_asm_insn (buf, operands); + if (which_alternative != 0 || operands[3] != const0_rtx + || (code != 
PLUS && code != MINUS && code != IOR && code != XOR)) + { + sprintf (buf, \"%s%%D5\\t%%0, %%1, %%3\", inst); + return arm_output_asm_insn (buf, operands); + } + return \"\"; +} +" +[(set_attr "conds" "clob") + (set_attr "length" "3")]) + ;; Shift and rotation insns (define_insn "ashlsi3" - [(set (match_operand:SI 0 "register_operand" "=r") - (ashift:SI (match_operand:SI 1 "register_operand" "r") - (match_operand:SI 2 "general_operand" "rn")))] + [(set (match_operand:SI 0 "s_register_operand" "=r") + (ashift:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rn")))] "" "* return (output_shifted_move (ASHIFT, operands)); ") (define_insn "ashrsi3" - [(set (match_operand:SI 0 "register_operand" "=r") - (ashiftrt:SI (match_operand:SI 1 "register_operand" "r") - (match_operand:SI 2 "general_operand" "rn")))] + [(set (match_operand:SI 0 "s_register_operand" "=r") + (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rn")))] "" "* return (output_shifted_move (ASHIFTRT, operands)); @@ -341,18 +1476,18 @@ ;; An unnamed pattern is needed for expansion of zero_extend. 
(define_insn "" - [(set (match_operand:SI 0 "register_operand" "=r") - (lshift:SI (match_operand:SI 1 "register_operand" "r") - (match_operand:SI 2 "general_operand" "rn")))] + [(set (match_operand:SI 0 "s_register_operand" "=r") + (lshift:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rn")))] "" "* return (output_shifted_move (LSHIFT, operands)); ") (define_insn "lshrsi3" - [(set (match_operand:SI 0 "register_operand" "=r") - (lshiftrt:SI (match_operand:SI 1 "register_operand" "r") - (match_operand:SI 2 "general_operand" "rn")))] + [(set (match_operand:SI 0 "s_register_operand" "=r") + (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rn")))] "" "* return (output_shifted_move (LSHIFTRT, operands)); @@ -361,151 +1496,580 @@ ;; rotlsi3 is not defined yet to see what happens (define_insn "rotrsi3" - [(set (match_operand:SI 0 "register_operand" "=r,r") - (rotatert:SI (match_operand:SI 1 "register_operand" "r,r") - (match_operand:SI 2 "general_operand" "r,n")))] + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (rotatert:SI (match_operand:SI 1 "s_register_operand" "r,r") + (match_operand:SI 2 "arm_rhs_operand" "r,n")))] "" "* switch (which_alternative) { case 0: - return (arm_output_asm_insn (\"mov\\t%0, %1,ror %2\", operands)); + return (arm_output_asm_insn (\"mov\\t%0, %1, ror %2\", operands)); case 1: if (INTVAL(operands[2]) > 31) operands[2] = gen_rtx (CONST_INT, VOIDmode, INTVAL (operands[2]) % 32); - return (arm_output_asm_insn (\"mov\\t%0, %1,ror%2\", operands)); + return (arm_output_asm_insn (\"mov\\t%0, %1, ror %2\", operands)); } ") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (match_operator:SI 1 "shift_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_rhs_operand" "rn")]) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (match_op_dup 1 [(match_dup 2) (match_dup 3)]))] + "" + "* 
+{ + char buf[100]; + + sprintf (buf, \"movs\\t%%0, %%2, %s %%3\", + shift_instr (GET_CODE (operands[1]), &operands[3])); + return arm_output_asm_insn (buf, operands); +} +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (match_operator:SI 1 "shift_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_rhs_operand" "rn")]) + (const_int 0))) + (clobber (match_scratch:SI 0 "=r"))] + "" + "* +{ + char buf[100]; + + sprintf (buf, \"movs\\t%%0, %%2, %s %%3\", + shift_instr (GET_CODE (operands[1]), &operands[3])); + return arm_output_asm_insn (buf, operands); +} +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (not:SI (match_operator:SI 1 "shift_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_rhs_operand" "rn")])))] + "" + "* +{ + char buf[100]; + sprintf (buf, \"mvn\\t%%0, %%2, %s %%3\", + shift_instr (GET_CODE (operands[1]), &operands[3])); + return arm_output_asm_insn (buf, operands); +} +") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (not:SI (match_operator:SI 1 "shift_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_rhs_operand" "rn")])) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (not:SI (match_op_dup 1 [(match_dup 2) (match_dup 3)])))] + "" + "* +{ + char buf[100]; + sprintf (buf, \"mvns\\t%%0, %%2, %s %%3\", + shift_instr (GET_CODE (operands[1]), &operands[3])); + return arm_output_asm_insn (buf, operands); +} +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (not:SI (match_operator:SI 1 "shift_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_rhs_operand" "rn")])) + (const_int 0))) + (clobber (match_scratch:SI 0 "=r"))] + "" + "* +{ + char buf[100]; + sprintf (buf, \"mvns\\t%%0, %%2, %s %%3\", + shift_instr (GET_CODE 
(operands[1]), &operands[3])); + return arm_output_asm_insn (buf, operands); +} +" +[(set_attr "conds" "set")]) + ;; Unary arithmetic insns (define_insn "negdi2" - [(set (match_operand:DI 0 "di_operand" "=&r") - (neg:DI (match_operand:DI 1 "di_operand" "r")))] + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (neg:DI (match_operand:DI 1 "s_register_operand" "?r,0")))] "" "* - arm_output_asm_insn (\"rsb\\t%0, %1, #0\", operands); + arm_output_asm_insn (\"rsbs\\t%0, %1, #0\", operands); return (arm_output_asm_insn (\"rsc\\t%R0, %R1, #0\", operands)); -") +" +[(set_attr "conds" "clob") + (set_attr "length" "2")]) (define_insn "negsi2" - [(set (match_operand:SI 0 "register_operand" "=r") - (neg:SI (match_operand:SI 1 "register_operand" "r")))] + [(set (match_operand:SI 0 "s_register_operand" "=r") + (neg:SI (match_operand:SI 1 "s_register_operand" "r")))] "" "* return (arm_output_asm_insn (\"rsb\\t%0, %1, #0\", operands)); ") (define_insn "negsf2" - [(set (match_operand:SF 0 "register_operand" "=f") - (neg:SF (match_operand:SF 1 "register_operand" "f")))] + [(set (match_operand:SF 0 "s_register_operand" "=f") + (neg:SF (match_operand:SF 1 "s_register_operand" "f")))] "" "* return (arm_output_asm_insn (\"mnfs\\t%0, %1\", operands)); -") +" +[(set_attr "type" "float")]) (define_insn "negdf2" - [(set (match_operand:DF 0 "register_operand" "=f") - (neg:DF (match_operand:DF 1 "register_operand" "f")))] + [(set (match_operand:DF 0 "s_register_operand" "=f") + (neg:DF (match_operand:DF 1 "s_register_operand" "f")))] "" "* return (arm_output_asm_insn (\"mnfd\\t%0, %1\", operands)); -") +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (neg:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f"))))] + "" + "* + return (arm_output_asm_insn (\"mnfd\\t%0, %1\", operands)); +" +[(set_attr "type" "float")]) + +(define_insn "negxf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (neg:XF 
(match_operand:XF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS" + "* + return (arm_output_asm_insn (\"mnfe\\t%0, %1\", operands)); +" +[(set_attr "type" "float")]) + +;; abssi2 doesn't really clobber the condition codes if a different register +;; is being set. To keep things simple, assume during rtl manipulations that +;; it does, but tell the final scan operator the truth. Similarly for +;; (neg (abs...)) + +(define_insn "abssi2" + [(set (match_operand:SI 0 "s_register_operand" "=r,&r") + (abs:SI (match_operand:SI 1 "s_register_operand" "0,r"))) + (clobber (reg 24))] + "" + "* + switch (which_alternative) + { + case 0: + arm_output_asm_insn (\"cmp\\t%0, #0\", operands); + return arm_output_asm_insn (\"rsblt\\t%0, %0, #0\", operands); + case 1: + arm_output_asm_insn (\"eor\\t%0, %1, %1, asr #31\", operands); + return arm_output_asm_insn (\"sub\\t%0, %0, %1, asr #31\", operands); + } +" +[(set_attr "conds" "clob,*") + (set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,&r") + (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "0,r")))) + (clobber (reg 24))] + "" + "* + switch (which_alternative) + { + case 0: + arm_output_asm_insn (\"cmp\\t%0, #0\", operands); + return arm_output_asm_insn (\"rsbgt\\t%0, %0, #0\", operands); + case 1: + arm_output_asm_insn (\"eor\\t%0, %1, %1, asr #31\", operands); + return arm_output_asm_insn (\"rsb\\t%0, %0, %1, asr #31\", operands); + } +" +[(set_attr "conds" "clob,*") + (set_attr "length" "2")]) (define_insn "abssf2" - [(set (match_operand:SF 0 "register_operand" "=f") - (abs:SF (match_operand:SF 1 "register_operand" "f")))] + [(set (match_operand:SF 0 "s_register_operand" "=f") + (abs:SF (match_operand:SF 1 "s_register_operand" "f")))] "" "* return (arm_output_asm_insn (\"abss\\t%0, %1\", operands)); -") +" +[(set_attr "type" "float")]) (define_insn "absdf2" - [(set (match_operand:DF 0 "register_operand" "=f") - (abs:DF (match_operand:DF 1 "register_operand" "f")))] + 
[(set (match_operand:DF 0 "s_register_operand" "=f") + (abs:DF (match_operand:DF 1 "s_register_operand" "f")))] "" "* return (arm_output_asm_insn (\"absd\\t%0, %1\", operands)); -") +" +[(set_attr "type" "float")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (abs:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f"))))] + "" + "* + return (arm_output_asm_insn (\"absd\\t%0, %1\", operands)); +" +[(set_attr "type" "float")]) + +(define_insn "absxf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (abs:XF (match_operand:XF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS" + "* + return (arm_output_asm_insn (\"abse\\t%0, %1\", operands)); +" +[(set_attr "type" "float")]) (define_insn "sqrtsf2" - [(set (match_operand:SF 0 "register_operand" "=f") - (sqrt:SF (match_operand:SF 1 "register_operand" "f")))] + [(set (match_operand:SF 0 "s_register_operand" "=f") + (sqrt:SF (match_operand:SF 1 "s_register_operand" "f")))] "" "* return (arm_output_asm_insn (\"sqts\\t%0, %1\", operands)); -") +" +[(set_attr "type" "float_em")]) (define_insn "sqrtdf2" - [(set (match_operand:DF 0 "register_operand" "=f") - (sqrt:DF (match_operand:DF 1 "register_operand" "f")))] + [(set (match_operand:DF 0 "s_register_operand" "=f") + (sqrt:DF (match_operand:DF 1 "s_register_operand" "f")))] "" "* return (arm_output_asm_insn (\"sqtd\\t%0, %1\", operands)); -") +" +[(set_attr "type" "float_em")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (sqrt:DF (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f"))))] + "" + "* + return (arm_output_asm_insn (\"sqtd\\t%0, %1\", operands)); +" +[(set_attr "type" "float_em")]) + +(define_insn "sqrtxf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (sqrt:XF (match_operand:XF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS" + "* + return (arm_output_asm_insn (\"sqte\\t%0, %1\", operands)); +" +[(set_attr "type" "float_em")]) + +(define_insn "sinsf2" 
+ [(set (match_operand:SF 0 "s_register_operand" "=f") + (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 0))] + "" + "* + return arm_output_asm_insn (\"sins\\t%0, %1\", operands); +" +[(set_attr "type" "float_em")]) + +(define_insn "sindf2" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 0))] + "" + "* + return arm_output_asm_insn (\"sind\\t%0, %1\", operands); +" +[(set_attr "type" "float_em")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (unspec:DF [(float_extend:DF + (match_operand:SF 1 "s_register_operand" "f"))] 0))] + "" + "* + return arm_output_asm_insn (\"sind\\t%0, %1\", operands); +" +[(set_attr "type" "float_em")]) + +(define_insn "sinxf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 0))] + "ENABLE_XF_PATTERNS" + "* + return arm_output_asm_insn (\"sine\\t%0, %1\", operands); +" +[(set_attr "type" "float_em")]) + +(define_insn "cossf2" + [(set (match_operand:SF 0 "s_register_operand" "=f") + (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 1))] + "" + "* + return arm_output_asm_insn (\"coss\\t%0, %1\", operands); +" +[(set_attr "type" "float_em")]) + +(define_insn "cosdf2" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 1))] + "" + "* + return arm_output_asm_insn (\"cosd\\t%0, %1\", operands); +" +[(set_attr "type" "float_em")]) + +(define_insn "" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (unspec:DF [(float_extend:DF + (match_operand:SF 1 "s_register_operand" "f"))] 1))] + "" + "* + return arm_output_asm_insn (\"cosd\\t%0, %1\", operands); +" +[(set_attr "type" "float_em")]) + +(define_insn "cosxf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 1))] + "ENABLE_XF_PATTERNS" + "* + return arm_output_asm_insn 
(\"cose\\t%0, %1\", operands); +" +[(set_attr "type" "float_em")]) + +(define_insn "one_cmpldi2" + [(set (match_operand:DI 0 "s_register_operand" "=&r,&r") + (not:DI (match_operand:DI 1 "s_register_operand" "?r,0")))] + "" + "* + arm_output_asm_insn (\"mvn\\t%0, %1\", operands); + return arm_output_asm_insn (\"mvn\\t%R0, %R1\", operands); +" +[(set_attr "length" "2")]) (define_insn "one_cmplsi2" - [(set (match_operand:SI 0 "register_operand" "=r") - (not:SI (match_operand:SI 1 "register_operand" "r")))] + [(set (match_operand:SI 0 "s_register_operand" "=r") + (not:SI (match_operand:SI 1 "s_register_operand" "r")))] "" "* return (arm_output_asm_insn (\"mvn\\t%0, %1\", operands)); ") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r")) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (not:SI (match_dup 1)))] + "" + "* + return (arm_output_asm_insn (\"mvns\\t%0, %1\", operands)); +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=r"))] + "" + "* + return (arm_output_asm_insn (\"mvns\\t%0, %1\", operands)); +" +[(set_attr "conds" "set")]) ;; Fixed <--> Floating conversion insns (define_insn "floatsisf2" - [(set (match_operand:SF 0 "register_operand" "=f") - (float:SF (match_operand:SI 1 "register_operand" "r")))] + [(set (match_operand:SF 0 "s_register_operand" "=f") + (float:SF (match_operand:SI 1 "s_register_operand" "r")))] "" "* return (arm_output_asm_insn (\"flts\\t%0, %1\", operands)); -") +" +[(set_attr "type" "r_2_f")]) (define_insn "floatsidf2" - [(set (match_operand:DF 0 "register_operand" "=f") - (float:DF (match_operand:SI 1 "register_operand" "r")))] + [(set (match_operand:DF 0 "s_register_operand" "=f") + (float:DF (match_operand:SI 1 "s_register_operand" "r")))] "" "* return (arm_output_asm_insn (\"fltd\\t%0, %1\", 
operands)); -") +" +[(set_attr "type" "r_2_f")]) + +(define_insn "floatsixf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (float:XF (match_operand:SI 1 "s_register_operand" "r")))] + "ENABLE_XF_PATTERNS" + "* + return (arm_output_asm_insn (\"flte\\t%0, %1\", operands)); +" +[(set_attr "type" "r_2_f")]) + +(define_insn "fix_truncsfsi2" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (fix:SI (match_operand:SF 1 "s_register_operand" "f")))] + "" + "* + return arm_output_asm_insn (\"fixz\\t%0, %1\", operands); +" +[(set_attr "type" "f_2_r")]) + +(define_insn "fix_truncdfsi2" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (fix:SI (match_operand:DF 1 "s_register_operand" "f")))] + "" + "* + return arm_output_asm_insn (\"fixz\\t%0, %1\", operands); +" +[(set_attr "type" "f_2_r")]) + +(define_insn "fix_truncxfsi2" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (fix:SI (match_operand:XF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS" + "* + return arm_output_asm_insn (\"fixz\\t%0, %1\", operands); +" +[(set_attr "type" "f_2_r")]) ;; Truncation insns (define_insn "truncdfsf2" - [(set (match_operand:SF 0 "register_operand" "=f") + [(set (match_operand:SF 0 "s_register_operand" "=f") (float_truncate:SF - (match_operand:DF 1 "register_operand" "f")))] + (match_operand:DF 1 "s_register_operand" "f")))] "" "* return (arm_output_asm_insn (\"mvfs\\t%0, %1\", operands)); -") +" +[(set_attr "type" "float")]) + +(define_insn "truncxfsf2" + [(set (match_operand:SF 0 "s_register_operand" "=f") + (float_truncate:SF + (match_operand:XF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS" + "* + return (arm_output_asm_insn (\"mvfs\\t%0, %1\", operands)); +" +[(set_attr "type" "float")]) + +(define_insn "truncxfdf2" + [(set (match_operand:DF 0 "s_register_operand" "=f") + (float_truncate:DF + (match_operand:XF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS" + "* + return (arm_output_asm_insn (\"mvfd\\t%0, %1\", operands)); +" 
+[(set_attr "type" "float")]) -;; Zero extension instructions. +;; Zero and sign extension instructions. -(define_expand "zero_extendhisi2" - [(set (match_dup 2) - (ashift:SI (match_operand:HI 1 "register_operand" "") - (const_int 16))) - (set (match_operand:SI 0 "register_operand" "") - (lshiftrt:SI (match_dup 2) - (const_int 16)))] +(define_insn "zero_extendsidi2" + [(set (match_operand:DI 0 "s_register_operand" "=r") + (zero_extend:DI (match_operand:SI 1 "s_register_operand" "r")))] "" - " -{ operands[1] = gen_lowpart (SImode, operands[1]); - operands[2] = gen_reg_rtx (SImode); }") + "* + if (REGNO (operands[1]) != REGNO (operands[0])) + arm_output_asm_insn (\"mov\\t%0, %1\", operands); + return arm_output_asm_insn (\"mov\\t%R0, #0\", operands); +" +[(set_attr "length" "2")]) + +(define_insn "zero_extendqidi2" + [(set (match_operand:DI 0 "s_register_operand" "=r,r") + (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "r,m")))] + "" + "* + switch (which_alternative) + { + case 0: + arm_output_asm_insn (\"and\\t%0, %1, #255\", operands); + break; + case 1: + arm_output_asm_insn (\"ldrb\\t%0, %1\",operands); + break; + } + return arm_output_asm_insn (\"mov\\t%R0, #0\", operands); +" +[(set_attr "length" "2") + (set_attr "type" "*,load")]) + +(define_insn "extendsidi2" + [(set (match_operand:DI 0 "s_register_operand" "=r") + (sign_extend:DI (match_operand:SI 1 "s_register_operand" "r")))] + "" + "* + if (REGNO (operands[1]) != REGNO (operands[0])) + arm_output_asm_insn (\"mov\\t%0, %1\", operands); + return arm_output_asm_insn (\"mov\\t%R0, %0, asr #31\", operands); +" +[(set_attr "length" "2")]) + +(define_expand "zero_extendhisi2" + [(set (match_dup 2) + (ashift:SI (match_operand:HI 1 "s_register_operand" "") + (const_int 16))) + (set (match_operand:SI 0 "s_register_operand" "") + (lshiftrt:SI (match_dup 2) + (const_int 16)))] + "" + " +{ operands[1] = gen_lowpart (SImode, operands[1]); + operands[2] = gen_reg_rtx (SImode); }") (define_insn 
"zero_extendqihi2" - [(set (match_operand:HI 0 "register_operand" "=r") + [(set (match_operand:HI 0 "s_register_operand" "=r") (zero_extend:HI - (match_operand:QI 1 "register_operand" "r")))] + (match_operand:QI 1 "s_register_operand" "r")))] "" "* return (arm_output_asm_insn (\"and\\t%0, %1, #255\\t@ zero_extendqihi2\", operands)); ") +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (match_operand:QI 1 "s_register_operand" "r") + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (zero_extend:HI (match_dup 1)))] + "" + "* + return arm_output_asm_insn (\"ands\\t%0, %1, #255\", operands); +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (match_operand:QI 0 "s_register_operand" "r") + (const_int 0)))] + "" + "* + return arm_output_asm_insn (\"tst\\t%0, #255\", operands); +" +[(set_attr "conds" "set")]) + (define_insn "zero_extendqisi2" - [(set (match_operand:SI 0 "register_operand" "=r,r") + [(set (match_operand:SI 0 "s_register_operand" "=r,r") (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,m")))] "" @@ -517,13 +2081,38 @@ case 1: return (arm_output_asm_insn (\"ldrb\\t%0, %1\\t@ zero_extendqisi2\", operands)); } -") +" +[(set_attr "type" "*,load")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (match_operand:QI 1 "s_register_operand" "r") + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (zero_extend:SI (match_dup 1)))] + "" + "* + return arm_output_asm_insn (\"ands\\t%0, %1, #255\", operands); +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (match_operand:QI 1 "s_register_operand" "r") + (const_int 0))) + (set (match_operand:QI 0 "s_register_operand" "=r") + (match_dup 1))] + "" + "* + return arm_output_asm_insn (\"ands\\t%0, %1, #255\", operands); +" +[(set_attr "conds" "set")]) (define_expand "extendhisi2" [(set (match_dup 2) - (ashift:SI (match_operand:HI 1 "register_operand" "") + 
(ashift:SI (match_operand:HI 1 "s_register_operand" "") (const_int 16))) - (set (match_operand:SI 0 "register_operand" "") + (set (match_operand:SI 0 "s_register_operand" "") (ashiftrt:SI (match_dup 2) (const_int 16)))] "" @@ -533,9 +2122,9 @@ (define_expand "extendqihi2" [(set (match_dup 2) - (ashift:SI (match_operand:QI 1 "register_operand" "") + (ashift:SI (match_operand:QI 1 "s_register_operand" "") (const_int 24))) - (set (match_operand:HI 0 "register_operand" "") + (set (match_operand:HI 0 "s_register_operand" "") (ashiftrt:SI (match_dup 2) (const_int 24)))] "" @@ -546,9 +2135,9 @@ (define_expand "extendqisi2" [(set (match_dup 2) - (ashift:SI (match_operand:QI 1 "register_operand" "") + (ashift:SI (match_operand:QI 1 "s_register_operand" "") (const_int 24))) - (set (match_operand:SI 0 "register_operand" "") + (set (match_operand:SI 0 "s_register_operand" "") (ashiftrt:SI (match_dup 2) (const_int 24)))] "" @@ -557,25 +2146,45 @@ operands[2] = gen_reg_rtx (SImode); }") (define_insn "extendsfdf2" - [(set (match_operand:DF 0 "register_operand" "=f") - (float_extend:DF (match_operand:SF 1 "register_operand" "f")))] + [(set (match_operand:DF 0 "s_register_operand" "=f") + (float_extend:DF (match_operand:SF 1 "s_register_operand" "f")))] "" "* return (arm_output_asm_insn (\"mvfd\\t%0, %1\", operands)); +" +[(set_attr "type" "float")]) + +(define_insn "extendsfxf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (float_extend:XF (match_operand:SF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS" + "* + return (arm_output_asm_insn (\"mvfe\\t%0, %1\", operands)); ") + +(define_insn "extenddfxf2" + [(set (match_operand:XF 0 "s_register_operand" "=f") + (float_extend:XF (match_operand:DF 1 "s_register_operand" "f")))] + "ENABLE_XF_PATTERNS" + "* + return (arm_output_asm_insn (\"mvfe\\t%0, %1\", operands)); +" +[(set_attr "type" "float")]) + ;; Move insns (including loads and stores) ;; XXX Just some ideas about movti. 
- +;; I don't think these are a good idea on the arm, there just aren't enough +;; registers ;;(define_expand "loadti" -;; [(set (match_operand:TI 0 "register_operand" "") +;; [(set (match_operand:TI 0 "s_register_operand" "") ;; (mem:TI (match_operand:SI 1 "address_operand" "")))] ;; "" "") ;;(define_expand "storeti" ;; [(set (mem:TI (match_operand:TI 0 "address_operand" "")) -;; (match_operand:TI 1 "register_operand" ""))] +;; (match_operand:TI 1 "s_register_operand" ""))] ;; "" "") ;;(define_expand "movti" @@ -625,25 +2234,151 @@ (define_insn "movdi" - [(set (match_operand:DI 0 "di_operand" "=r,r,r,o,r") - (match_operand:DI 1 "di_operand" "r,n,o,r,F"))] + [(set (match_operand:DI 0 "di_operand" "=r,r,r,o<>,r") + (match_operand:DI 1 "di_operand" "rK,n,o<>,r,F"))] "" "* return (output_move_double (operands)); -") +" +[(set_attr "length" "2,8,2,2,8") + (set_attr "type" "*,*,load,store2,*")]) -(define_insn "movsi" - [(set (match_operand:SI 0 "general_operand" "=r,r,r,m") - (match_operand:SI 1 "general_operand" "r,n,m,r"))] +(define_expand "movsi" + [(set (match_operand:SI 0 "general_operand" "") + (match_operand:SI 1 "general_operand" ""))] "" + " + /* Everything except mem = const or mem = mem can be done easily */ + if (GET_CODE (operands[0]) == MEM) + operands[1] = force_reg (SImode, operands[1]); + if (GET_CODE (operands[1]) == CONST_INT + && !(const_ok_for_arm (INTVAL (operands[1])) + || const_ok_for_arm (~INTVAL (operands[1])))) + { + int n = INTVAL (operands[1]); + rtx tmpreg, tmpreg2; + int i, n_ones = 0, first = 1, last = 0; + + if (GET_CODE (operands[0]) != REG + && GET_CODE (operands[0]) != SUBREG) + abort (); + for (i = 0; i < 32; i++) + if (n & 1 << i) + n_ones++; + /* These loops go the opposite way around to those in arm.c so that + the last constant may be more likely to be eliminted into the + next instruction */ + + if (n_ones > 16) + { + n = (~n) & 0xffffffff; + for (i = 30; i >= 0; i -= 2) + { + if (n & (3 << i)) + { + i -= 6; + if (i < 0) + i = 
0; + if ((n & (255 << i)) == n) + last = 1; + if (first) + { + rtx equal; + rtx insn = + emit_insn (gen_movsi (tmpreg = (reload_in_progress + || reload_completed) + ? operands[0] + : gen_reg_rtx (SImode), + equal = gen_rtx (CONST_INT, VOIDmode, + ~(n & (255 << i))))); + first = 0; + } + else + { + rtx constant; + rtx insn = + emit_insn (gen_subsi3 (tmpreg2 = (reload_in_progress + || reload_completed + || last) + ? operands[0] + : gen_reg_rtx (SImode), + tmpreg, + constant = gen_rtx (CONST_INT, VOIDmode, + n & (255 << i)))); + tmpreg = tmpreg2; + } + n &= ~(255 << i); + } + } + } + else + { + for (i = 30; i >= 0; i -= 2) + { + if (n & (3 << i)) + { + i -= 6; + if (i < 0) + i = 0; + if ((n & (255 << i)) == n) + last = 1; + if (first) + { + rtx equal; + rtx insn = + emit_insn (gen_movsi (tmpreg = (reload_in_progress + || reload_completed) + ? operands[0] + : gen_reg_rtx (SImode), + equal = gen_rtx (CONST_INT, VOIDmode, + n & (255 << i)))); + first = 0; + } + else + { + rtx constant; + rtx insn = + emit_insn (gen_addsi3 (tmpreg2 = (reload_in_progress + || reload_completed + || last) + ? 
operands[0] + : gen_reg_rtx (SImode), + tmpreg, + constant = gen_rtx (CONST_INT, VOIDmode, + n & (255 << i)))); + tmpreg = tmpreg2; + } + n &= ~(255 << i); + } + } + } + DONE; + } +") + +(define_insn "" + [(set (match_operand:SI 0 "general_operand" "=r,r,r,m,r") + (match_operand:SI 1 "general_operand" "m,K,r,r,S"))] + "(register_operand (operands[0], SImode) + && (GET_CODE (operands[1]) != CONST_INT + || const_ok_for_arm (INTVAL (operands[1])) + || const_ok_for_arm (~INTVAL (operands[1]))) + && (GET_CODE (operands[1]) != SYMBOL_REF + || CONSTANT_ADDRESS_P (operands[1]))) + || register_operand (operands[1], SImode)" "* switch (which_alternative) { - case 0: + case 2: return (arm_output_asm_insn (\"mov\\t%0, %1\", operands)); case 1: - return (output_mov_immediate (operands)); - case 2: + if (!const_ok_for_arm (INTVAL (operands[1]))) + { + operands[1] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[1])); + return arm_output_asm_insn (\"mvn\\t%0, %1\", operands); + } + return arm_output_asm_insn (\"mov\\t%0, %1\", operands); + case 0: if (GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0))) return (arm_output_llc (operands)); @@ -651,8 +2386,29 @@ return (arm_output_asm_insn (\"ldr\\t%0, %1\", operands)); case 3: return (arm_output_asm_insn (\"str\\t%1, %0\", operands)); + case 4: + return output_load_symbol (operands); } -") +" +[(set_attr "length" "2,*,*,*,4") + (set_attr "type" "load,*,*,store1,*")]) + +;; If copying one reg to another we can set the condition codes according to +;; its value. Such a move is common after a return from subroutine and the +;; result is being tested against zero. 
+ +(define_insn "" + [(set (reg:CC 24) (compare (match_operand:SI 1 "s_register_operand" "r") + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") (match_dup 1))] + "" + "* + if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG + && REGNO (operands[0]) == REGNO (operands[1])) + return arm_output_asm_insn (\"cmp\\t%0, #0\", operands); + return arm_output_asm_insn (\"subs\\t%0, %1, #0\", operands); +" +[(set_attr "conds" "set")]) ;; XXX The movhi stuff isn't as correct or as nice as it could be... @@ -663,10 +2419,10 @@ ;;(define_expand "loadhi" ;; [;; load the whole word (ARM realigns it if not on word boundary) -;; (set (match_operand:SI 2 "register_operand" "") +;; (set (match_operand:SI 2 "s_register_operand" "") ;; (mem:SI (match_operand:SI 1 "address_operand" ""))) ;; ;; quietly forget the upper 16 bits -;; (set (match_operand:HI 0 "register_operand" "") +;; (set (match_operand:HI 0 "s_register_operand" "") ;; (subreg:HI (match_dup 2) 0))] ;; "" ;; "" @@ -678,7 +2434,7 @@ ;;(define_expand "reloadhi" ;; [(set (reg:SI 10) ;; (mem:SI (match_operand:SI 1 "address_operand" ""))) -;; (set (match_operand:HI 0 "register_operand" "") +;; (set (match_operand:HI 0 "s_register_operand" "") ;; (subreg:HI (reg:SI 10) 0))] ;; "" "") @@ -690,19 +2446,24 @@ (match_dup 2)) (set (reg:SI 10) (ashiftrt:SI (match_operand 0 "" "") (const_int 8))) - (set (mem:QI (plus:SI (match_dup 1) (const_int 1))) + (set (mem:QI (match_dup 3)) (reg:QI 10))] "" " { operands[2] = gen_lowpart (QImode, operands[0]); operands[0] = gen_lowpart (SImode, operands[0]); + operands[3] = plus_constant (operands[1], 1); }") ;; Subroutine to store a half word from a register into memory. ;; Operand 0 is the source register (HImode) ;; Operand 1 is the destination address in a register (SImode) +;; In both this routine and the next, we must be careful not to spill +;; a memory address of reg+large_const into a seperate PLUS insn, since this +;; can generate unrecognizable rtl. 
+ (define_expand "storehi" [;; store the low byte (set (mem:QI (match_operand:SI 1 "" "")) (match_dup 3)) @@ -710,13 +2471,23 @@ (set (match_dup 2) (ashiftrt:SI (match_operand 0 "" "") (const_int 8))) ;; store the high byte - (set (mem:QI (plus:SI (match_dup 1) (const_int 1))) + (set (mem:QI (match_dup 4)) (subreg:QI (match_dup 2) 0))] ;explicit subreg safe "" " -{ operands[3] = gen_lowpart (QImode, operands[0]); +{ + enum rtx_code code = GET_CODE (operands[1]); + + if ((code == PLUS || code == MINUS) + && (GET_CODE (XEXP (operands[1], 1)) == REG + || GET_CODE (XEXP (operands[1], 0)) != REG)) + operands[1] = force_reg (SImode, operands[1]); + operands[4] = plus_constant (operands[1], 1); + operands[3] = gen_lowpart (QImode, operands[0]); operands[0] = gen_lowpart (SImode, operands[0]); - operands[2] = gen_reg_rtx (SImode); }") + operands[2] = gen_reg_rtx (SImode); +} +") ;; Subroutine to store a half word integer constant into memory. ;; Operand 0 is the constant @@ -726,16 +2497,23 @@ [;; store the low byte (set (mem:QI (match_operand:SI 1 "" "")) (match_operand 0 "" "")) ;; store the high byte - (set (mem:QI (plus:SI (match_dup 1) (const_int 1))) - (match_dup 2))] + (set (mem:QI (match_dup 3)) (match_dup 2))] "" " - { - int value = INTVAL (operands[0]); - - operands[0] = force_reg (QImode, gen_rtx (CONST_INT, VOIDmode, value & 255)); - operands[2] = force_reg (QImode, gen_rtx (CONST_INT, VOIDmode,(value>>8) & 255)); - } +{ + int value = INTVAL (operands[0]); + enum rtx_code code = GET_CODE (operands[1]); + + if ((code == PLUS || code == MINUS) + && (GET_CODE (XEXP (operands[1], 1)) == REG + || GET_CODE (XEXP (operands[1], 0)) != REG)) + operands[1] = force_reg (SImode, operands[1]); + + operands[0] = force_reg (QImode, gen_rtx (CONST_INT, VOIDmode, value & 255)); + operands[2] = force_reg (QImode, + gen_rtx (CONST_INT, VOIDmode,(value>>8) & 255)); + operands[3] = plus_constant (operands[1], 1); +} ") (define_expand "movhi" @@ -759,22 +2537,42 @@ { if (GET_CODE 
(operands[1]) == CONST_INT) { - insn = gen_storeinthi (operands[1], force_reg (SImode, XEXP (operands[0], 0))); + insn = gen_storeinthi (operands[1], XEXP (operands[0],0)); } else { if (GET_CODE (operands[1]) == MEM) - operands[1] = copy_to_reg (operands[1]); - insn = gen_storehi (operands[1], force_reg (SImode, XEXP (operands[0], 0))); + operands[1] = force_reg (HImode, operands[1]); + insn = gen_storehi (operands[1], XEXP (operands[0], 0)); } } -#if 0 - else if (GET_CODE (operands[1]) == MEM) + else if (GET_CODE (operands[1]) == CONST_INT + && !(const_ok_for_arm (INTVAL (operands[1])) + || const_ok_for_arm (~INTVAL (operands[1])))) { - insn = gen_loadhi (operands[0], XEXP (operands[1], 0), - gen_reg_rtx (SImode)); + rtx reg, reg2; + + /* no need to be clever, this will always take two insns. + The top sixteen bits should be all zeros or all ones. */ + if (INTVAL (operands[1]) < 0) + { + emit_insn (gen_movsi (reg = gen_reg_rtx (SImode), + GEN_INT (INTVAL (operands[1]) + | ~(0x0ff00)))); + emit_insn (gen_addsi3 (reg2 = gen_reg_rtx (SImode), reg, + GEN_INT (-((~INTVAL (operands[1])) + & 0xff)))); + } + else + { + emit_insn (gen_movsi (reg = gen_reg_rtx (SImode), + GEN_INT (INTVAL (operands[1]) & 0xff00))); + emit_insn (gen_addsi3 (reg2 = gen_reg_rtx (SImode), reg, + GEN_INT (INTVAL (operands[1]) & 0x00ff))); + } + insn = gen_rtx (SET, HImode, operands[0], + gen_rtx (SUBREG, HImode, reg2, 0)); } -#endif else insn = gen_rtx (SET, VOIDmode, operands[0], operands[1]); } @@ -787,576 +2585,3674 @@ (define_insn "" [(set (match_operand:HI 0 "general_operand" "=r,r,r,m") - (match_operand:HI 1 "general_operand" "r,n,m,r"))] - "" + (match_operand:HI 1 "general_operand" "r,K,m,r"))] + "(register_operand (operands[0], HImode) + && (GET_CODE (operands[1]) != CONST_INT + || const_ok_for_arm (INTVAL (operands[1])) + || const_ok_for_arm (~INTVAL (operands[1])))) + || register_operand (operands[1], HImode)" "* switch (which_alternative) { - case 0: return (arm_output_asm_insn 
(\"mov\\t%0, %1\\t@movhi\", operands)); - case 1: return (output_mov_immediate (operands)); - case 2: return (arm_output_asm_insn (\"ldr\\t%0, %1\\t@movhi\", operands)); - case 3: return (arm_output_asm_insn (\"str\\t%1, %0\\t@movhi\", operands)); + case 1: + if (!const_ok_for_arm (INTVAL (operands[1]))) + { + operands[1] = GEN_INT (~INTVAL (operands[1])); + return arm_output_asm_insn (\"mvn\\t%0, %1\", operands); + } + /* fall through */ + case 0: + return arm_output_asm_insn (\"mov\\t%0, %1\\t@movhi\", operands); + case 2: + return arm_output_asm_insn (\"ldr\\t%0, %1\\t@movhi\", operands); + case 3: + return arm_output_asm_insn (\"str\\t%1, %0\\t@movhi\", operands); + } +" +[(set_attr "type" "*,*,load,store1")]) + +(define_expand "movqi" + [(set (match_operand:QI 0 "general_operand" "") + (match_operand:QI 1 "general_operand" ""))] + "" + " + /* Everything except mem = const or mem = mem can be done easily */ + + if (!(reload_in_progress || reload_completed)) + { + rtx reg; + if (GET_CODE (operands[1]) == CONST_INT) + { + emit_insn (gen_movsi (reg = gen_reg_rtx (SImode), operands[1])); + operands[1] = gen_rtx (SUBREG, QImode, reg, 0); + } } + if (GET_CODE (operands[0]) == MEM) + operands[1] = force_reg (QImode, operands[1]); ") -(define_insn "movqi" + +(define_insn "" [(set (match_operand:QI 0 "general_operand" "=r,r,r,m") - (match_operand:QI 1 "general_operand" "r,n,m,r"))] - "" + (match_operand:QI 1 "general_operand" "r,K,m,r"))] + "register_operand (operands[0], QImode) + || register_operand (operands[1], QImode)" "* switch (which_alternative) { + case 1: + if (INTVAL (operands[1]) < 0) + { + operands[1] = GEN_INT (~INTVAL (operands[1])); + return arm_output_asm_insn (\"mvn\\t%0, %1\", operands); + } case 0: return (arm_output_asm_insn (\"mov\\t%0, %1\", operands)); - case 1: - return (output_mov_immediate (operands)); case 2: return (arm_output_asm_insn (\"ldrb\\t%0, %1\", operands)); case 3: return (arm_output_asm_insn (\"strb\\t%1, %0\", operands)); } -") 
+" +[(set_attr "type" "*,*,load,store1")]) (define_insn "movsf" - [(set (match_operand:SF 0 "general_operand" "=f,f,m,f,r,r") - (match_operand:SF 1 "general_operand" "fG,m,f,r,f,r"))] + [(set (match_operand:SF 0 "general_operand" "=f,f,f,m,f,r,r,r,m") + (match_operand:SF 1 "general_operand" "fG,H,m,f,r,f,r,m,r"))] "" "* +{ + REAL_VALUE_TYPE r; + switch (which_alternative) { case 0: - return (arm_output_asm_insn (\"mvfs\\t%0, %1\", operands)); + return arm_output_asm_insn (\"mvfs\\t%0, %1\", operands); case 1: - return (arm_output_asm_insn (\"ldfs\\t%0, %1\", operands)); + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); + r = REAL_VALUE_NEGATE (r); + operands[1] = CONST_DOUBLE_FROM_REAL_VALUE (r, GET_MODE (operands[1])); + return arm_output_asm_insn (\"mnfs\\t%0, %1\", operands); case 2: - return (arm_output_asm_insn (\"stfs\\t%1, %0\", operands)); + return arm_output_asm_insn (\"ldfs\\t%0, %1\", operands); case 3: - arm_output_asm_insn(\"stmfd\\tsp!, {%1}\", operands); - return (arm_output_asm_insn (\"ldfs\\t%0, [sp],#4\", operands)); + return arm_output_asm_insn (\"stfs\\t%1, %0\", operands); case 4: - arm_output_asm_insn(\"stfs\\t%1, [sp,#-4]!\", operands); - return (arm_output_asm_insn (\"ldmfd\\tsp!, {%0}\", operands)); + arm_output_asm_insn(\"stmfd\\tsp!, {%1}\", operands); + return arm_output_asm_insn (\"ldfs\\t%0, [sp],#4\", operands); case 5: - return (arm_output_asm_insn (\"mov\\t%0, %1\", operands)); + arm_output_asm_insn(\"stfs\\t%1, [sp,#-4]!\", operands); + return arm_output_asm_insn (\"ldmfd\\tsp!, {%0}\", operands); + case 6: + return arm_output_asm_insn (\"mov\\t%0, %1\", operands); + case 7: + return arm_output_asm_insn (\"ldr\\t%0, %1\\t@ float\", operands); + case 8: + return arm_output_asm_insn (\"str\\t%1, %0\\t@ float\", operands); } +} +" +[(set_attr "length" "1,1,1,1,2,2,1,1,1") + (set_attr "type" "float,float,f_load,f_store,r_mem_f,f_mem_r,*,load,store1")]) + +(define_expand "movdf" + [(parallel [(set (match_operand:DF 0 "general_operand" 
"") + (match_operand:DF 1 "general_operand" "")) + (clobber (match_scratch:SI 2 ""))])] + "" + " + if (GET_CODE (operands[0]) == MEM) + operands[1] = force_reg (DFmode, operands[1]); ") -(define_insn "movdf" - [(set (match_operand:DF 0 "general_operand" "=f,f,m,f,r,r") - (match_operand:DF 1 "general_operand" "fG,m,f,r,f,r"))] +;; Reloading a df mode value stored in integer regs to memory can require a +;; scratch reg. +(define_expand "reload_outdf" + [(parallel [(set (match_operand:DF 0 "reload_memory_operand" "=o") + (match_operand:DF 1 "s_register_operand" "r")) + (clobber (match_operand:SI 2 "s_register_operand" "=&r"))])] "" + "") + +(define_insn "" + [(set (match_operand:DF 0 "general_operand" "=r,Q,r,o,f,f,f,f,m,!f,!r,r") + (match_operand:DF 1 "general_operand" + "Q,r,?o,?r,?f,!G,!H,m,f,r,f,??r")) + (clobber (match_scratch:SI 2 "=X,X,X,&r,X,X,X,X,X,X,X,X"))] + "GET_CODE (operands[0]) != MEM || register_operand (operands[1], DFmode)" + "* +{ + REAL_VALUE_TYPE r; + rtx ops[3]; + + switch (which_alternative) + { + case 0: + operands[1] = XEXP (operands[1], 0); + return arm_output_asm_insn (\"ldmia\\t%1, {%0, %R0}\\t@ double\", + operands); + case 1: + operands[0] = XEXP (operands[0], 0); + return arm_output_asm_insn (\"stmia\\t%0, {%1, %R1}\\t@ double\", + operands); + case 2: + ops[0] = operands[0]; + ops[1] = XEXP (XEXP (operands[1], 0), 0); + ops[2] = XEXP (XEXP (operands[1], 0), 1); + if (!INTVAL (ops[2]) || const_ok_for_arm (INTVAL (ops[2]))) + arm_output_asm_insn (\"add\\t%0, %1, %2\", ops); + else + arm_output_asm_insn (\"sub\\t%0, %1, #%n2\", ops); + return arm_output_asm_insn (\"ldmia\\t%0, {%0, %R0}\\t@ double\", + operands); + case 3: + + ops[0] = operands[2]; + ops[1] = XEXP (XEXP (operands[0], 0), 0); + ops[2] = XEXP (XEXP (operands[0], 0), 1); + if (!INTVAL (ops[2]) || const_ok_for_arm (INTVAL (ops[2]))) + arm_output_asm_insn (\"add\\t%0, %1, %2\", ops); + else + arm_output_asm_insn (\"sub\\t%0, %1, #%n2\", ops); + return arm_output_asm_insn 
(\"stmia\\t%2, {%1, %R1}\\t@ double\", + operands); + case 4: + case 5: + return arm_output_asm_insn (\"mvfd\\t%0, %1\", operands); + case 6: + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); + r = REAL_VALUE_NEGATE (r); + operands[1] = CONST_DOUBLE_FROM_REAL_VALUE (r, GET_MODE (operands[1])); + return arm_output_asm_insn (\"mnfd\\t%0, %1\", operands); + case 7: return arm_output_asm_insn (\"ldfd\\t%0, %1\", operands); + case 8: return arm_output_asm_insn (\"stfd\\t%1, %0\", operands); + case 9: return output_mov_double_fpu_from_arm (operands); + case 10: return output_mov_double_arm_from_fpu (operands); + case 11: return output_move_double (operands); + } +} +" +[(set_attr "length" "1,1,2,2,1,1,1,1,1,2,2,2") + (set_attr "type" +"load,store2,load,store2,float,float,float,f_load,f_store,r_mem_f,f_mem_r,*")]) + +(define_insn "movxf" + [(set (match_operand:XF 0 "general_operand" "=f,f,f,m,f,r,r") + (match_operand:XF 1 "general_operand" "fG,H,m,f,r,f,r"))] + "ENABLE_XF_PATTERNS" "* +{ + REAL_VALUE_TYPE r; + switch (which_alternative) { - case 0: return (arm_output_asm_insn (\"mvfd\\t%0, %1\", operands)); - case 1: return (arm_output_asm_insn (\"ldfd\\t%0, %1\", operands)); - case 2: return (arm_output_asm_insn (\"stfd\\t%1, %0\", operands)); - case 3: return (output_mov_double_fpu_from_arm (operands)); - case 4: return (output_mov_double_arm_from_fpu (operands)); - case 5: return (output_move_double (operands)); + case 0: return arm_output_asm_insn (\"mvfe\\t%0, %1\", operands); + case 1: + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); + r = REAL_VALUE_NEGATE (r); + operands[1] = CONST_DOUBLE_FROM_REAL_VALUE (r, GET_MODE (operands[1])); + return arm_output_asm_insn (\"mnfe\\t%0, %1\", operands); + case 2: return arm_output_asm_insn (\"ldfe\\t%0, %1\", operands); + case 3: return arm_output_asm_insn (\"stfe\\t%1, %0\", operands); + case 4: return output_mov_long_double_fpu_from_arm (operands); + case 5: return output_mov_long_double_arm_from_fpu (operands); + case 6: 
return output_mov_long_double_arm_from_arm (operands); } -") +} +" +[(set_attr "length" "1,1,1,1,2,2,3") + (set_attr "type" "float,float,f_load,f_store,r_mem_f,f_mem_r,*")]) -;; Comparison and test insns -(define_insn "cmpsi" - [(set (cc0) - (compare (match_operand:SI 0 "register_operand" "r") - (match_operand:SI 1 "arm_rhs_operand" "rI")))] - "" - "* - return (arm_output_asm_insn (\"cmp\\t%0, %1\", operands)); -") +;; load- and store-multiple insns +;; The arm can load/store any set of registers, provided that they are in +;; ascending order; but that is beyond GCC so stick with what it knows. -(define_insn "tstsi" - [(set (cc0) (match_operand:SI 0 "register_operand" "r"))] +(define_expand "load_multiple" + [(match_par_dup 3 [(set (match_operand:SI 0 "" "") + (match_operand:SI 1 "" "")) + (use (match_operand:SI 2 "" ""))])] "" - "* - return (arm_output_asm_insn (\"cmp\\t%0, #0\", operands)); + " + /* Support only fixed point registers */ + if (GET_CODE (operands[2]) != CONST_INT + || INTVAL (operands[2]) > 14 + || INTVAL (operands[2]) < 2 + || GET_CODE (operands[1]) != MEM + || GET_CODE (operands[0]) != REG + || REGNO (operands[0]) > 14 + || REGNO (operands[0]) + INTVAL (operands[2]) > 15) + FAIL; + + operands[3] + = arm_gen_load_multiple (REGNO (operands[0]), INTVAL (operands[2]), + force_reg (SImode, XEXP (operands[1], 0)), + TRUE, FALSE); ") +;; Load multiple with write-back + (define_insn "" - [(set (cc0) - (compare (match_operand:SI 0 "register_operand" "r") - (neg:SI (match_operand:SI 1 "arm_rhs_operand" "rI"))))] - "" + [(match_parallel 0 "load_multiple_operation" + [(set (match_operand:SI 1 "s_register_operand" "+r") + (plus:SI (match_dup 1) + (match_operand:SI 2 "immediate_operand" "n"))) + (set (match_operand:SI 3 "s_register_operand" "=r") + (mem:SI (match_dup 1)))])] + "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))" "* - return (arm_output_asm_insn (\"cmn\\t%0, %1\", operands)); -") +{ + rtx ops[3]; + int count = XVECLEN (operands[0], 
0); -(define_insn "cmpsf" - [(set (cc0) - (compare (match_operand:SF 0 "register_operand" "f") - (match_operand:SF 1 "fpu_rhs_operand" "fG")))] - "" - "* - return (arm_output_asm_insn (\"cmf\\t%0, %1\", operands)); -") + ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0); + ops[1] = SET_DEST (XVECEXP (operands[0], 0, 1)); + ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 2)); -(define_insn "cmpdf" - [(set (cc0) - (compare (match_operand:DF 0 "register_operand" "f") - (match_operand:DF 1 "fpu_rhs_operand" "fG")))] - "" - "* - return (arm_output_asm_insn (\"cmf\\t%0, %1\", operands)); -") - -;; Conditional branch insns + return arm_output_asm_insn (\"ldmia\\t%0!, {%1-%2}\\t@ load multiple\", ops); +} +" +[(set_attr "type" "load")]) -(define_insn "beq" - [(set (pc) - (if_then_else (eq (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] - "" - "* - return (arm_output_asm_insn (\"beq\\t%l0\", operands)); -") +;; Ordinary load multiple -(define_insn "bne" - [(set (pc) - (if_then_else (ne (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] +(define_insn "" + [(match_parallel 0 "load_multiple_operation" + [(set (match_operand:SI 1 "s_register_operand" "=r") + (match_operand:SI 2 "indirect_operand" "Q"))])] "" "* - return (arm_output_asm_insn (\"bne\\t%l0\", operands)); -") - -(define_insn "bgt" - [(set (pc) - (if_then_else (gt (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] +{ + rtx ops[3]; + int count = XVECLEN (operands[0], 0); + + ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0); + ops[1] = SET_DEST (XVECEXP (operands[0], 0, 0)); + ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 1)); + + return arm_output_asm_insn (\"ldmia\\t%0, {%1-%2}\\t@ load multiple\", ops); +} +" +[(set_attr "type" "load")]) + +(define_expand "store_multiple" + [(match_par_dup 3 [(set (match_operand:SI 0 "" "") + (match_operand:SI 1 "" "")) + (use (match_operand:SI 2 "" ""))])] "" - "* - return (arm_output_asm_insn 
(\"bgt\\t%l0\", operands)); + " + /* Support only fixed point registers */ + if (GET_CODE (operands[2]) != CONST_INT + || INTVAL (operands[2]) > 14 + || INTVAL (operands[2]) < 2 + || GET_CODE (operands[1]) != REG + || GET_CODE (operands[0]) != MEM + || REGNO (operands[1]) > 14 + || REGNO (operands[1]) + INTVAL (operands[2]) > 15) + FAIL; + + operands[3] + = arm_gen_store_multiple (REGNO (operands[1]), INTVAL (operands[2]), + force_reg (SImode, XEXP (operands[0], 0)), + TRUE, FALSE); ") -(define_insn "ble" - [(set (pc) - (if_then_else (le (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] - "" +;; Store multiple with write-back + +(define_insn "" + [(match_parallel 0 "store_multiple_operation" + [(set (match_operand:SI 1 "s_register_operand" "+r") + (plus:SI (match_dup 1) + (match_operand:SI 2 "immediate_operand" "n"))) + (set (mem:SI (match_dup 1)) + (match_operand:SI 3 "s_register_operand" "r"))])] + "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))" "* - return (arm_output_asm_insn (\"ble\\t%l0\", operands)); -") +{ + rtx ops[3]; + int count = XVECLEN (operands[0], 0); -(define_insn "bge" - [(set (pc) - (if_then_else (ge (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] + ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0); + ops[1] = SET_SRC (XVECEXP (operands[0], 0, 1)); + ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 2)); + + return arm_output_asm_insn (\"stmia\\t%0!, {%1-%2}\\t@ str multiple\", ops); +} +" +[(set (attr "type") + (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4)) + (const_string "store2") + (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 5)) + (const_string "store3")] + (const_string "store4")))]) + +;; Ordinary store multiple + +(define_insn "" + [(match_parallel 0 "store_multiple_operation" + [(set (match_operand:SI 2 "indirect_operand" "=Q") + (match_operand:SI 1 "s_register_operand" "r"))])] "" "* - return (arm_output_asm_insn (\"bge\\t%l0\", operands)); +{ + 
rtx ops[3]; + int count = XVECLEN (operands[0], 0); + + ops[0] = XEXP (SET_DEST (XVECEXP (operands[0], 0, 0)), 0); + ops[1] = SET_SRC (XVECEXP (operands[0], 0, 0)); + ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 1)); + + return arm_output_asm_insn (\"stmia\\t%0, {%1-%2}\\t@ str multiple\", ops); +} +" +[(set (attr "type") + (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 3)) + (const_string "store2") + (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4)) + (const_string "store3")] + (const_string "store4")))]) + +;; Move a block of memory if it is word aligned and MORE than 2 words long. +;; We could let this apply for blocks of less than this, but it clobbers so +;; many registers that there is then probably a better way. + +;; If optimizing, output redundant moves with REG_NOTES on them, this +;; produces better code. + +(define_expand "movstrsi" + [(set (match_operand:BLK 0 "general_operand" "=m") + (match_operand:BLK 1 "general_operand" "m")) + (use (match_operand:SI 2 "immediate_operand" "n")) + (use (match_operand:SI 3 "immediate_operand" "n")) + (clobber (reg:SI 0)) + (clobber (reg:SI 1)) + (clobber (reg:SI 2)) + (clobber (reg:SI 3)) + (clobber (match_scratch:SI 4 "=+r")) + (clobber (match_scratch:SI 5 "=+r"))] + "" + " +{ + int words_to_go; + int i, r; + rtx const_sxteen = gen_rtx (CONST_INT, SImode, 16); + rtx src = gen_reg_rtx (SImode); + rtx dst = gen_reg_rtx (SImode); + rtx st_src, st_dst, end_src, end_dst, fin_src, fin_dst; + extern int optimize; + + if (GET_CODE (operands[2]) != CONST_INT + || GET_CODE (operands[3]) != CONST_INT + || INTVAL (operands[2]) % 4 != 0 + || INTVAL (operands[2]) < 4 + || INTVAL (operands[2]) > 64 + || INTVAL (operands[3]) < 4 + || INTVAL (operands[3]) % 4 != 0) + FAIL; + emit_move_insn (dst, st_dst = force_reg (SImode, XEXP (operands[0], 0))); + emit_move_insn (src, st_src = force_reg (SImode, XEXP (operands[1], 0))); + fin_src = src; + fin_dst = dst; + + for (i = 0, words_to_go = INTVAL (operands[2]) 
/ 4; words_to_go >= 2; i+=4) + { + emit_insn (arm_gen_load_multiple (0, words_to_go > 4 ? 4 : words_to_go, + src, TRUE, TRUE)); + emit_insn (arm_gen_store_multiple (0, words_to_go > 4 ? 4 : words_to_go, + dst, TRUE, TRUE)); + if (optimize) + for (r = (words_to_go > 4) ? 3 : words_to_go - 1; r >= 0; r--) + { + rtx note; + note = emit_move_insn (gen_reg_rtx (SImode), + gen_rtx (REG, SImode, r)); + REG_NOTES (note) = gen_rtx (EXPR_LIST, REG_EQUIV, + gen_rtx (MEM, SImode, + plus_constant (st_src, 4*(i+r))), + REG_NOTES (note)); + REG_NOTES (note) = gen_rtx (EXPR_LIST, REG_EQUIV, + gen_rtx (MEM, SImode, + plus_constant (st_dst, 4*(i+r))), + REG_NOTES (note)); + } + words_to_go -= words_to_go < 4 ? words_to_go : 4; + } + if (words_to_go) + { + rtx sreg; + + emit_move_insn (sreg = gen_reg_rtx (SImode), gen_rtx (MEM, SImode, src)); + emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4)); + emit_move_insn (gen_rtx (MEM, SImode, dst), sreg); + emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4)); + } + if (optimize) + { + /* Insns for the REG_NOTES: These notes tell the optimiser where the + index registers have got to so that consecutive block moves of + contiguous data work efficiently */ + + end_src = emit_move_insn (fin_src, fin_src); + REG_NOTES (end_src) = gen_rtx(EXPR_LIST, REG_EQUAL, + plus_constant (st_src, INTVAL (operands[2])), + REG_NOTES (end_src)); + end_dst = emit_move_insn (fin_dst, fin_dst); + REG_NOTES (end_dst) = gen_rtx(EXPR_LIST, REG_EQUAL, + plus_constant (st_dst, INTVAL (operands[2])), + REG_NOTES (end_dst)); + } + DONE; +} ") + -(define_insn "blt" - [(set (pc) - (if_then_else (lt (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] +;; Comparison and test insns + +(define_expand "cmpsi" + [(set (reg:CC 24) + (compare:CC (match_operand:SI 0 "s_register_operand" "") + (match_operand:SI 1 "arm_add_operand" "")))] "" - "* - return (arm_output_asm_insn (\"blt\\t%l0\", operands)); + " +{ + arm_compare_op0 
= operands[0]; + arm_compare_op1 = operands[1]; + arm_compare_fp = 0; + DONE; +} ") -(define_insn "bgtu" - [(set (pc) - (if_then_else (gtu (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] +(define_expand "cmpsf" + [(set (reg:CC 24) + (compare:CC (match_operand:SF 0 "s_register_operand" "") + (match_operand:SF 1 "fpu_rhs_operand" "")))] "" - "* - return (arm_output_asm_insn (\"bhi\\t%l0\", operands)); + " +{ + arm_compare_op0 = operands[0]; + arm_compare_op1 = operands[1]; + arm_compare_fp = 1; + DONE; +} ") -(define_insn "bleu" - [(set (pc) - (if_then_else (leu (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] +(define_expand "cmpdf" + [(set (reg:CC 24) + (compare:CC (match_operand:DF 0 "s_register_operand" "") + (match_operand:DF 1 "fpu_rhs_operand" "")))] "" - "* - return (arm_output_asm_insn (\"bls\\t%l0\", operands)); + " +{ + arm_compare_op0 = operands[0]; + arm_compare_op1 = operands[1]; + arm_compare_fp = 1; + DONE; +} ") -(define_insn "bgeu" - [(set (pc) - (if_then_else (geu (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] - "" - "* - return (arm_output_asm_insn (\"bhs\\t%l0\", operands)); +(define_expand "cmpxf" + [(set (reg:CC 24) + (compare:CC (match_operand:XF 0 "s_register_operand" "") + (match_operand:XF 1 "fpu_rhs_operand" "")))] + "ENABLE_XF_PATTERNS" + " +{ + arm_compare_op0 = operands[0]; + arm_compare_op1 = operands[1]; + arm_compare_fp = 1; + DONE; +} ") -(define_insn "bltu" - [(set (pc) - (if_then_else (ltu (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] +(define_insn "" + [(set (match_operand 0 "cc_register" "") + (compare (match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_add_operand" "rL")))] "" "* - return (arm_output_asm_insn (\"blo\\t%l0\", operands)); -") - -;; Inverted conditional branch insns + if (GET_CODE (operands[2]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[2]))) + return arm_output_asm_insn (\"cmn\\t%1, #%n2\", 
operands); + return arm_output_asm_insn (\"cmp\\t%1, %2\", operands); +" +[(set_attr "conds" "set")]) (define_insn "" - [(set (pc) - (if_then_else (eq (cc0) (const_int 0)) - (pc) - (label_ref (match_operand 0 "" ""))))] + [(set (match_operand 0 "cc_register" "") + (compare (match_operand:SI 1 "s_register_operand" "r") + (neg:SI (match_operand:SI 2 "s_register_operand" "r"))))] "" "* - return (arm_output_asm_insn (\"bne\\t%l0\", operands)); -") + return arm_output_asm_insn (\"cmn\\t%1, %2\", operands); +" +[(set_attr "conds" "set")]) (define_insn "" - [(set (pc) - (if_then_else (ne (cc0) (const_int 0)) - (pc) - (label_ref (match_operand 0 "" ""))))] + [(set (match_operand 0 "cc_register" "") + (compare (match_operand:SI 1 "s_register_operand" "r") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "arm_rhs_operand" "rn")])))] "" "* - return (arm_output_asm_insn (\"beq\\t%l0\", operands)); -") + return output_shift_compare (operands, FALSE); +" +[(set_attr "conds" "set")]) (define_insn "" - [(set (pc) - (if_then_else (gt (cc0) (const_int 0)) - (pc) - (label_ref (match_operand 0 "" ""))))] + [(set (match_operand 0 "cc_register" "") + (compare (match_operand:SI 1 "s_register_operand" "r") + (neg:SI (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "arm_rhs_operand" "rn")]))))] "" "* - return (arm_output_asm_insn (\"ble\\t%l0\", operands)); -") + return output_shift_compare (operands, TRUE); +" +[(set_attr "conds" "set")]) (define_insn "" - [(set (pc) - (if_then_else (le (cc0) (const_int 0)) - (pc) - (label_ref (match_operand 0 "" ""))))] + [(set (reg:CCFP 24) + (compare:CCFP (match_operand:SF 0 "s_register_operand" "f,f") + (match_operand:SF 1 "fpu_add_operand" "fG,H")))] "" "* - return (arm_output_asm_insn (\"bgt\\t%l0\", operands)); -") +{ + REAL_VALUE_TYPE r; + + switch (which_alternative) + { + case 0: + return arm_output_asm_insn (\"cmf\\t%0, 
%1\", operands); + case 1: + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); + r = REAL_VALUE_NEGATE (r); + operands[1] = CONST_DOUBLE_FROM_REAL_VALUE (r, GET_MODE (operands[1])); + return arm_output_asm_insn (\"cnf\\t%0, %1\", operands); + } +} +" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) (define_insn "" - [(set (pc) - (if_then_else (ge (cc0) (const_int 0)) - (pc) - (label_ref (match_operand 0 "" ""))))] + [(set (reg:CCFP 24) + (compare:CCFP (match_operand:DF 0 "s_register_operand" "f,f") + (match_operand:DF 1 "fpu_add_operand" "fG,H")))] "" "* - return (arm_output_asm_insn (\"blt\\t%l0\", operands)); -") +{ + REAL_VALUE_TYPE r; -(define_insn "" - [(set (pc) - (if_then_else (lt (cc0) (const_int 0)) - (pc) - (label_ref (match_operand 0 "" ""))))] - "" - "* - return (arm_output_asm_insn (\"bge\\t%l0\", operands)); -") + switch (which_alternative) + { + case 0: + return arm_output_asm_insn (\"cmf\\t%0, %1\", operands); + case 1: + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); + r = REAL_VALUE_NEGATE (r); + operands[1] = CONST_DOUBLE_FROM_REAL_VALUE (r, GET_MODE (operands[1])); + return arm_output_asm_insn (\"cnf\\t%0, %1\", operands); + } +} +" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) (define_insn "" - [(set (pc) - (if_then_else (gtu (cc0) (const_int 0)) - (pc) - (label_ref (match_operand 0 "" ""))))] + [(set (reg:CCFP 24) + (compare:CCFP (float_extend:DF + (match_operand:SF 0 "s_register_operand" "f,f")) + (match_operand:DF 1 "fpu_add_operand" "fG,H")))] "" "* - return (arm_output_asm_insn (\"bls\\t%l0\", operands)); -") +{ + REAL_VALUE_TYPE r; + + switch (which_alternative) + { + case 0: + return arm_output_asm_insn (\"cmf\\t%0, %1\", operands); + case 1: + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); + r = REAL_VALUE_NEGATE (r); + operands[1] = CONST_DOUBLE_FROM_REAL_VALUE (r, GET_MODE (operands[1])); + return arm_output_asm_insn (\"cnf\\t%0, %1\", operands); + } +} +" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) 
(define_insn "" - [(set (pc) - (if_then_else (leu (cc0) (const_int 0)) - (pc) - (label_ref (match_operand 0 "" ""))))] + [(set (reg:CCFP 24) + (compare:CCFP (match_operand:DF 0 "s_register_operand" "f") + (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f"))))] "" "* - return (arm_output_asm_insn (\"bhi\\t%l0\", operands)); -") + return arm_output_asm_insn (\"cmf\\t%0, %1\", operands); +" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) (define_insn "" - [(set (pc) - (if_then_else (geu (cc0) (const_int 0)) - (pc) - (label_ref (match_operand 0 "" ""))))] - "" + [(set (reg:CCFP 24) + (compare:CCFP (match_operand:XF 0 "s_register_operand" "f,f") + (match_operand:XF 1 "fpu_add_operand" "fG,H")))] + "ENABLE_XF_PATTERNS" "* - return (arm_output_asm_insn (\"blo\\t%l0\", operands)); -") +{ + REAL_VALUE_TYPE r; + + switch (which_alternative) + { + case 0: + return arm_output_asm_insn (\"cmf\\t%0, %1\", operands); + case 1: + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); + r = REAL_VALUE_NEGATE (r); + operands[1] = CONST_DOUBLE_FROM_REAL_VALUE (r, GET_MODE (operands[1])); + return arm_output_asm_insn (\"cnf\\t%0, %1\", operands); + } +} +" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) (define_insn "" - [(set (pc) - (if_then_else (ltu (cc0) (const_int 0)) - (pc) - (label_ref (match_operand 0 "" ""))))] + [(set (reg:CCFPE 24) + (compare:CCFPE (match_operand:SF 0 "s_register_operand" "f,f") + (match_operand:SF 1 "fpu_add_operand" "fG,H")))] "" "* - return (arm_output_asm_insn (\"bhs\\t%l0\", operands)); -") - -;; Jump and linkage insns -;; `return' is still a jump-to-epilogue... 
+{ + REAL_VALUE_TYPE r; -(define_insn "jump" - [(set (pc) - (label_ref (match_operand 0 "" "")))] - "" - "* - return (arm_output_asm_insn (\"b\\t%l0\", operands)); -") + switch (which_alternative) + { + case 0: + return arm_output_asm_insn (\"cmfe\\t%0, %1\", operands); + case 1: + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); + r = REAL_VALUE_NEGATE (r); + operands[1] = CONST_DOUBLE_FROM_REAL_VALUE (r, GET_MODE (operands[1])); + return arm_output_asm_insn (\"cnfe\\t%0, %1\", operands); + } +} +" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) -(define_insn "call" - [(call (match_operand 0 "memory_operand" "m") - (match_operand 1 "general_operand" "g")) - (clobber (reg:SI 14))] +(define_insn "" + [(set (reg:CCFPE 24) + (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f,f") + (match_operand:DF 1 "fpu_add_operand" "fG,H")))] "" "* - return (output_call (operands)); -") +{ + REAL_VALUE_TYPE r; -(define_insn "call_value" - [(set (match_operand 0 "" "=rf") - (call (match_operand 1 "memory_operand" "m") - (match_operand 2 "general_operand" "g"))) - (clobber (reg:SI 14))] + switch (which_alternative) + { + case 0: + return arm_output_asm_insn (\"cmfe\\t%0, %1\", operands); + case 1: + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); + r = REAL_VALUE_NEGATE (r); + operands[1] = CONST_DOUBLE_FROM_REAL_VALUE (r, GET_MODE (operands[1])); + return arm_output_asm_insn (\"cnfe\\t%0, %1\", operands); + } +} +" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) + +(define_insn "" + [(set (reg:CCFPE 24) + (compare:CCFPE (float_extend:DF + (match_operand:SF 0 "s_register_operand" "f,f")) + (match_operand:DF 1 "fpu_add_operand" "fG,H")))] "" "* - return (output_call (&operands[1])); -") +{ + REAL_VALUE_TYPE r; -;; Allow calls to SYMBOL_REFs specially as they are not valid general addresses -;; The 'a' causes the operand to be treated as an address, i.e. no '#' output. 
+ switch (which_alternative) + { + case 0: + return arm_output_asm_insn (\"cmfe\\t%0, %1\", operands); + case 1: + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); + r = REAL_VALUE_NEGATE (r); + operands[1] = CONST_DOUBLE_FROM_REAL_VALUE (r, GET_MODE (operands[1])); + return arm_output_asm_insn (\"cnfe\\t%0, %1\", operands); + } +} +" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) (define_insn "" - [(call (mem:SI (match_operand:SI 0 "" "i")) - (match_operand:SI 1 "general_operand" "g")) - (clobber (reg:SI 14))] - "GET_CODE (operands[0]) == SYMBOL_REF" + [(set (reg:CCFPE 24) + (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f") + (float_extend:DF + (match_operand:SF 1 "s_register_operand" "f"))))] + "" "* - return (arm_output_asm_insn (\"bl\\t%a0\", operands)); -") + return arm_output_asm_insn (\"cmfe\\t%0, %1\", operands); +" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) (define_insn "" - [(set (match_operand 0 "register_operand" "=rf") - (call (mem:SI (match_operand:SI 1 "" "i")) - (match_operand:SI 2 "general_operand" "g"))) - (clobber (reg:SI 14))] - "GET_CODE(operands[1]) == SYMBOL_REF" + [(set (reg:CCFPE 24) + (compare:CCFPE (match_operand:XF 0 "s_register_operand" "f,f") + (match_operand:XF 1 "fpu_add_operand" "fG,H")))] + "ENABLE_XF_PATTERNS" "* - return (arm_output_asm_insn (\"bl\\t%a1\", operands)); -") +{ + REAL_VALUE_TYPE r; -;; Call subroutine returning any type. 
+ switch (which_alternative) + { + case 0: + return arm_output_asm_insn (\"cmfe\\t%0, %1\", operands); + case 1: + REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]); + r = REAL_VALUE_NEGATE (r); + operands[1] = CONST_DOUBLE_FROM_REAL_VALUE (r, GET_MODE (operands[1])); + return arm_output_asm_insn (\"cnfe\\t%0, %1\", operands); + } +} +" +[(set_attr "conds" "set") + (set_attr "type" "f_2_r")]) -(define_expand "untyped_call" - [(parallel [(call (match_operand 0 "" "") - (const_int 0)) - (match_operand 1 "" "") - (match_operand 2 "" "")])] +; This insn allows redundant compares to be removed by cse, nothing should +; ever appear in the output file since (set (reg x) (reg x)) is a no-op that +; is deleted later on. The match_dup will match the mode here, so that +; mode changes of the condition codes aren't lost by this even though we don't +; specify what they are. + +(define_insn "" + [(set (match_operand 0 "cc_register" "") (match_dup 0))] + "" + "\\t@ deleted compare" +[(set_attr "conds" "set") + (set_attr "length" "0")]) + + +;; Conditional branch insns + +(define_expand "beq" + [(set (pc) + (if_then_else (eq (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] "" " { - int i; + operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") - emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx)); +(define_expand "bne" + [(set (pc) + (if_then_else (ne (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") - for (i = 0; i < XVECLEN (operands[2], 0); i++) - { - rtx set = XVECEXP (operands[2], 0, i); - emit_move_insn (SET_DEST (set), SET_SRC (set)); - } +(define_expand "bgt" + [(set (pc) + (if_then_else (gt (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1, + 
arm_compare_fp); +} +") - /* The optimizer does not know that the call sets the function value - registers we stored in the result block. We avoid problems by - claiming that all hard registers are used and clobbered at this - point. */ - emit_insn (gen_blockage ()); +(define_expand "ble" + [(set (pc) + (if_then_else (le (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") - DONE; -}") +(define_expand "bge" + [(set (pc) + (if_then_else (ge (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") -;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and -;; all of memory. This blocks insns from being moved across this point. +(define_expand "blt" + [(set (pc) + (if_then_else (lt (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + " +{ + operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") -(define_insn "blockage" - [(unspec_volatile [(const_int 0)] 0)] +(define_expand "bgtu" + [(set (pc) + (if_then_else (gtu (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] "" - "") + " +{ + operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") -(define_insn "tablejump" +(define_expand "bleu" [(set (pc) - (match_operand:SI 0 "register_operand" "r")) - (use (label_ref (match_operand 1 "" "")))] + (if_then_else (leu (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] "" - "* - return (arm_output_asm_insn (\"mov\\tpc, %0\\t@ table jump, label %l1\", operands)); + " +{ + operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} ") -(define_insn "indirect_jump" +(define_expand "bgeu" [(set (pc) - (match_operand:SI 
0 "register_operand" "r"))] + (if_then_else (geu (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] "" - "* - return (arm_output_asm_insn (\"mov\\tpc, %0\\t@ indirect jump\", operands)); + " +{ + operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} ") - -;; Misc insns -(define_insn "nop" - [(const_int 0)] +(define_expand "bltu" + [(set (pc) + (if_then_else (ltu (match_dup 1) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] "" - "* - return (arm_output_asm_insn (\"mov\\tr0, r0\\t@ nop\", operands)); + " +{ + operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} ") - -;; Patterns to allow combination of arithmetic, cond code and shifts -;(define_insn "" -; [(set (match_operand:SI 0 "register_operand" "=r") -; (match_operator:SI 1 "shiftable_operator" -; [(match_operand:SI 2 "register_operand" "r") -; (match_operator:SI 3 "shift_operator" -; [(match_operand:SI 4 "register_operand" "r") -; (match_operand:SI 5 "nonmemory_operand" "rn")])]))] -; "" -; "* -; return (output_arithmetic_with_shift (operands, FALSE, FALSE)); -; " -;) - -;(define_insn "" -; [(set (match_operand:SI 0 "register_operand" "=r") -; (match_operator:SI 1 "shiftable_operator" -; [(match_operator:SI 3 "shift_operator" -; [(match_operand:SI 4 "register_operand" "r") -; (match_operand:SI 5 "nonmemory_operand" "rI")]) -; (match_operand:SI 2 "register_operand" "r")]))] -; "" -; "* -; return (output_arithmetic_with_shift (operands, TRUE, FALSE)); -;") - -;; Patterns to allow combination of arithmetic and left shift - -;(define_insn "" -; [(set (match_operand:SI 0 "register_operand" "=r") -; (match_operator:SI 1 "shiftable_operator" -; [(match_operand:SI 2 "register_operand" "r") -; (mult:SI -; (match_operand:SI 3 "register_operand" "r") -; (match_operand:SI 4 "power_of_two_operand" "n"))]))] -; "" -; "* -; return (output_arithmetic_with_immediate_multiply (operands, FALSE)); -;") - 
-(define_insn "" - [(set (match_operand:SI 0 "register_operand" "=r") - (match_operator:SI 1 "shiftable_operator" - [(mult:SI - (match_operand:SI 3 "register_operand" "r") - (match_operand:SI 4 "power_of_two_operand" "n")) - (match_operand:SI 2 "register_operand" "r")]))] +;; patterns to match conditional branch insns + +(define_insn "" + [(set (pc) + (if_then_else (match_operator 1 "comparison_operator" + [(reg 24) (const_int 0)]) + (label_ref (match_operand 0 "" "")) + (pc)))] "" "* - return (output_arithmetic_with_immediate_multiply (operands, TRUE)); -") +{ + extern int arm_ccfsm_state; -;; This variant of the above insn can occur if the first operand is the -;; frame pointer and we eliminate that. This is a kludge, but there doesn't -;; seem to be a way around it. + if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2) + { + arm_ccfsm_state += 2; + return \"\"; + } + return (arm_output_asm_insn (\"b%d1\\t%l0\", operands)); +}" +[(set_attr "conds" "use")]) (define_insn "" - [(set (match_operand:SI 0 "register_operand" "=&r") - (plus:SI (plus:SI (mult:SI (match_operand:SI 3 "register_operand" "r") - (match_operand:SI 4 "power_of_two_operand" "n")) - (match_operand:SI 2 "register_operand" "r")) - (match_operand:SI 1 "const_int_operand" "n")))] - "reload_in_progress" + [(set (pc) + (if_then_else (match_operator 1 "comparison_operator" + [(reg 24) (const_int 0)]) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" "* { - int shift = int_log2 (INTVAL (operands[4])); - operands[4] = GEN_INT (shift); - arm_output_asm_insn (\"add\\t%0, %2, %3, asl %4\", operands); - operands[2] = operands[1]; - operands[1] = operands[0]; - return output_add_immediate (operands); -}") + extern int arm_ccfsm_state; + + if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2) + { + arm_ccfsm_state += 2; + return \"\"; + } + return (arm_output_asm_insn (\"b%D1\\t%l0\", operands)); +}" +[(set_attr "conds" "use")]) -;; Peephole optimizations. 
- -;; When testing a bitset smaller than 9 bits for (un)equality, a -;; shift/and/cmp/b{eq,ne} sequence can be replaced by one tst and the same -;; branch sequence. - -;;(define_peephole -;; [(set (match_operand:SI 0 "register_operand" "=r") -;; (lshiftrt:SI (match_dup 0) -;; (match_operand 1 "immediate_operand" ""))) -;; (set (match_dup 0) -;; (and:SI (match_dup 0) -;; (match_operand 2 "immediate_operand" ""))) -;; (set (cc0) (match_dup 0)) -;; (set (pc) -;; (if_then_else (ne (cc0) (const_int 0)) -;; (label_ref (match_operand 3 "" "")) -;; (pc)))] -;; "dead_or_set_p (PREV_INSN (insn), operands[0]) -;; && GET_CODE (operands[2]) == CONST_INT && GET_CODE (operands[1]) == CONST_INT -;; && const_ok_for_arm (INTVAL (operands[2]) << INTVAL (operands[1]))" -;; "* -;; operands[2] = gen_rtx (CONST_INT, VOIDmode, -;; INTVAL (operands[2]) << INTVAL (operands[1])); -;; arm_output_asm_insn (\"tst\\t%0, %2\\t\\t@ ph test bitfield\", operands); -;; return (arm_output_asm_insn (\"bne\\t%l3\", operands)); -;;") - -;;(define_peephole -;; [(set (match_operand:SI 0 "register_operand" "=r") -;; (lshiftrt:SI (match_dup 0) -;; (match_operand 1 "immediate_operand" ""))) -;; (set (match_dup 0) -;; (and:SI (match_dup 0) -;; (match_operand 2 "immediate_operand" ""))) -;; (set (cc0) (match_dup 0)) -;; (set (pc) -;; (if_then_else (ne (cc0) (const_int 0)) -;; (pc) -;; (label_ref (match_operand 3 "" ""))))] -;; "dead_or_set_p (prev_real_insn (insn), operands[0]) -;; && GET_CODE (operands[2]) == CONST_INT && GET_CODE (operands[1]) == CONST_INT -;; && const_ok_for_arm (INTVAL (operands[2]) << INTVAL (operands[1]))" -;; "* -;; operands[2] = gen_rtx (CONST_INT, VOIDmode, -;; INTVAL (operands[2]) << INTVAL (operands[1])); -;; arm_output_asm_insn (\"tst\\t%0, %2\\t\\t@ ph test bitfield\", operands); -;; return (arm_output_asm_insn (\"beq\\t%l3\", operands)); -;;") - -;; This allows negative constants to be compared since GCC appears not to try -;; converting them with a NEG. 
- -;;(define_peephole -;; [(set (match_operand:SI 2 "register_operand" "=r") -;; (match_operand:SI 1 "immediate_operand" "n")) -;; (set (cc0) -;; (compare (match_operand:SI 0 "register_operand" "r") -;; (match_dup 1)))] -;; "const_ok_for_arm (-INTVAL (operands[1])) -;; && dead_or_set_p (prev_real_insn (insn), operands[0])" -;; "* -;; operands[1] = gen_rtx (CONST_INT, VOIDmode, -INTVAL (operands[1])); -;; return (arm_output_asm_insn (\"cmn\\t%0, %1\\t\\t@ ph negate comparison\", operands)); -;;") + +; scc insns + +(define_expand "seq" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (eq:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sne" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (ne:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sgt" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (gt:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sle" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (le:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sge" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (ge:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "slt" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (lt:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sgtu" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (gtu:SI (match_dup 1) (const_int 
0)))] + "" + " +{ + operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sleu" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (leu:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sgeu" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (geu:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_expand "sltu" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (ltu:SI (match_dup 1) (const_int 0)))] + "" + " +{ + operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1, + arm_compare_fp); +} +") + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (match_operator:SI 1 "comparison_operator" [(reg 24) (const_int 0)]))] + "" + "* + arm_output_asm_insn (\"mov%d1\\t%0, #1\", operands); + return arm_output_asm_insn (\"mov%D1\\t%0, #0\", operands); +" +[(set_attr "conds" "use") + (set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (neg:SI (match_operator:SI 1 "comparison_operator" + [(reg 24) (const_int 0)])))] + "" + "* + arm_output_asm_insn (\"mvn%d1\\t%0, #0\", operands); + return arm_output_asm_insn (\"mov%D1\\t%0, #0\", operands); +" +[(set_attr "conds" "use") + (set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (not:SI (match_operator:SI 1 "comparison_operator" + [(reg 24) (const_int 0)])))] + "" + "* + arm_output_asm_insn (\"mvn%d1\\t%0, #1\", operands); + return arm_output_asm_insn (\"mov%D1\\t%0, #0\", operands); +" +[(set_attr "conds" "use") + (set_attr "length" "2")]) + + +;; Jump and linkage insns + +(define_insn "jump" + [(set (pc) + (label_ref (match_operand 0 "" "")))] + "" + "* +{ + extern int arm_ccfsm_state; + + if 
(arm_ccfsm_state == 1 || arm_ccfsm_state == 2) + { + arm_ccfsm_state += 2; + return \"\"; + } + return (arm_output_asm_insn (\"b\\t%l0\", operands)); +}") + +(define_insn "call" + [(call (match_operand 0 "memory_operand" "m") + (match_operand 1 "general_operand" "g")) + (clobber (reg:SI 14))] + "" + "* + return (output_call (operands)); +" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) +;; length is worst case, normally it is only two + (set_attr "length" "3") + (set_attr "type" "call")]) + +(define_insn "" + [(call (mem:SI (match_operand 0 "memory_operand" "m")) + (match_operand 1 "general_operand" "g")) + (clobber (reg:SI 14))] + "" + "* + return (output_call_mem (operands)); +" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set_attr "length" "3") + (set_attr "type" "call")]) + +(define_insn "call_value" + [(set (match_operand 0 "" "=rf") + (call (match_operand 1 "memory_operand" "m") + (match_operand 2 "general_operand" "g"))) + (clobber (reg:SI 14))] + "" + "* + return (output_call (&operands[1])); +" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set_attr "length" "3") + (set_attr "type" "call")]) + +(define_insn "" + [(set (match_operand 0 "" "=rf") + (call (mem:SI (match_operand 1 "memory_operand" "m")) + (match_operand 2 "general_operand" "g"))) + (clobber (reg:SI 14))] + "! CONSTANT_ADDRESS_P (XEXP (operands[1], 0))" + "* + return (output_call_mem (&operands[1])); +" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set_attr "length" "3") + (set_attr "type" "call")]) + +;; Allow calls to SYMBOL_REFs specially as they are not valid general addresses +;; The 'a' causes the operand to be treated as an address, i.e. no '#' output. 
+ +(define_insn "" + [(call (mem:SI (match_operand:SI 0 "" "i")) + (match_operand:SI 1 "general_operand" "g")) + (clobber (reg:SI 14))] + "GET_CODE (operands[0]) == SYMBOL_REF" + "* + return (arm_output_asm_insn (\"bl\\t%a0\", operands)); +" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set_attr "type" "call")]) + +(define_insn "" + [(set (match_operand 0 "s_register_operand" "=rf") + (call (mem:SI (match_operand:SI 1 "" "i")) + (match_operand:SI 2 "general_operand" "g"))) + (clobber (reg:SI 14))] + "GET_CODE(operands[1]) == SYMBOL_REF" + "* + return (arm_output_asm_insn (\"bl\\t%a1\", operands)); +" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set_attr "type" "call")]) + +;; Often the return insn will be the same as loading from memory, so set attr +(define_insn "return" + [(return)] + "USE_RETURN_INSN" + "* +{ + extern int arm_ccfsm_state; + + if (arm_ccfsm_state == 2) + { + arm_ccfsm_state += 2; + return \"\"; + } + return output_return_instruction (NULL, TRUE); +}" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (pc) + (if_then_else (match_operator 0 "comparison_operator" + [(reg 24) (const_int 0)]) + (return) + (pc)))] + "USE_RETURN_INSN" + "* +{ + extern int arm_ccfsm_state; + + if (arm_ccfsm_state == 2) + { + arm_ccfsm_state += 2; + return \"\"; + } + return output_return_instruction (operands[0], TRUE); +}" +[(set_attr "conds" "use") + (set_attr "type" "load")]) + +(define_insn "" + [(set (pc) + (if_then_else (match_operator 0 "comparison_operator" + [(reg 24) (const_int 0)]) + (pc) + (return)))] + "USE_RETURN_INSN" + "* +{ + extern int arm_ccfsm_state; + + if (arm_ccfsm_state == 2) + { + arm_ccfsm_state += 2; + return \"\"; + } + return output_return_instruction + (gen_rtx (reverse_condition (GET_CODE (operands[0])), + GET_MODE (operands[0]), XEXP (operands[0], 0), + XEXP (operands[0], 1)), + TRUE); +}" 
+[(set_attr "conds" "use") + (set_attr "type" "load")]) + +;; Call subroutine returning any type. + +(define_expand "untyped_call" + [(parallel [(call (match_operand 0 "" "") + (const_int 0)) + (match_operand 1 "" "") + (match_operand 2 "" "")])] + "" + " +{ + int i; + + emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx)); + + for (i = 0; i < XVECLEN (operands[2], 0); i++) + { + rtx set = XVECEXP (operands[2], 0, i); + emit_move_insn (SET_DEST (set), SET_SRC (set)); + } + + /* The optimizer does not know that the call sets the function value + registers we stored in the result block. We avoid problems by + claiming that all hard registers are used and clobbered at this + point. */ + emit_insn (gen_blockage ()); + + DONE; +}") + +;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and +;; all of memory. This blocks insns from being moved across this point. + +(define_insn "blockage" + [(unspec_volatile [(const_int 0)] 0)] + "" + "" +[(set_attr "length" "0") + (set_attr "type" "block")]) + +(define_insn "tablejump" + [(set (pc) + (match_operand:SI 0 "s_register_operand" "r")) + (use (label_ref (match_operand 1 "" "")))] + "" + "* + return arm_output_asm_insn (\"mov\\tpc, %0\\t@ table jump, label %l1\", + operands); +") + +(define_insn "" + [(set (pc) + (match_operand:SI 0 "memory_operand" "m")) + (use (label_ref (match_operand 1 "" "")))] + "" + "* + return arm_output_asm_insn (\"ldr\\tpc, %0\\t@ table jump, label %l1\", + operands); +" +[(set_attr "type" "load")]) + +(define_insn "indirect_jump" + [(set (pc) + (match_operand:SI 0 "s_register_operand" "r"))] + "" + "* + return arm_output_asm_insn (\"mov\\tpc, %0\\t@ indirect jump\", operands); +") + +(define_insn "" + [(set (pc) + (match_operand:SI 0 "memory_operand" "m"))] + "" + "* + return arm_output_asm_insn (\"ldr\\tpc, %0\\t@ indirect jump\", operands); +" +[(set_attr "type" "load")]) + +;; Misc insns + +(define_insn "nop" + [(const_int 0)] + "" + "* + return 
arm_output_asm_insn (\"mov\\tr0, r0\\t@ nop\", operands); +") + +;; Patterns to allow combination of arithmetic, cond code and shifts + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (match_operator:SI 1 "shiftable_operator" + [(match_operator:SI 3 "shift_operator" + [(match_operand:SI 4 "s_register_operand" "r") + (match_operand:SI 5 "nonmemory_operand" "rI")]) + (match_operand:SI 2 "s_register_operand" "r")]))] + "" + "* + return (output_arithmetic_with_shift (operands, TRUE, FALSE)); +") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator" + [(match_operator:SI 3 "shift_operator" + [(match_operand:SI 4 "s_register_operand" "r") + (match_operand:SI 5 "nonmemory_operand" "rI")]) + (match_operand:SI 2 "s_register_operand" "r")]) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (match_op_dup 1 [(match_op_dup 3 [(match_dup 4) (match_dup 5)]) + (match_dup 2)]))] + "" + "* + return (output_arithmetic_with_shift (operands, TRUE, TRUE)); +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator" + [(match_operator:SI 3 "shift_operator" + [(match_operand:SI 4 "s_register_operand" "r") + (match_operand:SI 5 "nonmemory_operand" "rI")]) + (match_operand:SI 2 "s_register_operand" "r")]) + (const_int 0))) + (clobber (match_scratch:SI 0 "=r"))] + "" + "* + return (output_arithmetic_with_shift (operands, TRUE, TRUE)); +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "nonmemory_operand" "rn")])))] + "" + "* +{ + rtx ops[6]; + + ops[0] = operands[0]; + ops[1] = gen_rtx (MINUS, SImode, operands[1], operands[2]); + ops[2] = operands[1]; + ops[3] = operands[2]; + ops[4] = 
operands[3]; + ops[5] = operands[4]; + return output_arithmetic_with_shift (ops, FALSE, FALSE); +} +") + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (minus:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "nonmemory_operand" "rn")])) + (const_int 0))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3) + (match_dup 4)])))] + "" + "* +{ + rtx ops[6]; + + ops[0] = operands[0]; + ops[1] = gen_rtx (MINUS, SImode, operands[1], operands[2]); + ops[2] = operands[1]; + ops[3] = operands[2]; + ops[4] = operands[3]; + ops[5] = operands[4]; + return output_arithmetic_with_shift (ops, FALSE, TRUE); +} +" +[(set_attr "conds" "set")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (minus:SI (match_operand:SI 1 "s_register_operand" "r") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "nonmemory_operand" "rn")])) + (const_int 0))) + (clobber (match_scratch:SI 0 "=r"))] + "" + "* +{ + rtx ops[6]; + + ops[0] = operands[0]; + ops[1] = gen_rtx (MINUS, SImode, operands[1], operands[2]); + ops[2] = operands[1]; + ops[3] = operands[2]; + ops[4] = operands[3]; + ops[5] = operands[4]; + return output_arithmetic_with_shift (ops, FALSE, TRUE); +} +" +[(set_attr "conds" "set")]) + +;; These variants of the above insns can occur if the first operand is the +;; frame pointer and we eliminate that. This is a kludge, but there doesn't +;; seem to be a way around it. Most of the predicates have to be null +;; because the format can be generated part way through reload, so +;; if we don't match it as soon as it becomes available, reload doesn't know +;; how to reload pseudos that haven't got hard registers; the constraints will +;; sort everything out. 
+ +(define_insn "" + [(set (match_operand:SI 0 "" "=&r") + (plus:SI (plus:SI (match_operator:SI 5 "shift_operator" + [(match_operand:SI 3 "" "r") + (match_operand:SI 4 "" "rn")]) + (match_operand:SI 2 "" "r")) + (match_operand:SI 1 "const_int_operand" "n")))] + "reload_in_progress" + "* +{ + char instr[100]; + sprintf (instr, \"add\\t%%0, %%2, %%3, %s %%4\", + shift_instr (GET_CODE (operands[5]), &operands[4])); + arm_output_asm_insn (instr, operands); + operands[2] = operands[1]; + operands[1] = operands[0]; + return output_add_immediate (operands); +}" +; we have no idea how long the add_immediate is, it could be up to 4. +[(set_attr "length" "5")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (plus:SI + (plus:SI + (match_operator:SI 5 "shift_operator" + [(match_operand:SI 3 "" "r") + (match_operand:SI 4 "" "rn")]) + (match_operand:SI 1 "" "r")) + (match_operand:SI 2 "const_int_operand" "n")) + (const_int 0))) + (set (match_operand:SI 0 "" "=&r") + (plus:SI (plus:SI (match_op_dup 5 [(match_dup 3) (match_dup 4)]) + (match_dup 1)) + (match_dup 2)))] + "reload_in_progress" + "* +{ + char instr[100]; + sprintf (instr, \"adds\\t%%0, %%0, %%3, %s %%4\", + shift_instr (GET_CODE (operands[5]), &operands[4])); + output_add_immediate (operands); + return arm_output_asm_insn (instr, operands); +}" +[(set_attr "conds" "set") + (set_attr "length" "5")]) + +(define_insn "" + [(set (reg:CC_NOOV 24) + (compare:CC_NOOV (plus:SI + (plus:SI + (match_operator:SI 5 "shift_operator" + [(match_operand:SI 3 "" "r") + (match_operand:SI 4 "" "rn")]) + (match_operand:SI 1 "" "r")) + (match_operand:SI 2 "const_int_operand" "n")) + (const_int 0))) + (clobber (match_scratch:SI 0 "=&r"))] + "reload_in_progress" + "* +{ + char instr[100]; + sprintf (instr, \"adds\\t%%0, %%0, %%3, %s %%4\", + shift_instr (GET_CODE (operands[5]), &operands[4])); + output_add_immediate (operands); + return arm_output_asm_insn (instr, operands); +}" +[(set_attr "conds" "set") + (set_attr "length" 
"5")]) + + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (and:SI (match_operator 1 "comparison_operator" + [(reg 24) (const_int 0)]) + (match_operand:SI 2 "s_register_operand" "r")))] + "" + "* + arm_output_asm_insn (\"mov%D1\\t%0, #0\", operands); + return arm_output_asm_insn (\"and%d1\\t%0, %2, #1\", operands); +" +[(set_attr "conds" "use") + (set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (ior:SI (match_operator 2 "comparison_operator" + [(reg 24) (const_int 0)]) + (match_operand:SI 1 "s_register_operand" "0,?r")))] + "" + "* + if (which_alternative != 0) + arm_output_asm_insn (\"mov%D2\\t%0, %1\", operands); + return arm_output_asm_insn (\"orr%d2\\t%0, %1, #1\", operands); +" +[(set_attr "conds" "use") + (set_attr "length" "1,2")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (match_operator 1 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_add_operand" "rL")])) + (clobber (reg 24))] + "" + "* + if (GET_CODE (operands[1]) == LT) + { + if (operands[3] == const0_rtx) + return arm_output_asm_insn (\"mov\\t%0, %2, lsr #31\", operands); + if (GET_CODE (operands[3]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[3]))) + arm_output_asm_insn (\"add\\t%0, %2, #%n3\", operands); + else + arm_output_asm_insn (\"sub\\t%0, %2, %3\", operands); + return arm_output_asm_insn (\"mov\\t%0, %0, lsr #31\", operands); + } + if (GET_CODE (operands[1]) == GE && operands[3] == const0_rtx) + { + arm_output_asm_insn (\"mvn\\t%0, %2\", operands); + return arm_output_asm_insn (\"mov\\t%0, %0, lsr #31\", operands); + } + if (GET_CODE (operands[1]) == NE) + { + if (GET_CODE (operands[3]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[3]))) + arm_output_asm_insn (\"adds\\t%0, %2, #%n3\", operands); + else + arm_output_asm_insn (\"subs\\t%0, %2, %3\", operands); + return arm_output_asm_insn (\"movne\\t%0, #1\", 
operands); + } + if (GET_CODE (operands[3]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[3]))) + arm_output_asm_insn (\"cmn\\t%2, #%n3\", operands); + else + arm_output_asm_insn (\"cmp\\t%2, %3\", operands); + arm_output_asm_insn (\"mov%D1\\t%0, #0\", operands); + return arm_output_asm_insn (\"mov%d1\\t%0, #1\", operands); +" +[(set_attr "conds" "clob") + (set_attr "length" "3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=&r") + (ior:SI (match_operator 1 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_rhs_operand" "rI")]) + (match_operator 4 "comparison_operator" + [(match_operand:SI 5 "s_register_operand" "r") + (match_operand:SI 6 "arm_rhs_operand" "rI")]))) + (clobber (reg 24))] + "" + "* +{ + int dominant = comparison_dominates_p (GET_CODE (operands[4]), + GET_CODE (operands[1])); + + arm_output_asm_insn (dominant ? \"cmp\\t%5, %6\" : \"cmp\\t%2, %3\", + operands); + arm_output_asm_insn (\"mov\\t%0, #0\", operands); + if (GET_CODE (operands[1]) == GET_CODE (operands[4]) + || comparison_dominates_p (GET_CODE (operands[1]), + GET_CODE (operands[4])) + || dominant) + { + arm_output_asm_insn (dominant ? \"cmp%D4\\t%2, %3\" : \"cmp%D1\\t%5,%6\", + operands); + } + else + { + arm_output_asm_insn (\"mov%d1\\t%0, #1\", operands); + arm_output_asm_insn (\"cmp\\t%5, %6\", operands); + } + return arm_output_asm_insn (dominant ? 
\"mov%d1\\t%0, #1\" + : \"mov%d4\\t%0, #1\", operands); +} +" +[(set_attr "conds" "clob") +; worst case length + (set_attr "length" "5")]) + +(define_split + [(set (pc) + (if_then_else (match_operator 5 "equality_operator" + [(ior:SI (match_operator 6 "comparison_operator" + [(match_operand:SI 0 "s_register_operand" "r") + (match_operand:SI 1 "arm_add_operand" "rL")]) + (match_operator 7 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_add_operand" "rL")])) + (const_int 0)]) + (label_ref (match_operand 4 "" "")) + (pc))) + (clobber (reg 24))] + "(GET_CODE (operands[6]) == GET_CODE (operands[7]) + || comparison_dominates_p (GET_CODE (operands[6]), GET_CODE (operands[7])) + || comparison_dominates_p (GET_CODE (operands[7]), GET_CODE (operands[6])))" + [(set (reg:CC 24) + (compare:CC (ior:CC (match_op_dup 6 + [(match_dup 0) (match_dup 1)]) + (match_op_dup 7 + [(match_dup 2) (match_dup 3)])) + (const_int 0))) + (set (pc) + (if_then_else (match_op_dup 5 [(reg:CC 24) (const_int 0)]) + (label_ref (match_dup 4)) + (pc)))] + " +{ + enum rtx_code code = comparison_dominates_p (GET_CODE (operands[6]), + GET_CODE (operands[7])) + ? GET_CODE (operands[7]) : GET_CODE (operands[6]); + + if (GET_CODE (operands[5]) == NE) + operands[5] = gen_rtx (code, CCmode, + XEXP (operands[5], 0), XEXP (operands[5], 1)); + else + operands[5] = gen_rtx (reverse_condition (code), CCmode, + XEXP (operands[5], 0), XEXP (operands[5], 1)); +} +") + +;; Don't match these patterns if we can use a conditional compare, since they +;; tell the final prescan branch elimator code that full branch inlining +;; can't be done. 
+ +(define_insn "" + [(set (pc) + (if_then_else (ne + (ior:SI (match_operator 5 "comparison_operator" + [(match_operand:SI 0 "s_register_operand" "r") + (match_operand:SI 1 "arm_add_operand" "rL")]) + (match_operator 6 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_rhs_operand" "rL")])) + (const_int 0)) + (label_ref (match_operand 4 "" "")) + (pc))) + (clobber (reg 24))] + "!(GET_CODE (operands[5]) == GET_CODE (operands[6]) + || comparison_dominates_p (GET_CODE (operands[5]), GET_CODE (operands[6])) + || comparison_dominates_p (GET_CODE (operands[6]), GET_CODE (operands[5])))" + "* +{ + extern int arm_ccfsm_state; + + if (GET_CODE (operands[1]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[1]))) + arm_output_asm_insn (\"cmn\\t%0, #%n1\", operands); + else + arm_output_asm_insn (\"cmp\\t%0, %1\", operands); + arm_output_asm_insn (\"b%d5\\t%l4\", operands); + if (GET_CODE (operands[3]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[3]))) + arm_output_asm_insn (\"cmn\\t%2, #%n3\", operands); + else + arm_output_asm_insn (\"cmp\\t%2, %3\", operands); + if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2) + { + arm_ccfsm_state += 2; + return \"\"; + } + return arm_output_asm_insn (\"b%d6\\t%l4\", operands); +}" +[(set_attr "conds" "jump_clob") + (set_attr "length" "4")]) + +(define_insn "" + [(set (reg:CC 24) + (compare:CC (ior:CC (match_operator 4 "comparison_operator" + [(match_operand:SI 0 "s_register_operand" "r") + (match_operand:SI 1 "arm_add_operand" "rL")]) + (match_operator 5 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_add_operand" "rL")])) + (const_int 0)))] + "(GET_CODE (operands[4]) == GET_CODE (operands[5]) + || comparison_dominates_p (GET_CODE (operands[4]), GET_CODE (operands[5])) + || comparison_dominates_p (GET_CODE (operands[5]), GET_CODE (operands[4])))" + "* + if (comparison_dominates_p (GET_CODE (operands[5]), GET_CODE (operands[4]))) 
+ { + if (GET_CODE (operands[3]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[3]))) + arm_output_asm_insn (\"cmn\\t%2, #%n3\", operands); + else + arm_output_asm_insn (\"cmp\\t%2, %3\", operands); + if (GET_CODE (operands[1]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[1]))) + return arm_output_asm_insn (\"cmn%D5\\t%0, #%n1\", operands); + return arm_output_asm_insn (\"cmp%D5\\t%0, %1\", operands); + } + if (GET_CODE (operands[1]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[1]))) + arm_output_asm_insn (\"cmn\\t%0, #%n1\", operands); + else + arm_output_asm_insn (\"cmp\\t%0, %1\", operands); + if (GET_CODE (operands[3]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[3]))) + return arm_output_asm_insn (\"cmn%D4\\t%2, #%n3\", operands); + return arm_output_asm_insn (\"cmp%D4\\t%2, %3\", operands); +" +[(set_attr "conds" "set") + (set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r,r") + (if_then_else (match_operator 3 "equality_operator" + [(match_operator 4 "comparison_operator" + [(reg 24) (const_int 0)]) + (const_int 0)]) + (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI") + (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))] + "" + "* + if (GET_CODE (operands[3]) == NE) + { + if (which_alternative != 0) + arm_output_asm_insn (\"mov%d4\\t%0, %1\", operands); + if (which_alternative != 1) + arm_output_asm_insn (\"mov%D4\\t%0, %2\", operands); + return \"\"; + } + if (which_alternative != 0) + arm_output_asm_insn (\"mov%D4\\t%0, %1\", operands); + if (which_alternative != 1) + arm_output_asm_insn (\"mov%d4\\t%0, %2\", operands); + return \"\"; +" +[(set_attr "conds" "use") + (set_attr "length" "1,1,2")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (match_operator:SI 5 "shiftable_operator" + [(match_operator:SI 4 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_rhs_operand" "rI,rI")]) + 
(match_operand:SI 1 "s_register_operand" "0,?r")])) + (clobber (reg 24))] + "" + "* +{ + char *instr = arithmetic_instr (operands[5], TRUE); + char pattern[100]; + + if (GET_CODE (operands[4]) == LT && operands[3] == const0_rtx) + { + sprintf (pattern, \"%s\\t%%0, %%1, %%2, lsr #31\", instr); + return arm_output_asm_insn (pattern, operands); + } + arm_output_asm_insn (\"cmp\\t%2, %3\", operands); + if (GET_CODE (operands[5]) == AND) + arm_output_asm_insn (\"mov%D4\\t%0, #0\", operands); + else if (which_alternative != 0) + arm_output_asm_insn (\"mov%D4\\t%0, %1\", operands); + sprintf (pattern, \"%s%%d4\\t%%0, %%1, #1\", instr); + return arm_output_asm_insn (pattern, operands); +} +" +[(set_attr "conds" "clob") + (set_attr "length" "3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r") + (match_operator:SI 4 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_rhs_operand" "rI,rI")]))) + (clobber (reg 24))] + "" + "* + arm_output_asm_insn (\"cmp\\t%2, %3\", operands); + if (which_alternative != 0) + arm_output_asm_insn (\"mov%D4\\t%0, %1\", operands); + return arm_output_asm_insn (\"sub%d4\\t%0, %1, #1\", operands); +" +[(set_attr "conds" "clob") + (set_attr "length" "2,3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=&r") + (and:SI (match_operator 1 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_rhs_operand" "rI")]) + (match_operator 4 "comparison_operator" + [(match_operand:SI 5 "s_register_operand" "r") + (match_operand:SI 6 "arm_rhs_operand" "rI")]))) + (clobber (reg 24))] + "" + "* +{ + int dominant = + comparison_dominates_p (reverse_condition (GET_CODE (operands[1])), + reverse_condition (GET_CODE (operands[4]))) + ? 1 + : comparison_dominates_p (reverse_condition (GET_CODE (operands[4])), + reverse_condition (GET_CODE (operands[1]))) + ? 
2 : 0; + arm_output_asm_insn (dominant == 2 ? \"cmp\\t%5, %6\" : \"cmp\\t%2, %3\", + operands); + arm_output_asm_insn (\"mov\\t%0, #1\", operands); + if (GET_CODE (operands[1]) == GET_CODE (operands[4]) || dominant) + { + arm_output_asm_insn (dominant == 2 ? \"cmp%d4\\t%2, %3\" + : \"cmp%d1\\t%5, %6\", operands); + } + else + { + arm_output_asm_insn (\"mov%D1\\t%0, #0\", operands); + arm_output_asm_insn (\"cmp\\t%5, %6\", operands); + } + return arm_output_asm_insn (dominant == 2 ? \"mov%D1\\t%0, #0\" + : \"mov%D4\\t%0, #0\", operands); +} +" +[(set_attr "conds" "clob") + (set_attr "length" "5")]) + +(define_split + [(set (pc) + (if_then_else (match_operator 1 "equality_operator" + [(and:SI (match_operator 2 "comparison_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "arm_add_operand" "rL")]) + (match_operator 0 "comparison_operator" + [(match_operand:SI 5 "s_register_operand" "r") + (match_operand:SI 6 "arm_add_operand" "rL")])) + (const_int 0)]) + (label_ref (match_operand 7 "" "")) + (pc))) + (clobber (reg 24))] + "(GET_CODE (operands[2]) == GET_CODE (operands[0]) + || comparison_dominates_p (reverse_condition (GET_CODE (operands[2])), + reverse_condition (GET_CODE (operands[0]))) + || comparison_dominates_p (reverse_condition (GET_CODE (operands[0])), + reverse_condition (GET_CODE (operands[2]))))" + [(set (reg:CC 24) + (compare:CC (ior:CC (match_op_dup 2 + [(match_dup 3) (match_dup 4)]) + (match_op_dup 0 + [(match_dup 5) (match_dup 6)])) + (const_int 0))) + (set (pc) + (if_then_else (match_op_dup 1 [(reg:CC 24) (const_int 0)]) + (label_ref (match_dup 7)) + (pc)))] + " +{ + /* Use DeMorgans law to convert this into an IOR of the inverse conditions + This is safe since we only do it for integer comparisons. */ + enum rtx_code code = + comparison_dominates_p (reverse_condition (GET_CODE (operands[2])), + reverse_condition (GET_CODE (operands[0]))) + ? 
GET_CODE (operands[0]) : GET_CODE (operands[2]); + + operands[2] = gen_rtx (reverse_condition (GET_CODE (operands[2])), + GET_MODE (operands[2]), operands[3], operands[4]); + operands[0] = gen_rtx (reverse_condition (GET_CODE (operands[0])), + GET_MODE (operands[0]), operands[5], operands[6]); + if (GET_CODE (operands[1]) == NE) + operands[1] = gen_rtx (code, CCmode, + XEXP (operands[1], 0), XEXP (operands[1], 1)); + else + operands[1] = gen_rtx (reverse_condition (code), CCmode, + XEXP (operands[1], 0), XEXP (operands[1], 1)); +} +") + +;; Don't match these patterns if we can use a conditional compare, since they +;; tell the final prescan branch elimator code that full branch inlining +;; can't be done. + +(define_insn "" + [(set (pc) + (if_then_else (eq + (and:SI (match_operator 1 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_add_operand" "rL")]) + (match_operator 4 "comparison_operator" + [(match_operand:SI 5 "s_register_operand" "r") + (match_operand:SI 6 "arm_rhs_operand" "rL")])) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc))) + (clobber (reg 24))] + "!(GET_CODE (operands[1]) == GET_CODE (operands[4]) + || comparison_dominates_p (reverse_condition (GET_CODE (operands[1])), + reverse_condition (GET_CODE (operands[4]))) + || comparison_dominates_p (reverse_condition (GET_CODE (operands[4])), + reverse_condition (GET_CODE (operands[1]))))" + "* +{ + extern int arm_ccfsm_state; + + if (GET_CODE (operands[3]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[3]))) + arm_output_asm_insn (\"cmn\\t%2, #%n3\", operands); + else + arm_output_asm_insn (\"cmp\\t%2, %3\", operands); + arm_output_asm_insn (\"b%D1\\t%l0\", operands); + if (GET_CODE (operands[6]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[6]))) + arm_output_asm_insn (\"cmn\\t%5, #%n6\", operands); + else + arm_output_asm_insn (\"cmp\\t%5, %6\", operands); + if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2) + { + 
arm_ccfsm_state += 2;
+      return \"\";
+    }
+  return arm_output_asm_insn (\"b%D4\\t%l0\", operands);
+}"
+[(set_attr "conds" "jump_clob")
+ (set_attr "length" "4")])
+
+;; Negated comparison: operand 0 := -(operand1 <op3> operand2), i.e. 0 or -1.
+(define_insn ""
+  [(set (match_operand:SI 0 "s_register_operand" "=r")
+	(neg:SI (match_operator 3 "comparison_operator"
+		 [(match_operand:SI 1 "s_register_operand" "r")
+		  (match_operand:SI 2 "arm_rhs_operand" "rI")])))
+   (clobber (reg 24))]
+  ""
+  "*
+  if (GET_CODE (operands[3]) == LT)
+    {
+      /* Test the rhs operand (2) against zero, not the operator rtx (3);
+	 for x < 0 the sign bit replicated by asr #31 is the answer.  */
+      if (operands[2] == const0_rtx)
+	return arm_output_asm_insn (\"mov\\t%0, %1, asr #31\", operands);
+      arm_output_asm_insn (\"sub\\t%0, %1, %2\", operands);
+      return arm_output_asm_insn (\"mov\\t%0, %0, asr #31\", operands);
+    }
+  if (GET_CODE (operands[3]) == NE)
+    {
+      arm_output_asm_insn (\"subs\\t%0, %1, %2\", operands);
+      return arm_output_asm_insn (\"mvnne\\t%0, #0\", operands);
+    }
+  if (GET_CODE (operands[3]) == GT)
+    {
+      arm_output_asm_insn (\"subs\\t%0, %1, %2\", operands);
+      return arm_output_asm_insn (\"mvnne\\t%0, %0, asr #31\", operands);
+    }
+  arm_output_asm_insn (\"cmp\\t%1, %2\", operands);
+  arm_output_asm_insn (\"mov%D3\\t%0, #0\", operands);
+  return arm_output_asm_insn (\"mvn%d3\\t%0, #0\", operands);
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "3")])
+
+(define_insn "movcond"
+  [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+	(if_then_else:SI (match_operator 5 "comparison_operator"
+			  [(match_operand:SI 3 "s_register_operand" "r,r,r")
+			   (match_operand:SI 4 "arm_add_operand" "rL,rL,rL")])
+			 (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+			 (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+   (clobber (reg 24))]
+  ""
+  "*
+  if (GET_CODE (operands[5]) == LT
+      && (operands[4] == const0_rtx))
+    {
+      /* The ands shortcut needs the then-value (operand 1) in a register;
+	 operand 4 is already known to be const0_rtx here, so testing it
+	 for REG (as the original did) made this branch unreachable.  */
+      if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+	{
+	  arm_output_asm_insn (\"ands\\t%0, %1, %3, asr #32\", operands);
+	  if (operands[2] == const0_rtx)
+	    return \"\";
+	  return arm_output_asm_insn (\"movcc\\t%0, %2\", operands);
+	}
+      else if (which_alternative != 0 && 
GET_CODE (operands[2]) == REG) + { + arm_output_asm_insn (\"bics\\t%0, %2, %3, asr #32\", operands); + if (operands[1] == const0_rtx) + return \"\"; + return arm_output_asm_insn (\"movcs\\t%0, %1\", operands); + } + /* The only case that falls through to here is when both ops 1 & 2 + are constants */ + } + if (GET_CODE (operands[5]) == GE + && (operands[4] == const0_rtx)) + { + if (which_alternative != 1 && GET_CODE (operands[1]) == REG) + { + arm_output_asm_insn (\"bics\\t%0, %1, %3, asr #32\", operands); + if (operands[2] == const0_rtx) + return \"\"; + return arm_output_asm_insn (\"movcs\\t%0, %2\", operands); + } + else if (which_alternative != 0 && GET_CODE (operands[2]) == REG) + { + arm_output_asm_insn (\"ands\\t%0, %2, %3, asr #32\", operands); + if (operands[1] == const0_rtx) + return \"\"; + return arm_output_asm_insn (\"movcc\\t%0, %1\", operands); + } + /* The only case that falls through to here is when both ops 1 & 2 + are constants */ + } + if (GET_CODE (operands[4]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[4]))) + arm_output_asm_insn (\"cmn\\t%3, #%n4\", operands); + else + arm_output_asm_insn (\"cmp\\t%3, %4\", operands); + if (which_alternative != 0) + arm_output_asm_insn (\"mov%d5\\t%0, %1\", operands); + if (which_alternative != 1) + arm_output_asm_insn (\"mov%D5\\t%0, %2\", operands); + return \"\"; +" +[(set_attr "conds" "clob") + (set_attr "length" "2,2,3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (if_then_else:SI (match_operator 9 "comparison_operator" + [(match_operand:SI 5 "s_register_operand" "r") + (match_operand:SI 6 "arm_add_operand" "rL")]) + (match_operator:SI 8 "shiftable_operator" + [(match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rI")]) + (match_operator:SI 7 "shiftable_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "arm_rhs_operand" "rI")]))) + (clobber (reg 24))] + "" + "* +{ + char pattern[100]; + + if 
(GET_CODE (operands[6]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[6]))) + arm_output_asm_insn (\"cmn\\t%5, #%n6\", operands); + else + arm_output_asm_insn (\"cmp\\t%5, %6\", operands); + sprintf (pattern, \"%s%%d9\\t%%0, %%1, %%2\", arithmetic_instr (operands[8], + FALSE)); + arm_output_asm_insn (pattern, operands); + sprintf (pattern, \"%s%%D9\\t%%0, %%3, %%4\", arithmetic_instr (operands[7], + FALSE)); + return arm_output_asm_insn (pattern, operands); +} +" +[(set_attr "conds" "clob") + (set_attr "length" "3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 6 "comparison_operator" + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_add_operand" "rL,rL")]) + (match_operator:SI 7 "shiftable_operator" + [(match_operand:SI 4 "s_register_operand" "r,r") + (match_operand:SI 5 "arm_rhs_operand" "rI,rI")]) + (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm"))) + (clobber (reg 24))] + "" + "* +{ + char pattern[100]; + + /* If we have an operation where (op x 0) is the identity operation and + the condtional operator is LT or GE and we are comparing against zero and + everything is in registers then we can do this in two instructions */ + if (operands[3] == const0_rtx + && GET_CODE (operands[7]) != AND + && GET_CODE (operands[5]) == REG + && GET_CODE (operands[1]) == REG + && REGNO (operands[1]) == REGNO (operands[4]) + && REGNO (operands[4]) != REGNO (operands[0])) + { + if (GET_CODE (operands[6]) == LT) + { + arm_output_asm_insn (\"and\\t%0, %5, %2, asr #31\", operands); + sprintf (pattern, \"%s\\t%%0, %%4, %%0\", + arithmetic_instr (operands[7], FALSE)); + return arm_output_asm_insn (pattern, operands); + } + else if (GET_CODE (operands[6]) == GE) + { + arm_output_asm_insn (\"bic\\t%0, %5, %2, asr #31\", operands); + sprintf (pattern, \"%s\\t%%0, %%4, %%0\", + arithmetic_instr (operands[7], FALSE)); + return arm_output_asm_insn (pattern, operands); + } + } + if (GET_CODE 
(operands[3]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[3]))) + arm_output_asm_insn (\"cmn\\t%2, #%n3\", operands); + else + arm_output_asm_insn (\"cmp\\t%2, %3\", operands); + sprintf (pattern, \"%s%%d6\\t%%0, %%4, %%5\", arithmetic_instr (operands[7], + FALSE)); + arm_output_asm_insn (pattern, operands); + if (which_alternative != 0) + { + if (GET_CODE (operands[1]) == MEM) + arm_output_asm_insn (\"ldr%D6\\t%0, %1\", operands); + else + arm_output_asm_insn (\"mov%D6\\t%0, %1\", operands); + } + return \"\"; +} +" +[(set_attr "conds" "clob") + (set_attr "length" "2,3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 6 "comparison_operator" + [(match_operand:SI 4 "s_register_operand" "r,r") + (match_operand:SI 5 "arm_add_operand" "rL,rL")]) + (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm") + (match_operator:SI 7 "shiftable_operator" + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_rhs_operand" "rI,rI")]))) + (clobber (reg 24))] + "" + "* +{ + char pattern[100]; + + /* If we have an operation where (op x 0) is the identity operation and + the condtional operator is LT or GE and we are comparing against zero and + everything is in registers then we can do this in two instructions */ + if (operands[5] == const0_rtx + && GET_CODE (operands[7]) != AND + && GET_CODE (operands[3]) == REG + && GET_CODE (operands[1]) == REG + && REGNO (operands[1]) == REGNO (operands[2]) + && REGNO (operands[2]) != REGNO (operands[0])) + { + if (GET_CODE (operands[6]) == GE) + { + arm_output_asm_insn (\"and\\t%0, %3, %4, asr #31\", operands); + sprintf (pattern, \"%s\\t%%0, %%2, %%0\", + arithmetic_instr (operands[7], FALSE)); + return arm_output_asm_insn (pattern, operands); + } + else if (GET_CODE (operands[6]) == LT) + { + arm_output_asm_insn (\"bic\\t%0, %3, %4, asr #31\", operands); + sprintf (pattern, \"%s\\t%%0, %%2, %%0\", + arithmetic_instr (operands[7], FALSE)); + return 
arm_output_asm_insn (pattern, operands); + } + } + if (GET_CODE (operands[5]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[5]))) + arm_output_asm_insn (\"cmn\\t%4, #%n5\", operands); + else + arm_output_asm_insn (\"cmp\\t%4, %5\", operands); + if (which_alternative != 0) + { + if (GET_CODE (operands[1]) == MEM) + arm_output_asm_insn (\"ldr%d6\\t%0, %1\", operands); + else + arm_output_asm_insn (\"mov%d6\\t%0, %1\", operands); + } + sprintf (pattern, \"%s%%D6\\t%%0, %%2, %%3\", arithmetic_instr (operands[7], + FALSE)); + return arm_output_asm_insn (pattern, operands); +} +" +[(set_attr "conds" "clob") + (set_attr "length" "2,3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 6 "comparison_operator" + [(match_operand:SI 4 "s_register_operand" "r,r") + (match_operand:SI 5 "arm_add_operand" "rL,rL")]) + (plus:SI + (match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_add_operand" "rL,rL")) + (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm"))) + (clobber (reg 24))] + "" + "* +{ + if (GET_CODE (operands[5]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[5]))) + arm_output_asm_insn (\"cmn\\t%4, #%n5\", operands); + else + arm_output_asm_insn (\"cmp\\t%4, %5\", operands); + if (GET_CODE (operands[3]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[3]))) + arm_output_asm_insn (\"sub%d6\\t%0, %2, #%n3\", operands); + else + arm_output_asm_insn (\"add%d6\\t%0, %2, %3\", operands); + if (which_alternative != 0) + { + if (GET_CODE (operands[1]) == MEM) + arm_output_asm_insn (\"ldr%D6\\t%0, %1\", operands); + else + arm_output_asm_insn (\"mov%D6\\t%0, %1\", operands); + } + return \"\"; +} +" +[(set_attr "conds" "clob") + (set_attr "length" "2,3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 6 "comparison_operator" + [(match_operand:SI 4 "s_register_operand" "r,r") + (match_operand:SI 5 "arm_add_operand" 
"rL,rL")]) + (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm") + (plus:SI + (match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_add_operand" "rL,rL")))) + (clobber (reg 24))] + "" + "* +{ + if (GET_CODE (operands[5]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[5]))) + arm_output_asm_insn (\"cmn\\t%4, #%n5\", operands); + else + arm_output_asm_insn (\"cmp\\t%4, %5\", operands); + if (GET_CODE (operands[3]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[3]))) + arm_output_asm_insn (\"sub%D6\\t%0, %2, #%n3\", operands); + else + arm_output_asm_insn (\"add%D6\\t%0, %2, %3\", operands); + if (which_alternative != 0) + { + if (GET_CODE (operands[6]) == MEM) + arm_output_asm_insn (\"ldr%d6\\t%0, %1\", operands); + else + arm_output_asm_insn (\"mov%d6\\t%0, %1\", operands); + } + return \"\"; +} +" +[(set_attr "conds" "clob") + (set_attr "length" "2,3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 5 "comparison_operator" + [(match_operand:SI 3 "s_register_operand" "r,r") + (match_operand:SI 4 "arm_add_operand" "rL,rL")]) + (match_operand:SI 1 "arm_rhs_operand" "0,?rI") + (not:SI + (match_operand:SI 2 "s_register_operand" "r,r")))) + (clobber (reg 24))] + "" + "#" +[(set_attr "conds" "clob") + (set_attr "length" "2,3")]) + +;; if (GET_CODE (operands[3]) == CONST_INT +;; && !const_ok_for_arm (INTVAL (operands[3]))) +;; arm_output_asm_insn (\"cmn\\t%2, #%n3\", operands); +;; else +;; arm_output_asm_insn (\"cmp\\t%2, %3\", operands); +;; if (which_alternative != 0) +;; arm_output_asm_insn (\"mov%d1\\t%0, %4\", operands); +;; return arm_output_asm_insn (\"mvn%D1\\t%0, %5\", operands); + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 5 "comparison_operator" + [(match_operand:SI 3 "s_register_operand" "r,r") + (match_operand:SI 4 "arm_add_operand" "rL,rL")]) + (not:SI + (match_operand:SI 2 "s_register_operand" 
"r,r")) + (match_operand:SI 1 "arm_rhs_operand" "0,?rI"))) + (clobber (reg 24))] + "" + "* +{ + char pattern[100]; + + if (GET_CODE (operands[30]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[4]))) + arm_output_asm_insn (\"cmn\\t%3, #%n4\", operands); + else + arm_output_asm_insn (\"cmp\\t%3, %4\", operands); + if (which_alternative != 0) + arm_output_asm_insn (\"mov%D5\\t%0, %1\", operands); + return arm_output_asm_insn (\"mvn%d5\\t%0, %2\", operands); + +} +" +[(set_attr "conds" "clob") + (set_attr "length" "2,3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 6 "comparison_operator" + [(match_operand:SI 4 "s_register_operand" "r,r") + (match_operand:SI 5 "arm_add_operand" "rL,rL")]) + (match_operator:SI 7 "shift_operator" + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_rhs_operand" "rn,rn")]) + (match_operand:SI 1 "arm_rhs_operand" "0,?rI"))) + (clobber (reg 24))] + "" + "* +{ + char pattern[100]; + + if (GET_CODE (operands[5]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[5]))) + arm_output_asm_insn (\"cmn\\t%4, #%n5\", operands); + else + arm_output_asm_insn (\"cmp\\t%4, %5\", operands); + if (which_alternative != 0) + arm_output_asm_insn (\"mov%D6\\t%0, %1\", operands); + sprintf (pattern, \"mov%%d6\\t%%0, %%2, %s %%3\", + shift_instr (GET_CODE (operands[7]), &operands[3])); + return arm_output_asm_insn (pattern, operands); +} +" +[(set_attr "conds" "clob") + (set_attr "length" "2,3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 6 "comparison_operator" + [(match_operand:SI 4 "s_register_operand" "r,r") + (match_operand:SI 5 "arm_add_operand" "rL,rL")]) + (match_operand:SI 1 "arm_rhs_operand" "0,?rI") + (match_operator:SI 7 "shift_operator" + [(match_operand:SI 2 "s_register_operand" "r,r") + (match_operand:SI 3 "arm_rhs_operand" "rn,rn")]))) + (clobber (reg 24))] + "" + "* +{ + 
char pattern[100]; + + if (GET_CODE (operands[5]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[5]))) + arm_output_asm_insn (\"cmn\\t%4, #%n5\", operands); + else + arm_output_asm_insn (\"cmp\\t%4, %5\", operands); + if (which_alternative != 0) + arm_output_asm_insn (\"mov%d6\\t%0, %1\", operands); + sprintf (pattern, \"mov%%D6\\t%%0, %%2, %s %%3\", + shift_instr (GET_CODE (operands[7]), &operands[3])); + return arm_output_asm_insn (pattern, operands); +} +" +[(set_attr "conds" "clob") + (set_attr "length" "2,3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (if_then_else:SI (match_operator 7 "comparison_operator" + [(match_operand:SI 5 "s_register_operand" "r") + (match_operand:SI 6 "arm_add_operand" "rL")]) + (match_operator:SI 8 "shift_operator" + [(match_operand:SI 1 "s_register_operand" "r") + (match_operand:SI 2 "arm_rhs_operand" "rn")]) + (match_operator:SI 9 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "arm_rhs_operand" "rI")]))) + (clobber (reg 24))] + "" + "* +{ + char pattern[100]; + + if (GET_CODE (operands[6]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[6]))) + arm_output_asm_insn (\"cmn\\t%5, #%n6\", operands); + else + arm_output_asm_insn (\"cmp\\t%5, %6\", operands); + sprintf (pattern, \"mov%%d7\\t%%0, %%1, %s %%2\", + shift_instr (GET_CODE (operands[8]), &operands[2])); + arm_output_asm_insn (pattern, operands); + sprintf (pattern, \"mov%%D7\\t%%0, %%3, %s %%4\", + shift_instr (GET_CODE (operands[9]), &operands[4])); + return arm_output_asm_insn (pattern, operands); +} +" +[(set_attr "conds" "clob") + (set_attr "length" "3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (if_then_else:SI (match_operator 6 "comparison_operator" + [(match_operand:SI 4 "s_register_operand" "r") + (match_operand:SI 5 "arm_add_operand" "rL")]) + (not:SI (match_operand:SI 1 "s_register_operand" "r")) + (match_operator:SI 7 "shiftable_operator" + 
[(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_rhs_operand" "rI")]))) + (clobber (reg 24))] + "" + "* +{ + char pattern[100]; + + if (GET_CODE (operands[5]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[5]))) + arm_output_asm_insn (\"cmn\\t%4, #%n5\", operands); + else + arm_output_asm_insn (\"cmp\\t%4, %5\", operands); + arm_output_asm_insn (\"mvn%d6\\t%0, %1\", operands); + sprintf (pattern, \"%s%%D6\\t%%0, %%2, %%3\", arithmetic_instr (operands[7], + FALSE)); + return arm_output_asm_insn (pattern, operands); +} +" +[(set_attr "conds" "clob") + (set_attr "length" "3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (if_then_else:SI (match_operator 6 "comparison_operator" + [(match_operand:SI 4 "s_register_operand" "r") + (match_operand:SI 5 "arm_add_operand" "rL")]) + (match_operator:SI 7 "shiftable_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "arm_rhs_operand" "rI")]) + (not:SI (match_operand:SI 1 "s_register_operand" "r")))) + (clobber (reg 24))] + "" + "* +{ + char pattern[100]; + + if (GET_CODE (operands[5]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[5]))) + arm_output_asm_insn (\"cmn\\t%4, #%n5\", operands); + else + arm_output_asm_insn (\"cmp\\t%4, %5\", operands); + arm_output_asm_insn (\"mvn%D6\\t%0, %1\", operands); + sprintf (pattern, \"%s%%d6\\t%%0, %%2, %%3\", arithmetic_instr (operands[7], + FALSE)); + return arm_output_asm_insn (pattern, operands); +} +" +[(set_attr "conds" "clob") + (set_attr "length" "3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 5 "comparison_operator" + [(match_operand:SI 3 "s_register_operand" "r,r") + (match_operand:SI 4 "arm_add_operand" "rL,rL")]) + (neg:SI + (match_operand:SI 2 "s_register_operand" "r,r")) + (match_operand:SI 1 "arm_rhs_operand" "0,?rI"))) + (clobber (reg:CC 24))] + "" + "* + if (GET_CODE (operands[4]) == CONST_INT + && 
!const_ok_for_arm (INTVAL (operands[4]))) + arm_output_asm_insn (\"cmn\\t%3, #%n4\", operands); + else + arm_output_asm_insn (\"cmp\\t%3, %4\", operands); + if (which_alternative != 0) + arm_output_asm_insn (\"mov%D5\\t%0, %1\", operands); + return arm_output_asm_insn (\"rsb%d5\\t%0, %2, #0\", operands); +" +[(set_attr "conds" "clob") + (set_attr "length" "2,3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 5 "comparison_operator" + [(match_operand:SI 3 "s_register_operand" "r,r") + (match_operand:SI 4 "arm_add_operand" "rL,rL")]) + (match_operand:SI 1 "arm_rhs_operand" "0,?rI") + (neg:SI + (match_operand:SI 2 "s_register_operand" "r,r")))) + (clobber (reg:CC 24))] + "" + "* + if (GET_CODE (operands[4]) == CONST_INT + && !const_ok_for_arm (INTVAL (operands[4]))) + arm_output_asm_insn (\"cmn\\t%3, #%n4\", operands); + else + arm_output_asm_insn (\"cmp\\t%3, %4\", operands); + if (which_alternative != 0) + arm_output_asm_insn (\"mov%d5\\t%0, %1\", operands); + return arm_output_asm_insn (\"rsb%D5\\t%0, %2, #0\", operands); +" +[(set_attr "conds" "clob") + (set_attr "length" "2,3")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (match_operator:SI 1 "shiftable_operator" + [(match_operand:SI 2 "memory_operand" "m") + (match_operand:SI 3 "memory_operand" "m")])) + (clobber (match_scratch:SI 4 "=r"))] + "adjacent_mem_locations (operands[2], operands[3])" + "* +{ + rtx ldm[3]; + rtx arith[3]; + char pattern[100]; + int val1 = 0, val2 = 0; + + sprintf (pattern, \"%s\\t%%0, %%1, %%2\", + arithmetic_instr (operands[1], FALSE)); + if (REGNO (operands[0]) > REGNO (operands[4])) + { + ldm[1] = operands[4]; + ldm[2] = operands[0]; + } + else + { + ldm[1] = operands[0]; + ldm[2] = operands[4]; + } + if (GET_CODE (XEXP (operands[2], 0)) != REG) + val1 = INTVAL (XEXP (XEXP (operands[2], 0), 1)); + if (GET_CODE (XEXP (operands[3], 0)) != REG) + val2 = INTVAL (XEXP (XEXP (operands[3], 0), 
1)); + arith[0] = operands[0]; + if (val1 < val2) + { + arith[1] = ldm[1]; + arith[2] = ldm[2]; + } + else + { + arith[1] = ldm[2]; + arith[2] = ldm[1]; + } + if (val1 && val2) + { + rtx ops[3]; + ldm[0] = ops[0] = operands[4]; + ops[1] = XEXP (XEXP (operands[2], 0), 0); + ops[2] = XEXP (XEXP (operands[2], 0), 1); + output_add_immediate (ops); + if (val1 < val2) + arm_output_asm_insn (\"ldmia\\t%0, {%1, %2}\", ldm); + else + arm_output_asm_insn (\"ldmda\\t%0, {%1, %2}\", ldm); + } + else if (val1) + { + ldm[0] = XEXP (operands[3], 0); + if (val1 < val2) + arm_output_asm_insn (\"ldmda\\t%0, {%1, %2}\", ldm); + else + arm_output_asm_insn (\"ldmia\\t%0, {%1, %2}\", ldm); + } + else + { + ldm[0] = XEXP (operands[2], 0); + if (val1 < val2) + arm_output_asm_insn (\"ldmia\\t%0, {%1, %2}\", ldm); + else + arm_output_asm_insn (\"ldmda\\t%0, {%1, %2}\", ldm); + } + return arm_output_asm_insn (pattern, arith); +} +" +[(set_attr "length" "3") + (set_attr "type" "load")]) + +;; the arm can support extended pre-inc instructions + +;; In all these cases, we use operands 0 and 1 for the register being +;; incremented because those are the operands that local-alloc will +;; tie and these are the pair most likely to be tieable (and the ones +;; that will benefit the most). + +;; We reject the frame pointer if it occurs anywhere in these patterns since +;; elimination will cause too many headaches. 
+ +(define_insn "" + [(set (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0") + (match_operand:SI 2 "index_operand" "rJ"))) + (match_operand:QI 3 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "* + return arm_output_asm_insn (\"strb\\t%3, [%0, %2]!\", operands); +" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r"))) + (match_operand:QI 3 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "* + return arm_output_asm_insn (\"strb\\t%3, [%0, -%2]!\", operands); +" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (match_operand:QI 3 "s_register_operand" "=r") + (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0") + (match_operand:SI 2 "index_operand" "rJ")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "* + return arm_output_asm_insn (\"ldrb\\t%3, [%0, %2]!\", operands); +" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:QI 3 "s_register_operand" "=r") + (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_dup 
2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "* + return arm_output_asm_insn (\"ldrb\\t%3, [%0, -%2]!\", operands); +" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:SI 3 "s_register_operand" "=r") + (zero_extend:SI + (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0") + (match_operand:SI 2 "index_operand" "rJ"))))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "* + return arm_output_asm_insn (\"ldrb\\t%3, [%0, %2]!\\t@ z_extendqisi\", + operands); +" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:SI 3 "s_register_operand" "=r") + (zero_extend:SI + (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r"))))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "* + return arm_output_asm_insn (\"ldrb\\t%3, [%0, -%2]!\\t@ z_extendqisi\", + operands); +" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0") + (match_operand:SI 2 "index_operand" "rJ"))) + (match_operand:SI 3 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "* + return 
arm_output_asm_insn (\"str\\t%3, [%0, %2]!\", operands); +" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r"))) + (match_operand:SI 3 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "* + return arm_output_asm_insn (\"str\\t%3, [%0, -%2]!\", operands); +" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (match_operand:SI 3 "s_register_operand" "=r") + (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0") + (match_operand:SI 2 "index_operand" "rJ")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "* + return arm_output_asm_insn (\"ldr\\t%3, [%0, %2]!\", operands); +" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:SI 3 "s_register_operand" "=r") + (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "* + return arm_output_asm_insn (\"ldr\\t%3, [%0, -%2]!\", operands); +" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:HI 3 "s_register_operand" "=r") + (mem:HI (plus:SI (match_operand:SI 1 "s_register_operand" "%0") + (match_operand:SI 2 "index_operand" "rJ")))) + 
(set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "* + return arm_output_asm_insn (\"ldr\\t%3, [%0, %2]!\\t@ loadhi\", operands); +" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:HI 3 "s_register_operand" "=r") + (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "r")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_dup 2)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && (GET_CODE (operands[2]) != REG + || REGNO (operands[2]) != FRAME_POINTER_REGNUM)" + "* + return arm_output_asm_insn (\"ldr\\t%3, [%0, -%2]!\\t@ loadhi\", operands); +" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (mem:QI (plus:SI (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")]) + (match_operand:SI 1 "s_register_operand" "0"))) + (match_operand:QI 5 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)]) + (match_dup 1)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "* +{ + char instr[100]; + + sprintf (instr, \"strb\\t%%5, [%%0, %%3, %s %%4]!\", + shift_instr (GET_CODE (operands[2]), &operands[4])); + return arm_output_asm_insn (instr, operands); +} +" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")]))) + 
(match_operand:QI 5 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3) + (match_dup 4)])))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "* +{ + char instr[100]; + + sprintf (instr, \"strb\\t%%5, [%%0, -%%3, %s %%4]!\", + shift_instr (GET_CODE (operands[2]), &operands[4])); + return arm_output_asm_insn (instr, operands); +} +" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (match_operand:QI 5 "s_register_operand" "=r") + (mem:QI (plus:SI (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")]) + (match_operand:SI 1 "s_register_operand" "0")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)]) + (match_dup 1)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "* +{ + char instr[100]; + + sprintf (instr, \"ldrb\\t%%5, [%%0, %%3, %s %%4]!\", + shift_instr (GET_CODE (operands[2]), &operands[4])); + return arm_output_asm_insn (instr, operands); +} +" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:QI 5 "s_register_operand" "=r") + (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")])))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3) + (match_dup 4)])))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "* +{ + char instr[100]; + + sprintf (instr, \"ldrb\\t%%5, [%%0, -%%3, %s %%4]!\", + shift_instr 
(GET_CODE (operands[2]), &operands[4])); + return arm_output_asm_insn (instr, operands); +} +" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (mem:SI (plus:SI (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")]) + (match_operand:SI 1 "s_register_operand" "0"))) + (match_operand:SI 5 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)]) + (match_dup 1)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "* +{ + char instr[100]; + + sprintf (instr, \"str\\t%%5, [%%0, %%3, %s %%4]!\", + shift_instr (GET_CODE (operands[2]), &operands[4])); + return arm_output_asm_insn (instr, operands); +} +" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")]))) + (match_operand:SI 5 "s_register_operand" "r")) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3) + (match_dup 4)])))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "* +{ + char instr[100]; + + sprintf (instr, \"str\\t%%5, [%%0, -%%3, %s %%4]!\", + shift_instr (GET_CODE (operands[2]), &operands[4])); + return arm_output_asm_insn (instr, operands); +} +" +[(set_attr "type" "store1")]) + +(define_insn "" + [(set (match_operand:SI 5 "s_register_operand" "=r") + (mem:SI (plus:SI (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")]) + (match_operand:SI 1 "s_register_operand" "0")))) + (set (match_operand:SI 0 
"s_register_operand" "=r") + (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)]) + (match_dup 1)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "* +{ + char instr[100]; + + sprintf (instr, \"ldr\\t%%5, [%%0, %%3, %s %%4]!\", + shift_instr (GET_CODE (operands[2]), &operands[4])); + return arm_output_asm_insn (instr, operands); +} +" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:SI 5 "s_register_operand" "=r") + (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")])))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3) + (match_dup 4)])))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "* +{ + char instr[100]; + + sprintf (instr, \"ldr\\t%%5, [%%0, -%%3, %s %%4]!\", + shift_instr (GET_CODE (operands[2]), &operands[4])); + return arm_output_asm_insn (instr, operands); +} +" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:HI 5 "s_register_operand" "=r") + (mem:HI (plus:SI (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")]) + (match_operand:SI 1 "s_register_operand" "0")))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)]) + (match_dup 1)))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "* +{ + char instr[100]; + + sprintf (instr, \"ldr\\t%%5, [%%0, %%3, %s %%4]!\\t@ loadhi\", + shift_instr (GET_CODE (operands[2]), &operands[4])); + return arm_output_asm_insn (instr, 
operands); +} +" +[(set_attr "type" "load")]) + +(define_insn "" + [(set (match_operand:HI 5 "s_register_operand" "=r") + (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operator:SI 2 "shift_operator" + [(match_operand:SI 3 "s_register_operand" "r") + (match_operand:SI 4 "const_shift_operand" "n")])))) + (set (match_operand:SI 0 "s_register_operand" "=r") + (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3) + (match_dup 4)])))] + "REGNO (operands[0]) != FRAME_POINTER_REGNUM + && REGNO (operands[1]) != FRAME_POINTER_REGNUM + && REGNO (operands[3]) != FRAME_POINTER_REGNUM" + "* +{ + char instr[100]; + + sprintf (instr, \"ldr\\t%%5, [%%0, -%%3, %s %%4]!\\t@ loadhi\", + shift_instr (GET_CODE (operands[2]), &operands[4])); + return arm_output_asm_insn (instr, operands); +} +" +[(set_attr "type" "load")]) + +; It can also support extended post-inc expressions, but combine doesn't +; try these.... +; It doesn't seem worth adding peepholes for anything but the most common +; cases since, unlike combine, the increment must immediately follow the load +; for this pattern to match. +; When loading we must watch to see that the base register isn't trampled by +; the load. In such cases this isn't a post-inc expression. 
+ +(define_peephole + [(set (mem:QI (match_operand:SI 0 "s_register_operand" "+r")) + (match_operand:QI 2 "s_register_operand" "r")) + (set (match_dup 0) + (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))] + "" + "* + return arm_output_asm_insn (\"strb\\t%2, [%0], %1\", operands); +") + +(define_peephole + [(set (match_operand:QI 0 "s_register_operand" "=r") + (mem:QI (match_operand:SI 1 "s_register_operand" "+r"))) + (set (match_dup 1) + (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))] + "REGNO(operands[0]) != REGNO(operands[1]) + && (GET_CODE (operands[2]) != REG + || REGNO(operands[0]) != REGNO (operands[2]))" + "* + return arm_output_asm_insn (\"ldrb\\t%0, [%1], %2\", operands); +") + +(define_peephole + [(set (mem:SI (match_operand:SI 0 "s_register_operand" "+r")) + (match_operand:SI 2 "s_register_operand" "r")) + (set (match_dup 0) + (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))] + "" + "* + return arm_output_asm_insn (\"str\\t%2, [%0], %1\", operands); +") + +(define_peephole + [(set (match_operand:HI 0 "s_register_operand" "=r") + (mem:HI (match_operand:SI 1 "s_register_operand" "+r"))) + (set (match_dup 1) + (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))] + "REGNO(operands[0]) != REGNO(operands[1]) + && (GET_CODE (operands[2]) != REG + || REGNO(operands[0]) != REGNO (operands[2]))" + "* + return arm_output_asm_insn (\"ldr\\t%0, [%1], %2\\t@ loadhi\", operands); +") + +(define_peephole + [(set (match_operand:SI 0 "s_register_operand" "=r") + (mem:SI (match_operand:SI 1 "s_register_operand" "+r"))) + (set (match_dup 1) + (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))] + "REGNO(operands[0]) != REGNO(operands[1]) + && (GET_CODE (operands[2]) != REG + || REGNO(operands[0]) != REGNO (operands[2]))" + "* + return arm_output_asm_insn (\"ldr\\t%0, [%1], %2\", operands); +") + +; This pattern is never tried by combine, so do it as a peephole + +(define_peephole + [(set 
(match_operand:SI 0 "s_register_operand" "=r") + (match_operand:SI 1 "s_register_operand" "r")) + (set (match_operand 2 "cc_register" "") + (compare (match_dup 1) (const_int 0)))] + "" + "* + return arm_output_asm_insn (\"subs\\t%0, %1, #0\", operands); +" +[(set_attr "conds" "set")]) + +; Peepholes to spot possible load- and store-multiples, if the ordering is +; reversed, check that the memory references aren't volatile. + +(define_peephole + [(set (match_operand:SI 0 "s_register_operand" "=r") + (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 12)))) + (set (match_operand:SI 2 "s_register_operand" "=r") + (mem:SI (plus:SI (match_dup 1) (const_int 8)))) + (set (match_operand:SI 3 "s_register_operand" "=r") + (mem:SI (plus:SI (match_dup 1) (const_int 4)))) + (set (match_operand:SI 4 "s_register_operand" "=r") + (mem:SI (match_dup 1)))] + "REGNO (operands[0]) > REGNO (operands[2]) + && REGNO (operands[2]) > REGNO (operands[3]) + && REGNO (operands[3]) > REGNO (operands[4]) + && !(REGNO (operands[1]) == REGNO (operands[0]) + || REGNO (operands[1]) == REGNO (operands[2]) + || REGNO (operands[1]) == REGNO (operands[3]) + || REGNO (operands[1]) == REGNO (operands[4])) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (insn))) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (prev_nonnote_insn (insn)))) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (prev_nonnote_insn + (prev_nonnote_insn (insn))))) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (prev_nonnote_insn + (prev_nonnote_insn + (prev_nonnote_insn (insn))))))" + "* + return arm_output_asm_insn (\"ldmia\\t%1, {%4, %3, %2, %0}\\t@ phole ldm\", + operands); +") + +(define_peephole + [(set (match_operand:SI 0 "s_register_operand" "=r") + (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 8)))) + (set (match_operand:SI 2 "s_register_operand" "=r") + (mem:SI (plus:SI (match_dup 1) (const_int 4)))) + (set (match_operand:SI 3 "s_register_operand" "=r") + (mem:SI (match_dup 1)))] + "REGNO (operands[0]) > 
REGNO (operands[2]) + && REGNO (operands[2]) > REGNO (operands[3]) + && !(REGNO (operands[1]) == REGNO (operands[0]) + || REGNO (operands[1]) == REGNO (operands[2]) + || REGNO (operands[1]) == REGNO (operands[3])) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (insn))) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (prev_nonnote_insn (insn)))) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (prev_nonnote_insn + (prev_nonnote_insn (insn)))))" + "* + return arm_output_asm_insn (\"ldmia\\t%1, {%3, %2, %0}\\t@ phole ldm\", + operands); +") + +(define_peephole + [(set (match_operand:SI 0 "s_register_operand" "=r") + (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 4)))) + (set (match_operand:SI 2 "s_register_operand" "=r") + (mem:SI (match_dup 1)))] + "REGNO (operands[0]) > REGNO (operands[2]) + && !(REGNO (operands[1]) == REGNO (operands[0]) + || REGNO (operands[1]) == REGNO (operands[2])) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (insn))) + && !MEM_VOLATILE_P (SET_SRC (PATTERN (prev_nonnote_insn (insn))))" + "* + return arm_output_asm_insn (\"ldmia\\t%1, {%2, %0}\\t@ phole ldm\", + operands); +") + +(define_peephole + [(set (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 12))) + (match_operand:SI 0 "s_register_operand" "r")) + (set (mem:SI (plus:SI (match_dup 1) (const_int 8))) + (match_operand:SI 2 "s_register_operand" "r")) + (set (mem:SI (plus:SI (match_dup 1) (const_int 4))) + (match_operand:SI 3 "s_register_operand" "r")) + (set (mem:SI (match_dup 1)) + (match_operand:SI 4 "s_register_operand" "r"))] + "REGNO (operands[0]) > REGNO (operands[2]) + && REGNO (operands[2]) > REGNO (operands[3]) + && REGNO (operands[3]) > REGNO (operands[4]) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (insn))) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (prev_nonnote_insn (insn)))) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (prev_nonnote_insn + (prev_nonnote_insn (insn))))) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (prev_nonnote_insn + (prev_nonnote_insn + 
(prev_nonnote_insn (insn))))))" + "* + return arm_output_asm_insn (\"stmia\\t%1, {%4, %3, %2, %0}\\t@ phole stm\", + operands); +") + +(define_peephole + [(set (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 8))) + (match_operand:SI 0 "s_register_operand" "r")) + (set (mem:SI (plus:SI (match_dup 1) (const_int 4))) + (match_operand:SI 2 "s_register_operand" "r")) + (set (mem:SI (match_dup 1)) + (match_operand:SI 3 "s_register_operand" "r"))] + "REGNO (operands[0]) > REGNO (operands[2]) + && REGNO (operands[2]) > REGNO (operands[3]) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (insn))) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (prev_nonnote_insn (insn)))) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (prev_nonnote_insn + (prev_nonnote_insn (insn)))))" + "* + return arm_output_asm_insn (\"stmia\\t%1, {%3, %2, %0}\\t@ phole stm\", + operands); +") + +(define_peephole + [(set (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 4))) + (match_operand:SI 0 "s_register_operand" "r")) + (set (mem:SI (match_dup 1)) + (match_operand:SI 2 "s_register_operand" "r"))] + "REGNO (operands[0]) > REGNO (operands[2]) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (insn))) + && !MEM_VOLATILE_P (SET_DEST (PATTERN (prev_nonnote_insn (insn))))" + "* + return arm_output_asm_insn (\"stmia\\t%1, {%2, %0}\\t@ phole stm\", + operands); +") + +;; A call followed by return can be replaced by restoring the regs and +;; jumping to the subroutine, provided we aren't passing the address of +;; any of our local variables. If we call alloca then this is unsafe +;; since restoring the frame frees the memory, which is not what we want. +;; Sometimes the return might have been targeted by the final prescan: +;; if so then emit a proper return insn as well. +;; Unfortunately, if the frame pointer is required, we don't know if the +;; current function has any implicit stack pointer adjustments that will +;; be restored by the return: we can't therefore do a tail call. 
+;; Another unfortunate that we can't handle is if current_function_args_size +;; is non-zero: in this case elimination of the argument pointer assumed +;; that lr was pushed onto the stack, so eliminating upsets the offset +;; calculations. + +(define_peephole + [(parallel [(call (mem:SI (match_operand:SI 0 "" "i")) + (match_operand:SI 1 "general_operand" "g")) + (clobber (reg:SI 14))]) + (return)] + "(GET_CODE (operands[0]) == SYMBOL_REF && USE_RETURN_INSN + && !get_frame_size () && !current_function_calls_alloca + && !frame_pointer_needed && !current_function_args_size)" + "* +{ + extern rtx arm_target_insn; + extern int arm_ccfsm_state, arm_current_cc; + + if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn)) + { + arm_current_cc ^= 1; + output_return_instruction (NULL, TRUE); + arm_ccfsm_state = 0; + arm_target_insn = NULL; + } + + output_return_instruction (NULL, FALSE); + return (arm_output_asm_insn (\"b\\t%a0\", operands)); +}" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set_attr "length" "2")]) + +(define_peephole + [(parallel [(set (match_operand 0 "s_register_operand" "=rf") + (call (mem:SI (match_operand:SI 1 "" "i")) + (match_operand:SI 2 "general_operand" "g"))) + (clobber (reg:SI 14))]) + (return)] + "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN + && !get_frame_size () && !current_function_calls_alloca + && !frame_pointer_needed && !current_function_args_size)" + "* +{ + extern rtx arm_target_insn; + extern int arm_ccfsm_state, arm_current_cc; + + if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn)) + { + arm_current_cc ^= 1; + output_return_instruction (NULL, TRUE); + arm_ccfsm_state = 0; + arm_target_insn = NULL; + } + + output_return_instruction (NULL, FALSE); + return (arm_output_asm_insn (\"b\\t%a1\", operands)); +}" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string 
"nocond"))) + (set_attr "length" "2")]) + +;; As above but when this function is not void, we must be returning the +;; result of the called subroutine. + +(define_peephole + [(parallel [(set (match_operand 0 "s_register_operand" "=rf") + (call (mem:SI (match_operand:SI 1 "" "i")) + (match_operand:SI 2 "general_operand" "g"))) + (clobber (reg:SI 14))]) + (use (match_dup 0)) + (return)] + "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN + && !get_frame_size () && !current_function_calls_alloca + && !frame_pointer_needed && !current_function_args_size)" + "* +{ + extern rtx arm_target_insn; + extern int arm_ccfsm_state, arm_current_cc; + + if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn)) + { + arm_current_cc ^= 1; + output_return_instruction (NULL, TRUE); + arm_ccfsm_state = 0; + arm_target_insn = NULL; + } + + output_return_instruction (NULL, FALSE); + return (arm_output_asm_insn (\"b\\t%a1\", operands)); +}" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set_attr "length" "2")]) + +;; If calling a subroutine and then jumping back to somewhere else, but not +;; too far away, then we can set the link register with the branch address +;; and jump direct to the subroutine. On return from the subroutine +;; execution continues at the branch; this avoids a prefetch stall. +;; We use the length attribute (via short_branch ()) to establish whether or +;; not this is possible, this is the same asthe sparc does. 
+ +(define_peephole + [(parallel[(call (mem:SI (match_operand:SI 0 "" "i")) + (match_operand:SI 1 "general_operand" "g")) + (clobber (reg:SI 14))]) + (set (pc) + (label_ref (match_operand 2 "" "")))] + "GET_CODE (operands[0]) == SYMBOL_REF + && short_branch (INSN_UID (insn), INSN_UID (operands[2])) + && arm_insn_not_targeted (insn)" + "* +{ + int backward = arm_backwards_branch (INSN_UID (insn), + INSN_UID (operands[2])); + +#if 0 + /* Putting this in means that TARGET_6 code will ONLY run on an arm6 or + * above, leaving it out means that the code will still run on an arm 2 or 3 + */ + if (TARGET_6) + { + if (backward) + arm_output_asm_insn (\"sub\\tlr, pc, #(8 + . -%l2)\", operands); + else + arm_output_asm_insn (\"add\\tlr, pc, #(%l2 - . -8)\", operands); + } + else +#endif + { + arm_output_asm_insn (\"mov\\tlr, pc\\t@ protect cc\"); + if (backward) + arm_output_asm_insn (\"sub\\tlr, lr, #(4 + . -%l2)\", operands); + else + arm_output_asm_insn (\"add\\tlr, lr, #(%l2 - . -4)\", operands); + } + return arm_output_asm_insn (\"b\\t%a0\", operands); +}" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set (attr "length") + (if_then_else (eq_attr "cpu" "arm6") + (const_int 2) + (const_int 3)))]) + +(define_peephole + [(parallel[(set (match_operand:SI 0 "s_register_operand" "=r") + (call (mem:SI (match_operand:SI 1 "" "i")) + (match_operand:SI 2 "general_operand" "g"))) + (clobber (reg:SI 14))]) + (set (pc) + (label_ref (match_operand 3 "" "")))] + "GET_CODE (operands[0]) == SYMBOL_REF + && short_branch (INSN_UID (insn), INSN_UID (operands[3])) + && arm_insn_not_targeted (insn)" + "* +{ + int backward = arm_backwards_branch (INSN_UID (insn), + INSN_UID (operands[3])); + +#if 0 + /* Putting this in means that TARGET_6 code will ONLY run on an arm6 or + * above, leaving it out means that the code will still run on an arm 2 or 3 + */ + if (TARGET_6) + { + if (backward) + arm_output_asm_insn (\"sub\\tlr, 
pc, #(8 + . -%l3)\", operands); + else + arm_output_asm_insn (\"add\\tlr, pc, #(%l3 - . -8)\", operands); + } + else +#endif + { + arm_output_asm_insn (\"mov\\tlr, pc\\t@ protect cc\"); + if (backward) + arm_output_asm_insn (\"sub\\tlr, lr, #(4 + . -%l3)\", operands); + else + arm_output_asm_insn (\"add\\tlr, lr, #(%l3 - . -4)\", operands); + } + return arm_output_asm_insn (\"b\\t%a1\", operands); +}" +[(set (attr "conds") + (if_then_else (eq_attr "cpu" "arm6") + (const_string "clob") + (const_string "nocond"))) + (set (attr "length") + (if_then_else (eq_attr "cpu" "arm6") + (const_int 2) + (const_int 3)))]) + +(define_split + [(set (pc) + (if_then_else (match_operator 0 "comparison_operator" + [(match_operator:SI 1 "shift_operator" + [(match_operand:SI 2 "s_register_operand" "r") + (match_operand:SI 3 "nonmemory_operand" "rn")]) + (match_operand:SI 4 "s_register_operand" "r")]) + (label_ref (match_operand 5 "" "")) + (pc))) + (clobber (reg 24))] + "" + [(set (reg:CC 24) + (compare:CC (match_dup 4) + (match_op_dup 1 [(match_dup 2) (match_dup 3)]))) + (set (pc) + (if_then_else (match_op_dup 0 [(reg 24) (const_int 0)]) + (label_ref (match_dup 5)) + (pc)))] + " + operands[0] = gen_rtx (swap_condition (GET_CODE (operands[0])), VOIDmode, + operands[1], operands[2]); +") + +(define_split + [(set (match_operand:SI 0 "s_register_operand" "") + (and:SI (ge:SI (match_operand:SI 1 "s_register_operand" "") + (const_int 0)) + (neg:SI (match_operator:SI 2 "comparison_operator" + [(match_operand:SI 3 "s_register_operand" "") + (match_operand:SI 4 "arm_rhs_operand" "")])))) + (clobber (match_operand:SI 5 "s_register_operand" ""))] + "" + [(set (match_dup 5) (not:SI (ashiftrt:SI (match_dup 1) (const_int 31)))) + (set (match_dup 0) (and:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)]) + (match_dup 5)))] + "") + +;; This pattern can be used because cc_noov mode implies that the following +;; branch will be an equality (EQ or NE), so the sign extension is not +;; needed. 
Combine doesn't eliminate these because by the time it sees the
+;; branch it no longer knows that the data came from memory.
+
+(define_insn ""
+  [(set (reg:CC_NOOV 24)
+        (compare:CC_NOOV
+         (ashift:SI (subreg:SI (match_operand:QI 0 "memory_operand" "m") 0)
+                    (const_int 24))
+         (match_operand 1 "immediate_operand" "I")))
+   (clobber (match_scratch:SI 2 "=r"))]
+  "((unsigned long) INTVAL (operands[1]))
+   == (((unsigned long) INTVAL (operands[1])) >> 24) << 24"
+  "*
+  operands[1] = GEN_INT (((unsigned long) INTVAL (operands[1])) >> 24);
+  arm_output_asm_insn (\"ldrb\\t%2, %0\", operands);
+  return arm_output_asm_insn (\"cmp\\t%2, %1\", operands);
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "2")
+ (set_attr "type" "load")])
+
+(define_expand "save_stack_nonlocal"
+  [(match_operand:DI 0 "memory_operand" "")
+   (match_operand:SI 1 "s_register_operand" "")]
+  ""
+  "
+{
+  /* We also need to save the frame pointer for non-local gotos */
+  emit_move_insn (operand_subword (operands[0], 0, 0, DImode),
+                  hard_frame_pointer_rtx);
+  emit_move_insn (operand_subword (operands[0], 1, 0, DImode), operands[1]);
+  DONE;
+}")
+
+(define_expand "restore_stack_nonlocal"
+  [(match_operand:SI 0 "s_register_operand" "")
+   (match_operand:DI 1 "memory_operand" "")]
+  ""
+  "
+{
+  /* Restore the frame pointer first, the stack pointer second. */
+  emit_move_insn (operands[0], operand_subword (operands[1], 1, 0, DImode));
+  emit_move_insn (hard_frame_pointer_rtx, operand_subword (operands[1], 0, 0,
+                                                           DImode));
+  DONE;
+}")
+
+;; This split is only used during output to reduce the number of patterns
+;; that need assembler instructions adding to them. We allowed the setting
+;; of the conditions to be implicit during rtl generation so that
+;; the conditional compare patterns would work. However this conflicts to
+;; some extent with the conditional data operations, so we have to split them
+;; up again here.
+ +(define_split + [(set (match_operand:SI 0 "s_register_operand" "") + (if_then_else:SI (match_operator 1 "comparison_operator" + [(match_operand 2 "" "") (match_operand 3 "" "")]) + (match_operand 4 "" "") + (match_operand 5 "" ""))) + (clobber (reg 24))] + "reload_completed" + [(set (match_dup 6) (match_dup 7)) + (set (match_dup 0) + (if_then_else:SI (match_op_dup 1 [(match_dup 6) (const_int 0)]) + (match_dup 4) + (match_dup 5)))] + " +{ + enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2], + operands[3]); + + operands[6] = gen_rtx (REG, mode, 24); + operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]); +} +") + + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r,r") + (if_then_else:SI (match_operator 4 "comparison_operator" + [(match_operand 3 "cc_register" "") (const_int 0)]) + (match_operand:SI 1 "arm_rhs_operand" "0,?rI") + (not:SI + (match_operand:SI 2 "s_register_operand" "r,r"))))] + "" + "* + if (which_alternative != 0) + arm_output_asm_insn (\"mov%d4\\t%0, %1\", operands); + return arm_output_asm_insn (\"mvn%D4\\t%0, %2\", operands); +" +[(set_attr "conds" "use") + (set_attr "length" "1,2")]) + +;; The next two patterns occur when an AND operation is followed by a +;; scc insn sequence + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 1) + (match_operand:SI 2 "immediate_operand" "n")))] + "" + "* + operands[2] = GEN_INT (1 << INTVAL (operands[2])); + arm_output_asm_insn (\"ands\\t%0, %1, %2\", operands); + return arm_output_asm_insn (\"mvnne\\t%0, #0\", operands); +" +[(set_attr "conds" "clob") + (set_attr "length" "2")]) + +(define_insn "" + [(set (match_operand:SI 0 "s_register_operand" "=r") + (not:SI + (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r") + (const_int 1) + (match_operand:SI 2 "immediate_operand" "n"))))] + "" + "* + operands[2] = GEN_INT (1 << INTVAL (operands[2])); + 
arm_output_asm_insn (\"tst\\t%1, %2\", operands); + arm_output_asm_insn (\"mvneq\\t%0, #0\", operands); + return arm_output_asm_insn (\"movne\\t%0, #0\", operands); +" +[(set_attr "conds" "clob") + (set_attr "length" "3")]) diff --git a/gcc/config/arm/xm-arm.h b/gcc/config/arm/xm-arm.h index d75ac79bcfb..c8eaf0d3545 100644 --- a/gcc/config/arm/xm-arm.h +++ b/gcc/config/arm/xm-arm.h @@ -2,6 +2,7 @@ Copyright (C) 1991, 1993 Free Software Foundation, Inc. Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl) and Martin Simmons (@harleqn.co.uk). + More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk) This file is part of GNU CC. @@ -29,6 +30,28 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #define HOST_BITS_PER_INT 32 #define HOST_BITS_PER_LONG 32 +/* A code distinguishing the floating point format of the host + machine. There are three defined values: IEEE_FLOAT_FORMAT, + VAX_FLOAT_FORMAT, and UNKNOWN_FLOAT_FORMAT. */ + +#define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT + +/* If not compiled with GNU C, use C alloca. */ +#ifndef __GNUC__ +#define USE_C_ALLOCA +#endif + +/* Define this if the library function putenv is available on your machine */ +#define HAVE_PUTENV 1 + +/* Define this if the library function vprintf is available on your machine */ +#define HAVE_VPRINTF 1 + +/* Define this to be 1 if you know the host compiler supports prototypes, even + if it doesn't define __STDC__, or define it to be 0 if you do not want any + prototypes when compiling GNU CC. */ +#define USE_PROTOTYPES 1 + /* target machine dependencies. tm.h is a symbolic link to the actual target specific file. */ #include "tm.h" @@ -38,3 +61,5 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #define FATAL_EXIT_CODE 33 /* EOF xm-arm.h */ + + -- 2.11.4.GIT