/* Functions and structures shared between arm and aarch64.

   Copyright (C) 1991-2016 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#ifndef GCC_AARCH_COMMON_PROTOS_H
#define GCC_AARCH_COMMON_PROTOS_H

extern int aarch_accumulator_forwarding (rtx_insn *, rtx_insn *);
extern int aarch_crypto_can_dual_issue (rtx_insn *, rtx_insn *);
extern int aarch_forward_to_shift_is_not_shifted_reg (rtx_insn *, rtx_insn *);
extern bool aarch_rev16_p (rtx);
extern bool aarch_rev16_shleft_mask_imm_p (rtx, machine_mode);
extern bool aarch_rev16_shright_mask_imm_p (rtx, machine_mode);
extern int arm_early_load_addr_dep (rtx, rtx);
extern int arm_early_store_addr_dep (rtx, rtx);
extern int arm_mac_accumulator_is_mul_result (rtx, rtx);
extern int arm_mac_accumulator_is_result (rtx, rtx);
extern int arm_no_early_alu_shift_dep (rtx, rtx);
extern int arm_no_early_alu_shift_value_dep (rtx, rtx);
extern int arm_no_early_mul_dep (rtx, rtx);
extern int arm_no_early_store_addr_dep (rtx, rtx);
extern bool arm_rtx_shift_left_p (rtx);

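/* The scheduling predicates above are typically used as guards in
   (define_bypass ...) entries in the per-core pipeline descriptions.  A
   hypothetical sketch (the reservation names "my_core_mul" and "my_core_mla"
   are made up for illustration):

     (define_bypass 1 "my_core_mul"
                    "my_core_mla"
                    "aarch_accumulator_forwarding")

   would shorten the latency between a multiply and a following
   multiply-accumulate when aarch_accumulator_forwarding confirms that the
   accumulator operand can be forwarded.  */
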
/* RTX cost table definitions.  These are used when tuning for speed rather
   than for size and should reflect the _additional_ cost over the cost
   of the fastest instruction in the machine, which is COSTS_N_INSNS (1).
   Therefore it's okay for some costs to be 0.
   Costs may not have a negative value.  */
struct alu_cost_table
{
  const int arith;		/* ADD/SUB.  */
  const int logical;		/* AND/ORR/EOR/BIC, etc.  */
  const int shift;		/* Simple shift.  */
  const int shift_reg;		/* Simple shift by reg.  */
  const int arith_shift;	/* Additional when arith also shifts...  */
  const int arith_shift_reg;	/* ... and when the shift is by a reg.  */
  const int log_shift;		/* Additional when logic also shifts...  */
  const int log_shift_reg;	/* ... and when the shift is by a reg.  */
  const int extend;		/* Zero/sign extension.  */
  const int extend_arith;	/* Extend and arith.  */
  const int bfi;		/* Bit-field insert.  */
  const int bfx;		/* Bit-field extraction.  */
  const int clz;		/* Count Leading Zeros.  */
  const int rev;		/* Reverse bits/bytes.  */
  const int non_exec;		/* Extra cost when not executing insn.  */
  const bool non_exec_costs_exec; /* True if non-execution must add the exec
				     cost.  */
};

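/* For example, on a hypothetical core where a plain ADD completes in one
   cycle but an ADD with a register-controlled shift takes two, a tuning
   table might set arith to 0 and arith_shift_reg to COSTS_N_INSNS (1):
   the former is as cheap as the fastest instruction, the latter costs one
   extra instruction's worth of time.  (Illustrative values only, not taken
   from any real tuning table.)  */
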
struct mult_cost_table
{
  const int simple;
  const int flag_setting;	/* Additional cost if multiply sets flags.  */
  const int extend;
  const int add;
  const int extend_add;
  const int idiv;
};

/* Calculations of LDM costs are complex.  We assume an initial cost
   (ldm_1st) which will load the number of registers mentioned in
   ldm_regs_per_insn_1st registers; then each additional
   ldm_regs_per_insn_subsequent registers cost one more insn.
   Similarly for STM operations.
   Therefore the ldm_regs_per_insn_1st/stm_regs_per_insn_1st and
   ldm_regs_per_insn_subsequent/stm_regs_per_insn_subsequent fields indicate
   the number of registers loaded/stored and are expressed by a simple integer
   and not by a COSTS_N_INSNS (N) expression.  */
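/* A minimal sketch (not part of the GCC interface) of how the fields above
   combine, assuming a hypothetical table T and a register count NREGS of at
   least T.ldm_regs_per_insn_1st:

     total = T.ldm_1st
	     + COSTS_N_INSNS ((nregs - T.ldm_regs_per_insn_1st
			       + T.ldm_regs_per_insn_subsequent - 1)
			      / T.ldm_regs_per_insn_subsequent);

   e.g. with ldm_1st = COSTS_N_INSNS (1), ldm_regs_per_insn_1st = 2 and
   ldm_regs_per_insn_subsequent = 2, a 6-register LDM is costed as
   COSTS_N_INSNS (1) + COSTS_N_INSNS (2).  */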
struct mem_cost_table
{
  const int load;
  const int load_sign_extend;	/* Additional to load cost.  */
  const int ldrd;		/* Cost of LDRD.  */
  const int ldm_1st;
  const int ldm_regs_per_insn_1st;
  const int ldm_regs_per_insn_subsequent;
  const int loadf;		/* SFmode.  */
  const int loadd;		/* DFmode.  */
  const int load_unaligned;	/* Extra for unaligned loads.  */
  const int store;
  const int strd;
  const int stm_1st;
  const int stm_regs_per_insn_1st;
  const int stm_regs_per_insn_subsequent;
  const int storef;		/* SFmode.  */
  const int stored;		/* DFmode.  */
  const int store_unaligned;	/* Extra for unaligned stores.  */
  const int loadv;		/* Vector load.  */
  const int storev;		/* Vector store.  */
};

struct fp_cost_table
{
  const int div;
  const int mult;
  const int mult_addsub;	/* Non-fused.  */
  const int fma;		/* Fused.  */
  const int addsub;
  const int fpconst;		/* Immediate.  */
  const int neg;		/* NEG and ABS.  */
  const int compare;
  const int widen;		/* Widen to this size.  */
  const int narrow;		/* Narrow from this size.  */
  const int toint;
  const int fromint;
  const int roundint;		/* V8 round to integral, remains FP format.  */
};

struct vector_cost_table
{
  const int alu;
};

struct cpu_cost_table
{
  const struct alu_cost_table alu;
  const struct mult_cost_table mult[2];	/* SImode and DImode.  */
  const struct mem_cost_table ldst;
  const struct fp_cost_table fp[2];	/* SFmode and DFmode.  */
  const struct vector_cost_table vect;
};

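/* A back end keeps an instance of this table in its tuning parameters and,
   when optimizing for speed, adds the relevant field on top of the base
   cost of COSTS_N_INSNS (1).  A minimal sketch (the "extra_cost" pointer
   and "speed_p" flag are hypothetical names, not defined in this header):

     int cost = COSTS_N_INSNS (1);
     if (speed_p)
       cost += extra_cost->alu.arith_shift_reg;

   costs a register-shifted arithmetic instruction as the base instruction
   cost plus the tuned extra cost.  */
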
#endif /* GCC_AARCH_COMMON_PROTOS_H */