;; ARM Cortex-A53 pipeline description
;; Copyright (C) 2013-2018 Free Software Foundation, Inc.
;;
;; Contributed by ARM Ltd.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful, but
;; WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;; General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

(define_automaton "cortex_a53")

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; General-purpose functional units.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; We use slot0 and slot1 to model constraints on which instructions may
;; issue in a given cycle.

(define_cpu_unit "cortex_a53_slot0" "cortex_a53")
(define_cpu_unit "cortex_a53_slot1" "cortex_a53")
(final_presence_set "cortex_a53_slot1" "cortex_a53_slot0")

(define_reservation "cortex_a53_slot_any"
                    "cortex_a53_slot0|cortex_a53_slot1")

(define_reservation "cortex_a53_single_issue"
                    "cortex_a53_slot0+cortex_a53_slot1")
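
;; Illustrative note: a reservation string of "cortex_a53_slot_any"
;; claims either issue slot and leaves the other free for a dual-issue
;; partner, while "cortex_a53_single_issue" claims both slots so nothing
;; else can issue in the same cycle.  A hypothetical serialising
;; instruction could therefore be described as
;;
;;   (define_insn_reservation "example_serialise" 1
;;     (eq_attr "tune" "cortexa53")
;;     "cortex_a53_single_issue")
;;
;; ("example_serialise" is a made-up name used only for illustration;
;; real uses of cortex_a53_single_issue appear in the load/store and
;; Advanced SIMD reservations below.)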

;; Used to model load and store pipelines.  Load/store instructions
;; can dual-issue with other instructions, but two load/stores cannot
;; simultaneously issue.

(define_cpu_unit "cortex_a53_store" "cortex_a53")
(define_cpu_unit "cortex_a53_load" "cortex_a53")
(define_cpu_unit "cortex_a53_ls_agen" "cortex_a53")
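
;; For example, the load and store reservations below all claim
;; cortex_a53_ls_agen in their first cycle, so an illustrative pair such
;; as
;;
;;   ldr  w0, [x1]
;;   add  w2, w3, w4
;;
;; can dual-issue, while
;;
;;   ldr  w0, [x1]
;;   str  w5, [x6]
;;
;; cannot, since both instructions need the address-generation unit.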

;; Used to model a branch pipeline.  Branches can dual-issue with other
;; instructions (except when those instructions take multiple cycles
;; to issue).

(define_cpu_unit "cortex_a53_branch" "cortex_a53")

;; Used to model an integer divide pipeline.

(define_cpu_unit "cortex_a53_idiv" "cortex_a53")

;; Used to model an integer multiply/multiply-accumulate pipeline.

(define_cpu_unit "cortex_a53_imul" "cortex_a53")

;; Model general structural hazards, for wherever we need them.

(define_cpu_unit "cortex_a53_hazard" "cortex_a53")

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ALU instructions.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

(define_insn_reservation "cortex_a53_shift" 2
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "adr,shift_imm,mov_imm,mvn_imm,mov_shift"))
  "cortex_a53_slot_any")

(define_insn_reservation "cortex_a53_shift_reg" 2
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "shift_reg,mov_shift_reg"))
  "cortex_a53_slot_any+cortex_a53_hazard")

(define_insn_reservation "cortex_a53_alu" 3
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "alu_imm,alus_imm,logic_imm,logics_imm,
                        alu_sreg,alus_sreg,logic_reg,logics_reg,
                        adc_imm,adcs_imm,adc_reg,adcs_reg,
                        csel,clz,rbit,rev,alu_dsp_reg,
                        mov_reg,mvn_reg,mrs,multiple,no_insn"))
  "cortex_a53_slot_any")

(define_insn_reservation "cortex_a53_alu_shift" 3
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "alu_shift_imm,alus_shift_imm,
                        crc,logic_shift_imm,logics_shift_imm,
                        alu_ext,alus_ext,bfm,bfx,extend,mvn_shift"))
  "cortex_a53_slot_any")

(define_insn_reservation "cortex_a53_alu_shift_reg" 3
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "alu_shift_reg,alus_shift_reg,
                        logic_shift_reg,logics_shift_reg,
                        mvn_shift_reg"))
  "cortex_a53_slot_any+cortex_a53_hazard")

(define_insn_reservation "cortex_a53_alu_extr" 3
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "rotate_imm"))
  "cortex_a53_slot1|cortex_a53_single_issue")

(define_insn_reservation "cortex_a53_mul" 4
  (and (eq_attr "tune" "cortexa53")
       (ior (eq_attr "mul32" "yes")
            (eq_attr "mul64" "yes")))
  "cortex_a53_slot_any+cortex_a53_imul")

;; From the perspective of the GCC scheduling state machine, if we wish to
;; model an instruction as serialising other instructions, we are best to do
;; so by modelling it as taking very few cycles.  Scheduling many other
;; instructions underneath it at the cost of freedom to pick from the
;; ready list is likely to hurt us more than it helps.  However, we do
;; want to model some resource and latency cost for divide instructions in
;; order to avoid divides ending up too lumpy.

(define_insn_reservation "cortex_a53_div" 4
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "udiv,sdiv"))
  "cortex_a53_slot0,cortex_a53_idiv*2")

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Load/store instructions.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; TODO: the load_<n> types are not prescriptive about how much data is
;; to be loaded.  This is most obvious for LDRD from AArch32 and LDP
;; (X register) from AArch64: both are tagged load_8, but LDP loads
;; 128 bits compared to LDRD's 64 bits.

;; For the below, we assume AArch64 X-registers for load_8, and AArch32
;; registers for load_12/load_16.

(define_insn_reservation "cortex_a53_load1" 4
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "load_byte,load_4,load_acq"))
  "cortex_a53_slot_any+cortex_a53_ls_agen,
   cortex_a53_load")

(define_insn_reservation "cortex_a53_store1" 2
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "store_4,store_rel"))
  "cortex_a53_slot_any+cortex_a53_ls_agen,
   cortex_a53_store")

;; Model AArch64-sized LDP Xm, Xn, [Xa]

(define_insn_reservation "cortex_a53_load2" 4
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "load_8"))
  "cortex_a53_single_issue+cortex_a53_ls_agen,
   cortex_a53_load+cortex_a53_slot0,
   cortex_a53_load")

(define_insn_reservation "cortex_a53_store2" 2
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "store_8"))
  "cortex_a53_slot_any+cortex_a53_ls_agen,
   cortex_a53_store")

;; Model AArch32-sized LDM Ra, {Rm, Rn, Ro}

(define_insn_reservation "cortex_a53_load3plus" 6
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "load_12,load_16"))
  "cortex_a53_single_issue+cortex_a53_ls_agen,
   cortex_a53_load+cortex_a53_slot0,
   cortex_a53_load")

(define_insn_reservation "cortex_a53_store3plus" 2
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "store_12,store_16"))
  "cortex_a53_slot_any+cortex_a53_ls_agen,
   cortex_a53_store+cortex_a53_slot0,
   cortex_a53_store")

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Branches.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; Model all branches as dual-issuable from either execution slot, which
;; is not strictly true for all cases (indirect branches).

(define_insn_reservation "cortex_a53_branch" 0
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "branch,call"))
  "cortex_a53_slot_any+cortex_a53_branch")

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; General-purpose register bypasses
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; Model bypasses for ALU to ALU instructions.

(define_bypass 0 "cortex_a53_shift*"
               "cortex_a53_alu")

(define_bypass 1 "cortex_a53_shift*"
               "cortex_a53_shift*,cortex_a53_alu_*")

(define_bypass 1 "cortex_a53_alu*"
               "cortex_a53_alu")

(define_bypass 1 "cortex_a53_alu*"
               "cortex_a53_alu_shift*"
               "arm_no_early_alu_shift_dep")

(define_bypass 2 "cortex_a53_alu*"
               "cortex_a53_alu_*,cortex_a53_shift*")

;; Model a bypass from MUL/MLA to MLA instructions.

(define_bypass 1 "cortex_a53_mul"
               "cortex_a53_mul"
               "aarch_accumulator_forwarding")

;; Model a bypass from MUL/MLA to ALU instructions.

(define_bypass 2 "cortex_a53_mul"
               "cortex_a53_alu")

(define_bypass 3 "cortex_a53_mul"
               "cortex_a53_alu_*,cortex_a53_shift*")

;; Model bypasses for loads which are to be consumed by the ALU.

(define_bypass 2 "cortex_a53_load1"
               "cortex_a53_alu")

(define_bypass 3 "cortex_a53_load1"
               "cortex_a53_alu_*,cortex_a53_shift*")

(define_bypass 3 "cortex_a53_load2"
               "cortex_a53_alu")

;; Model a bypass for ALU instructions feeding stores.

(define_bypass 0 "cortex_a53_alu*,cortex_a53_shift*"
               "cortex_a53_store*"
               "arm_no_early_store_addr_dep")

;; Model a bypass for load and multiply instructions feeding stores.

(define_bypass 1 "cortex_a53_mul,
                  cortex_a53_load*"
               "cortex_a53_store*"
               "arm_no_early_store_addr_dep")

;; Model a bypass for load to load/store address.

(define_bypass 3 "cortex_a53_load1"
               "cortex_a53_load*"
               "arm_early_load_addr_dep_ptr")

(define_bypass 3 "cortex_a53_load1"
               "cortex_a53_store*"
               "arm_early_store_addr_dep_ptr")

;; Model a GP->FP register move as similar to stores.

(define_bypass 0 "cortex_a53_alu*,cortex_a53_shift*"
               "cortex_a53_r2f")

(define_bypass 1 "cortex_a53_mul,
                  cortex_a53_load1,
                  cortex_a53_load2"
               "cortex_a53_r2f")

(define_bypass 2 "cortex_a53_alu*"
               "cortex_a53_r2f_cvt")

(define_bypass 3 "cortex_a53_mul,
                  cortex_a53_load1,
                  cortex_a53_load2"
               "cortex_a53_r2f_cvt")

;; Model flag forwarding to branches.

(define_bypass 0 "cortex_a53_alu*,cortex_a53_shift*"
               "cortex_a53_branch")

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Floating-point/Advanced SIMD.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

(define_automaton "cortex_a53_advsimd")

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Broad Advanced SIMD type categorisation
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

(define_attr "cortex_a53_advsimd_type"
  "advsimd_alu, advsimd_alu_q,
   advsimd_mul, advsimd_mul_q,
   advsimd_div_s, advsimd_div_s_q,
   advsimd_div_d, advsimd_div_d_q,
   advsimd_load_64, advsimd_store_64,
   advsimd_load_128, advsimd_store_128,
   advsimd_load_lots, advsimd_store_lots,
   unknown"
  (cond [
310 (eq_attr "type" "neon_add, neon_qadd, neon_add_halve, neon_sub, neon_qsub,\
311 neon_sub_halve, neon_abs, neon_neg, neon_qneg,\
312 neon_qabs, neon_abd, neon_minmax, neon_compare,\
313 neon_compare_zero, neon_arith_acc, neon_reduc_add,\
314 neon_reduc_add_acc, neon_reduc_minmax,\
315 neon_logic, neon_tst, neon_shift_imm,\
316 neon_shift_reg, neon_shift_acc, neon_sat_shift_imm,\
317 neon_sat_shift_reg, neon_ins, neon_move,\
318 neon_permute, neon_zip, neon_tbl1,\
319 neon_tbl2, neon_tbl3, neon_tbl4, neon_bsl,\
320 neon_cls, neon_cnt, neon_dup,\
321 neon_ext, neon_rbit, neon_rev,\
322 neon_fp_abd_s, neon_fp_abd_d,\
323 neon_fp_abs_s, neon_fp_abs_d,\
324 neon_fp_addsub_s, neon_fp_addsub_d, neon_fp_compare_s,\
325 neon_fp_compare_d, neon_fp_minmax_s,\
326 neon_fp_minmax_d, neon_fp_neg_s, neon_fp_neg_d,\
327 neon_fp_reduc_add_s, neon_fp_reduc_add_d,\
328 neon_fp_reduc_minmax_s, neon_fp_reduc_minmax_d,\
329 neon_fp_cvt_widen_h, neon_fp_to_int_s,neon_fp_to_int_d,\
330 neon_int_to_fp_s, neon_int_to_fp_d, neon_fp_round_s,\
331 neon_fp_recpe_s, neon_fp_recpe_d, neon_fp_recps_s,\
332 neon_fp_recps_d, neon_fp_recpx_s, neon_fp_recpx_d,\
333 neon_fp_rsqrte_s, neon_fp_rsqrte_d, neon_fp_rsqrts_s,\
335 (const_string "advsimd_alu")
336 (eq_attr "type" "neon_add_q, neon_add_widen, neon_add_long,\
337 neon_qadd_q, neon_add_halve_q, neon_add_halve_narrow_q,\
338 neon_sub_q, neon_sub_widen, neon_sub_long,\
339 neon_qsub_q, neon_sub_halve_q, neon_sub_halve_narrow_q,\
340 neon_abs_q, neon_neg_q, neon_qneg_q, neon_qabs_q,\
341 neon_abd_q, neon_abd_long, neon_minmax_q,\
342 neon_compare_q, neon_compare_zero_q,\
343 neon_arith_acc_q, neon_reduc_add_q,\
344 neon_reduc_add_long, neon_reduc_add_acc_q,\
345 neon_reduc_minmax_q, neon_logic_q, neon_tst_q,\
346 neon_shift_imm_q, neon_shift_imm_narrow_q,\
347 neon_shift_imm_long, neon_shift_reg_q,\
348 neon_shift_acc_q, neon_sat_shift_imm_q,\
349 neon_sat_shift_imm_narrow_q, neon_sat_shift_reg_q,\
350 neon_ins_q, neon_move_q, neon_move_narrow_q,\
351 neon_permute_q, neon_zip_q,\
352 neon_tbl1_q, neon_tbl2_q, neon_tbl3_q,\
353 neon_tbl4_q, neon_bsl_q, neon_cls_q, neon_cnt_q,\
354 neon_dup_q, neon_ext_q, neon_rbit_q,\
355 neon_rev_q, neon_fp_abd_s_q, neon_fp_abd_d_q,\
356 neon_fp_abs_s_q, neon_fp_abs_d_q,\
357 neon_fp_addsub_s_q, neon_fp_addsub_d_q,\
358 neon_fp_compare_s_q, neon_fp_compare_d_q,\
359 neon_fp_minmax_s_q, neon_fp_minmax_d_q,\
360 neon_fp_cvt_widen_s, neon_fp_neg_s_q, neon_fp_neg_d_q,\
361 neon_fp_reduc_add_s_q, neon_fp_reduc_add_d_q,\
362 neon_fp_reduc_minmax_s_q, neon_fp_reduc_minmax_d_q,\
363 neon_fp_cvt_narrow_s_q, neon_fp_cvt_narrow_d_q,\
364 neon_fp_to_int_s_q, neon_fp_to_int_d_q,\
                     neon_int_to_fp_s_q, neon_int_to_fp_d_q,\
                     neon_fp_round_s_q, neon_fp_round_d_q,\
                     neon_fp_recpe_s_q, neon_fp_recpe_d_q,\
                     neon_fp_recps_s_q, neon_fp_recps_d_q,\
                     neon_fp_recpx_s_q, neon_fp_recpx_d_q,\
                     neon_fp_rsqrte_s_q, neon_fp_rsqrte_d_q,\
                     neon_fp_rsqrts_s_q, neon_fp_rsqrts_d_q")
      (const_string "advsimd_alu_q")
    (eq_attr "type" "neon_mul_b, neon_mul_h, neon_mul_s,\
                     neon_mul_h_scalar, neon_mul_s_scalar,\
                     neon_sat_mul_b, neon_sat_mul_h, neon_sat_mul_s,\
                     neon_sat_mul_h_scalar, neon_sat_mul_s_scalar,\
                     neon_mla_b, neon_mla_h, neon_mla_s,\
                     neon_mla_h_scalar, neon_mla_s_scalar,\
                     neon_fp_mul_s, neon_fp_mul_s_scalar,\
                     neon_fp_mul_d, neon_fp_mla_s,\
                     neon_fp_mla_s_scalar, neon_fp_mla_d")
      (const_string "advsimd_mul")
    (eq_attr "type" "neon_mul_b_q, neon_mul_h_q, neon_mul_s_q,\
                     neon_mul_b_long, neon_mul_h_long, neon_mul_s_long,\
                     neon_mul_d_long, neon_mul_h_scalar_q,\
                     neon_mul_s_scalar_q, neon_mul_h_scalar_long,\
                     neon_mul_s_scalar_long, neon_sat_mul_b_q,\
                     neon_sat_mul_h_q, neon_sat_mul_s_q,\
                     neon_sat_mul_b_long, neon_sat_mul_h_long,\
                     neon_sat_mul_s_long, neon_sat_mul_h_scalar_q,\
                     neon_sat_mul_s_scalar_q, neon_sat_mul_h_scalar_long,\
                     neon_sat_mul_s_scalar_long, crypto_pmull, neon_mla_b_q,\
                     neon_mla_h_q, neon_mla_s_q, neon_mla_b_long,\
                     neon_mla_h_long, neon_mla_s_long,\
                     neon_mla_h_scalar_q, neon_mla_s_scalar_q,\
                     neon_mla_h_scalar_long, neon_mla_s_scalar_long,\
                     neon_sat_mla_b_long, neon_sat_mla_h_long,\
                     neon_sat_mla_s_long, neon_sat_mla_h_scalar_long,\
                     neon_sat_mla_s_scalar_long,\
                     neon_fp_mul_s_q, neon_fp_mul_s_scalar_q,\
                     neon_fp_mul_d_q, neon_fp_mul_d_scalar_q,\
                     neon_fp_mla_s_q, neon_fp_mla_s_scalar_q,\
                     neon_fp_mla_d_q, neon_fp_mla_d_scalar_q")
      (const_string "advsimd_mul_q")
    (eq_attr "type" "neon_fp_sqrt_s, neon_fp_div_s")
      (const_string "advsimd_div_s")
    (eq_attr "type" "neon_fp_sqrt_s_q, neon_fp_div_s_q")
      (const_string "advsimd_div_s_q")
    (eq_attr "type" "neon_fp_sqrt_d, neon_fp_div_d")
      (const_string "advsimd_div_d")
    (eq_attr "type" "neon_fp_sqrt_d_q, neon_fp_div_d_q")
      (const_string "advsimd_div_d_q")
    (eq_attr "type" "neon_ldr, neon_load1_1reg,\
                     neon_load1_all_lanes, neon_load1_all_lanes_q,\
                     neon_load1_one_lane, neon_load1_one_lane_q")
      (const_string "advsimd_load_64")
    (eq_attr "type" "neon_str, neon_store1_1reg,\
                     neon_store1_one_lane, neon_store1_one_lane_q")
      (const_string "advsimd_store_64")
    (eq_attr "type" "neon_load1_1reg_q, neon_load1_2reg,\
                     neon_load2_2reg,\
                     neon_load2_all_lanes, neon_load2_all_lanes_q,\
                     neon_load2_one_lane, neon_load2_one_lane_q")
      (const_string "advsimd_load_128")
    (eq_attr "type" "neon_store1_1reg_q, neon_store1_2reg,\
                     neon_store2_2reg,\
                     neon_store2_one_lane, neon_store2_one_lane_q")
      (const_string "advsimd_store_128")
    (eq_attr "type" "neon_load1_2reg_q, neon_load1_3reg, neon_load1_3reg_q,\
                     neon_load1_4reg, neon_load1_4reg_q,\
                     neon_load2_2reg_q, neon_load2_4reg,\
                     neon_load2_4reg_q, neon_load3_3reg,\
                     neon_load3_3reg_q, neon_load3_all_lanes,\
                     neon_load3_all_lanes_q, neon_load3_one_lane,\
                     neon_load3_one_lane_q, neon_load4_4reg,\
                     neon_load4_4reg_q, neon_load4_all_lanes,\
                     neon_load4_all_lanes_q, neon_load4_one_lane,\
                     neon_load4_one_lane_q, neon_ldp, neon_ldp_q")
      (const_string "advsimd_load_lots")
    (eq_attr "type" "neon_store1_2reg_q, neon_store1_3reg,\
                     neon_store1_3reg_q, neon_store1_4reg,\
                     neon_store1_4reg_q, neon_store2_2reg_q,\
                     neon_store2_4reg, neon_store2_4reg_q,\
                     neon_store3_3reg, neon_store3_3reg_q,\
                     neon_store3_one_lane, neon_store3_one_lane_q,\
                     neon_store4_4reg, neon_store4_4reg_q,\
                     neon_store4_one_lane, neon_store4_one_lane_q,\
                     neon_stp, neon_stp_q")
      (const_string "advsimd_store_lots")]
    (const_string "unknown")))

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Floating-point/Advanced SIMD functional units.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; We model the Advanced SIMD unit as two 64-bit units, each with three
;; pipes, FP_ALU, FP_MUL, FP_DIV.  We also give convenient reservations
;; for 128-bit Advanced SIMD instructions, which use both units.

;; The floating-point/Advanced SIMD ALU pipelines.

(define_cpu_unit "cortex_a53_fp_alu_lo,\
                  cortex_a53_fp_alu_hi"
                 "cortex_a53_advsimd")

(define_reservation "cortex_a53_fp_alu"
                    "cortex_a53_fp_alu_lo\
                     |cortex_a53_fp_alu_hi")

(define_reservation "cortex_a53_fp_alu_q"
                    "cortex_a53_fp_alu_lo\
                     +cortex_a53_fp_alu_hi")

;; The floating-point/Advanced SIMD multiply/multiply-accumulate
;; pipelines.

(define_cpu_unit "cortex_a53_fp_mul_lo,\
                  cortex_a53_fp_mul_hi"
                 "cortex_a53_advsimd")

(define_reservation "cortex_a53_fp_mul"
                    "cortex_a53_fp_mul_lo\
                     |cortex_a53_fp_mul_hi")

(define_reservation "cortex_a53_fp_mul_q"
                    "cortex_a53_fp_mul_lo\
                     +cortex_a53_fp_mul_hi")

;; Floating-point/Advanced SIMD divide/square root.

(define_cpu_unit "cortex_a53_fp_div_lo,\
                  cortex_a53_fp_div_hi"
                 "cortex_a53_advsimd")

;; Once we choose a pipe, stick with it for three simulated cycles.

(define_reservation "cortex_a53_fp_div"
                    "(cortex_a53_fp_div_lo*3)\
                     |(cortex_a53_fp_div_hi*3)")

(define_reservation "cortex_a53_fp_div_q"
                    "(cortex_a53_fp_div_lo*3)\
                     +(cortex_a53_fp_div_hi*3)")

;; Cryptographic extensions

(define_cpu_unit "cortex_a53_crypto"
                 "cortex_a53_advsimd")

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Floating-point arithmetic.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

(define_insn_reservation "cortex_a53_fpalu" 4
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "ffariths, fadds, ffarithd, faddd, fmov,
                        f_cvt, fcmps, fcmpd, fccmps, fccmpd, fcsel,
                        f_rints, f_rintd, f_minmaxs, f_minmaxd"))
  "cortex_a53_slot_any,cortex_a53_fp_alu")

(define_insn_reservation "cortex_a53_fconst" 2
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "fconsts,fconstd"))
  "cortex_a53_slot_any,cortex_a53_fp_alu")

(define_insn_reservation "cortex_a53_fpmul" 4
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "fmuls,fmuld"))
  "cortex_a53_slot_any,cortex_a53_fp_mul")

;; For multiply-accumulate, model the add (accumulate) as being issued
;; after the multiply completes.

(define_insn_reservation "cortex_a53_fpmac" 8
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "fmacs,fmacd,ffmas,ffmad"))
  "cortex_a53_slot_any,cortex_a53_fp_mul,
   nothing*3, cortex_a53_fp_alu")

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Floating-point to/from core transfers.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

(define_insn_reservation "cortex_a53_r2f" 2
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "f_mcr,f_mcrr"))
  "cortex_a53_slot_any,cortex_a53_fp_alu")

(define_insn_reservation "cortex_a53_f2r" 4
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "f_mrc,f_mrrc"))
  "cortex_a53_slot_any,cortex_a53_fp_alu")

(define_insn_reservation "cortex_a53_r2f_cvt" 4
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "f_cvti2f, neon_from_gp, neon_from_gp_q"))
  "cortex_a53_slot_any,cortex_a53_fp_alu")

(define_insn_reservation "cortex_a53_f2r_cvt" 5
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "f_cvtf2i, neon_to_gp, neon_to_gp_q"))
  "cortex_a53_slot_any,cortex_a53_fp_alu")

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Floating-point flag transfer.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

(define_insn_reservation "cortex_a53_f_flags" 5
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "f_flag"))
  "cortex_a53_slot_any")

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Floating-point load/store.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

(define_insn_reservation "cortex_a53_f_load_64" 3
  (and (eq_attr "tune" "cortexa53")
       (ior (eq_attr "type" "f_loads,f_loadd")
            (eq_attr "cortex_a53_advsimd_type"
                     "advsimd_load_64")))
  "cortex_a53_slot_any+cortex_a53_ls_agen,
   cortex_a53_load")

(define_insn_reservation "cortex_a53_f_load_many" 4
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "cortex_a53_advsimd_type"
                "advsimd_load_128,advsimd_load_lots"))
  "cortex_a53_single_issue+cortex_a53_ls_agen,
   cortex_a53_load+cortex_a53_slot0,
   cortex_a53_load")

(define_insn_reservation "cortex_a53_f_store_64" 0
  (and (eq_attr "tune" "cortexa53")
       (ior (eq_attr "type" "f_stores,f_stored")
            (eq_attr "cortex_a53_advsimd_type"
                     "advsimd_store_64")))
  "cortex_a53_slot_any+cortex_a53_ls_agen,
   cortex_a53_store")

(define_insn_reservation "cortex_a53_f_store_many" 0
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "cortex_a53_advsimd_type"
                "advsimd_store_128,advsimd_store_lots"))
  "cortex_a53_slot_any+cortex_a53_ls_agen,
   cortex_a53_store+cortex_a53_slot0,
   cortex_a53_store")

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Advanced SIMD.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; We want to model use of either the ALU pipe, the multiply pipe or the
;; divide/sqrt pipe.  In all cases we need to check whether the operation
;; is 64-bit (in which case we model dual-issue without penalty) or
;; 128-bit, in which case our model requires that it issue from slot 0.

(define_insn_reservation "cortex_a53_advsimd_alu" 4
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "cortex_a53_advsimd_type" "advsimd_alu"))
  "cortex_a53_slot_any,cortex_a53_fp_alu")

(define_insn_reservation "cortex_a53_advsimd_alu_q" 4
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "cortex_a53_advsimd_type" "advsimd_alu_q"))
  "cortex_a53_slot0,cortex_a53_fp_alu_q")

(define_insn_reservation "cortex_a53_advsimd_mul" 4
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "cortex_a53_advsimd_type" "advsimd_mul"))
  "cortex_a53_slot_any,cortex_a53_fp_mul")

(define_insn_reservation "cortex_a53_advsimd_mul_q" 4
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "cortex_a53_advsimd_type" "advsimd_mul_q"))
  "cortex_a53_slot0,cortex_a53_fp_mul_q")

(define_insn_reservation "cortex_a53_advsimd_div_s" 14
  (and (eq_attr "tune" "cortexa53")
       (ior (eq_attr "type" "fdivs,fsqrts")
            (eq_attr "cortex_a53_advsimd_type" "advsimd_div_s")))
  "cortex_a53_slot0,cortex_a53_fp_mul,
   cortex_a53_fp_div")

(define_insn_reservation "cortex_a53_advsimd_div_d" 29
  (and (eq_attr "tune" "cortexa53")
       (ior (eq_attr "type" "fdivd,fsqrtd")
            (eq_attr "cortex_a53_advsimd_type" "advsimd_div_d")))
  "cortex_a53_slot0,cortex_a53_fp_mul,
   cortex_a53_fp_div")

(define_insn_reservation "cortex_a53_advsimd_div_s_q" 14
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "cortex_a53_advsimd_type" "advsimd_div_s_q"))
  "cortex_a53_single_issue,cortex_a53_fp_mul_q,
   cortex_a53_fp_div_q")

(define_insn_reservation "cortex_a53_advsimd_divd_q" 29
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "cortex_a53_advsimd_type" "advsimd_div_d_q"))
  "cortex_a53_single_issue,cortex_a53_fp_mul_q,
   cortex_a53_fp_div_q")

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ARMv8-A Cryptographic extensions.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; We want AESE and AESMC to end up consecutive to one another.

(define_insn_reservation "cortex_a53_crypto_aese" 3
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "crypto_aese"))
  "cortex_a53_slot0")

(define_insn_reservation "cortex_a53_crypto_aesmc" 3
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "crypto_aesmc"))
  "cortex_a53_slot_any")

(define_insn_reservation "cortex_a53_crypto_sha1_fast" 3
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "crypto_sha1_fast"))
  "cortex_a53_slot_any,cortex_a53_crypto")

(define_insn_reservation "cortex_a53_crypto_sha256_fast" 3
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "crypto_sha256_fast"))
  "cortex_a53_slot0,cortex_a53_crypto")

(define_insn_reservation "cortex_a53_crypto_sha1_xor" 4
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "crypto_sha1_xor"))
  "cortex_a53_slot0,cortex_a53_crypto")

(define_insn_reservation "cortex_a53_crypto_sha_slow" 5
  (and (eq_attr "tune" "cortexa53")
       (eq_attr "type" "crypto_sha1_slow, crypto_sha256_slow"))
  "cortex_a53_slot0,cortex_a53_crypto")

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Floating-point/Advanced SIMD register bypasses.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; Model the late use of the accumulator operand for floating-point
;; multiply-accumulate operations as a bypass reducing the latency
;; of producing instructions to near zero.

(define_bypass 1 "cortex_a53_fpalu,
                  cortex_a53_fpmul,
                  cortex_a53_r2f,
                  cortex_a53_r2f_cvt,
                  cortex_a53_fconst,
                  cortex_a53_f_load*"
               "cortex_a53_fpmac"
               "aarch_accumulator_forwarding")

(define_bypass 4 "cortex_a53_fpmac"
               "cortex_a53_fpmac"
               "aarch_accumulator_forwarding")

;; We want AESE and AESMC to end up consecutive to one another.

(define_bypass 0 "cortex_a53_crypto_aese"
               "cortex_a53_crypto_aesmc"
               "aarch_crypto_can_dual_issue")