/*
 *  Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define QEMU_GENERATE
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "internal.h"
#include "attribs.h"
#include "insn.h"
#include "decode.h"
#include "translate.h"
#include "printinsn.h"
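
/*
 * TCG globals that mirror fields of CPUHexagonState; they are created in
 * hexagon_translate_init() below with tcg_global_mem_new().
 */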
TCGv hex_gpr[TOTAL_PER_THREAD_REGS];
TCGv hex_pred[NUM_PREGS];
TCGv hex_next_PC;
TCGv hex_this_PC;
TCGv hex_slot_cancelled;
TCGv hex_branch_taken;
TCGv hex_new_value[TOTAL_PER_THREAD_REGS];
TCGv hex_reg_written[TOTAL_PER_THREAD_REGS];
TCGv hex_new_pred_value[NUM_PREGS];
TCGv hex_pred_written;
TCGv hex_store_addr[STORES_MAX];
TCGv hex_store_width[STORES_MAX];
TCGv hex_store_val32[STORES_MAX];
TCGv_i64 hex_store_val64[STORES_MAX];
TCGv hex_pkt_has_store_s1;
TCGv hex_dczero_addr;
TCGv hex_llsc_addr;
TCGv hex_llsc_val;
TCGv_i64 hex_llsc_val_i64;
TCGv hex_VRegs_updated;
TCGv hex_QRegs_updated;
TCGv hex_vstore_addr[VSTORES_MAX];
TCGv hex_vstore_size[VSTORES_MAX];
TCGv hex_vstore_pending[VSTORES_MAX];
static const char * const hexagon_prednames[] = {
    "p0", "p1", "p2", "p3"
};
intptr_t ctx_future_vreg_off(DisasContext *ctx, int regnum,
                             int num, bool alloc_ok)
{
    intptr_t offset;

    /* See if it is already allocated */
    for (int i = 0; i < ctx->future_vregs_idx; i++) {
        if (ctx->future_vregs_num[i] == regnum) {
            return offsetof(CPUHexagonState, future_VRegs[i]);
        }
    }

    g_assert(alloc_ok);
    offset = offsetof(CPUHexagonState, future_VRegs[ctx->future_vregs_idx]);
    for (int i = 0; i < num; i++) {
        ctx->future_vregs_num[ctx->future_vregs_idx + i] = regnum++;
    }
    ctx->future_vregs_idx += num;
    g_assert(ctx->future_vregs_idx <= VECTOR_TEMPS_MAX);
    return offset;
}
intptr_t ctx_tmp_vreg_off(DisasContext *ctx, int regnum,
                          int num, bool alloc_ok)
{
    intptr_t offset;

    /* See if it is already allocated */
    for (int i = 0; i < ctx->tmp_vregs_idx; i++) {
        if (ctx->tmp_vregs_num[i] == regnum) {
            return offsetof(CPUHexagonState, tmp_VRegs[i]);
        }
    }

    g_assert(alloc_ok);
    offset = offsetof(CPUHexagonState, tmp_VRegs[ctx->tmp_vregs_idx]);
    for (int i = 0; i < num; i++) {
        ctx->tmp_vregs_num[ctx->tmp_vregs_idx + i] = regnum++;
    }
    ctx->tmp_vregs_idx += num;
    g_assert(ctx->tmp_vregs_idx <= VECTOR_TEMPS_MAX);
    return offset;
}
static void gen_exception_raw(int excp)
{
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
}
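
/*
 * Add this TB's packet/instruction/HVX counts to the QEMU pseudo
 * registers that model them.
 */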
static void gen_exec_counters(DisasContext *ctx)
{
    tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_PKT_CNT],
                    hex_gpr[HEX_REG_QEMU_PKT_CNT], ctx->num_packets);
    tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_INSN_CNT],
                    hex_gpr[HEX_REG_QEMU_INSN_CNT], ctx->num_insns);
    tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_HVX_CNT],
                    hex_gpr[HEX_REG_QEMU_HVX_CNT], ctx->num_hvx_insns);
}
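
/* Finish the TB: flush the counters, synchronize PC, and exit */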
static void gen_end_tb(DisasContext *ctx)
{
    gen_exec_counters(ctx);
    tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], hex_next_PC);
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;
}
static void gen_exception_end_tb(DisasContext *ctx, int excp)
{
    gen_exec_counters(ctx);
    tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], hex_next_PC);
    gen_exception_raw(excp);
    ctx->base.is_jmp = DISAS_NORETURN;
}
#define PACKET_BUFFER_LEN              1028
static void print_pkt(Packet *pkt)
{
    GString *buf = g_string_sized_new(PACKET_BUFFER_LEN);
    snprint_a_pkt_debug(buf, pkt);
    HEX_DEBUG_LOG("%s", buf->str);
    g_string_free(buf, true);
}
#define HEX_DEBUG_PRINT_PKT(pkt) \
    do { \
        if (HEX_DEBUG) { \
            print_pkt(pkt); \
        } \
    } while (0)
static int read_packet_words(CPUHexagonState *env, DisasContext *ctx,
                             uint32_t words[])
{
    bool found_end = false;
    int nwords, max_words;

    memset(words, 0, PACKET_WORDS_MAX * sizeof(uint32_t));
    for (nwords = 0; !found_end && nwords < PACKET_WORDS_MAX; nwords++) {
        words[nwords] =
            translator_ldl(env, &ctx->base,
                           ctx->base.pc_next + nwords * sizeof(uint32_t));
        found_end = is_packet_end(words[nwords]);
    }
    if (!found_end) {
        /* Read too many words without finding the end */
        return 0;
    }

    /* Check for page boundary crossing */
    max_words = -(ctx->base.pc_next | TARGET_PAGE_MASK) / sizeof(uint32_t);
    if (nwords > max_words) {
        /* We can only cross a page boundary at the beginning of a TB */
        g_assert(ctx->base.num_insns == 1);
    }

    HEX_DEBUG_LOG("decode_packet: pc = 0x%x\n", ctx->base.pc_next);
    HEX_DEBUG_LOG("    words = { ");
    for (int i = 0; i < nwords; i++) {
        HEX_DEBUG_LOG("0x%x, ", words[i]);
    }
    HEX_DEBUG_LOG("}\n");

    return nwords;
}
static bool check_for_attrib(Packet *pkt, int attrib)
{
    for (int i = 0; i < pkt->num_insns; i++) {
        if (GET_ATTRIB(pkt->insn[i].opcode, attrib)) {
            return true;
        }
    }
    return false;
}
static bool need_pc(Packet *pkt)
{
    return check_for_attrib(pkt, A_IMPLICIT_READS_PC);
}

static bool need_slot_cancelled(Packet *pkt)
{
    return check_for_attrib(pkt, A_CONDEXEC);
}

static bool need_pred_written(Packet *pkt)
{
    return check_for_attrib(pkt, A_WRITES_PRED_REG);
}
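
/*
 * Reset the translation-time write logs and initialize only the runtime
 * state that this packet's semantics actually need.
 */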
static void gen_start_packet(DisasContext *ctx, Packet *pkt)
{
    target_ulong next_PC = ctx->base.pc_next + pkt->encod_pkt_size_in_bytes;
    int i;

    /* Clear out the disassembly context */
    ctx->reg_log_idx = 0;
    bitmap_zero(ctx->regs_written, TOTAL_PER_THREAD_REGS);
    ctx->preg_log_idx = 0;
    bitmap_zero(ctx->pregs_written, NUM_PREGS);
    ctx->future_vregs_idx = 0;
    ctx->tmp_vregs_idx = 0;
    ctx->vreg_log_idx = 0;
    bitmap_zero(ctx->vregs_updated_tmp, NUM_VREGS);
    bitmap_zero(ctx->vregs_updated, NUM_VREGS);
    bitmap_zero(ctx->vregs_select, NUM_VREGS);
    ctx->qreg_log_idx = 0;
    for (i = 0; i < STORES_MAX; i++) {
        ctx->store_width[i] = 0;
    }
    tcg_gen_movi_tl(hex_pkt_has_store_s1, pkt->pkt_has_store_s1);
    ctx->s1_store_processed = false;
    ctx->pre_commit = true;

    if (HEX_DEBUG) {
        /* Handy place to set a breakpoint before the packet executes */
        gen_helper_debug_start_packet(cpu_env);
        tcg_gen_movi_tl(hex_this_PC, ctx->base.pc_next);
    }

    /* Initialize the runtime state for packet semantics */
    if (need_pc(pkt)) {
        tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next);
    }
    if (need_slot_cancelled(pkt)) {
        tcg_gen_movi_tl(hex_slot_cancelled, 0);
    }
    if (pkt->pkt_has_cof) {
        tcg_gen_movi_tl(hex_branch_taken, 0);
        tcg_gen_movi_tl(hex_next_PC, next_PC);
    }
    if (need_pred_written(pkt)) {
        tcg_gen_movi_tl(hex_pred_written, 0);
    }

    if (pkt->pkt_has_hvx) {
        tcg_gen_movi_tl(hex_VRegs_updated, 0);
        tcg_gen_movi_tl(hex_QRegs_updated, 0);
    }
}
bool is_gather_store_insn(Insn *insn, Packet *pkt)
{
    if (GET_ATTRIB(insn->opcode, A_CVI_NEW) &&
        insn->new_value_producer_slot == 1) {
        /* Look for gather instruction */
        for (int i = 0; i < pkt->num_insns; i++) {
            Insn *in = &pkt->insn[i];
            if (GET_ATTRIB(in->opcode, A_CVI_GATHER) && in->slot == 1) {
                return true;
            }
        }
    }
    return false;
}
/*
 * The LOG_*_WRITE macros mark most of the writes in a packet.
 * However, there are some implicit writes marked as attributes
 * of the applicable instructions.
 */
static void mark_implicit_reg_write(DisasContext *ctx, Insn *insn,
                                    int attrib, int rnum)
{
    if (GET_ATTRIB(insn->opcode, attrib)) {
        /*
         * USR is used to set overflow and FP exceptions,
         * so treat it as conditional
         */
        bool is_predicated = GET_ATTRIB(insn->opcode, A_CONDEXEC) ||
                             rnum == HEX_REG_USR;
        if (is_predicated && !is_preloaded(ctx, rnum)) {
            tcg_gen_mov_tl(hex_new_value[rnum], hex_gpr[rnum]);
        }

        ctx_log_reg_write(ctx, rnum);
    }
}
static void mark_implicit_pred_write(DisasContext *ctx, Insn *insn,
                                     int attrib, int pnum)
{
    if (GET_ATTRIB(insn->opcode, attrib)) {
        ctx_log_pred_write(ctx, pnum);
    }
}
static void mark_implicit_reg_writes(DisasContext *ctx, Insn *insn)
{
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_FP,  HEX_REG_FP);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SP,  HEX_REG_SP);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_LR,  HEX_REG_LR);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_LC0, HEX_REG_LC0);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SA0, HEX_REG_SA0);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_LC1, HEX_REG_LC1);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SA1, HEX_REG_SA1);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_USR, HEX_REG_USR);
    mark_implicit_reg_write(ctx, insn, A_FPOP, HEX_REG_USR);
}
static void mark_implicit_pred_writes(DisasContext *ctx, Insn *insn)
{
    mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P0, 0);
    mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P1, 1);
    mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P2, 2);
    mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P3, 3);
}
static void gen_insn(CPUHexagonState *env, DisasContext *ctx,
                     Insn *insn, Packet *pkt)
{
    if (insn->generate) {
        mark_implicit_reg_writes(ctx, insn);
        insn->generate(env, ctx, insn, pkt);
        mark_implicit_pred_writes(ctx, insn);
    } else {
        gen_exception_end_tb(ctx, HEX_EXCP_INVALID_OPCODE);
    }
}
/*
 * Helpers for generating the packet commit
 */
static void gen_reg_writes(DisasContext *ctx)
{
    int i;

    for (i = 0; i < ctx->reg_log_idx; i++) {
        int reg_num = ctx->reg_log[i];

        tcg_gen_mov_tl(hex_gpr[reg_num], hex_new_value[reg_num]);
    }
}
static void gen_pred_writes(DisasContext *ctx, Packet *pkt)
{
    int i;

    /* Early exit if the log is empty */
    if (!ctx->preg_log_idx) {
        return;
    }

    /*
     * Only endloop instructions will conditionally
     * write a predicate.  If there are no endloop
     * instructions, we can use the non-conditional
     * write of the predicates.
     */
    if (pkt->pkt_has_endloop) {
        TCGv zero = tcg_constant_tl(0);
        TCGv pred_written = tcg_temp_new();
        for (i = 0; i < ctx->preg_log_idx; i++) {
            int pred_num = ctx->preg_log[i];

            tcg_gen_andi_tl(pred_written, hex_pred_written, 1 << pred_num);
            tcg_gen_movcond_tl(TCG_COND_NE, hex_pred[pred_num],
                               pred_written, zero,
                               hex_new_pred_value[pred_num],
                               hex_pred[pred_num]);
        }
        tcg_temp_free(pred_written);
    } else {
        for (i = 0; i < ctx->preg_log_idx; i++) {
            int pred_num = ctx->preg_log[i];
            tcg_gen_mov_tl(hex_pred[pred_num], hex_new_pred_value[pred_num]);
            if (HEX_DEBUG) {
                /* Do this so HELPER(debug_commit_end) will know */
                tcg_gen_ori_tl(hex_pred_written, hex_pred_written,
                               1 << pred_num);
            }
        }
    }
}
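
/*
 * Debug-only: pass the store width recorded at translation time to a
 * helper that checks it against the runtime store log.
 */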
static void gen_check_store_width(DisasContext *ctx, int slot_num)
{
    if (HEX_DEBUG) {
        TCGv slot = tcg_constant_tl(slot_num);
        TCGv check = tcg_constant_tl(ctx->store_width[slot_num]);
        gen_helper_debug_check_store_width(cpu_env, slot, check);
    }
}
static bool slot_is_predicated(Packet *pkt, int slot_num)
{
    for (int i = 0; i < pkt->num_insns; i++) {
        if (pkt->insn[i].slot == slot_num) {
            return GET_ATTRIB(pkt->insn[i].opcode, A_CONDEXEC);
        }
    }
    /* If we get to here, we didn't find an instruction in the requested slot */
    g_assert_not_reached();
}
void process_store(DisasContext *ctx, Packet *pkt, int slot_num)
{
    bool is_predicated = slot_is_predicated(pkt, slot_num);
    TCGLabel *label_end = NULL;

    /*
     * We may have already processed this store
     * See CHECK_NOSHUF in macros.h
     */
    if (slot_num == 1 && ctx->s1_store_processed) {
        return;
    }
    ctx->s1_store_processed = true;

    if (is_predicated) {
        TCGv cancelled = tcg_temp_new();
        label_end = gen_new_label();

        /* Don't do anything if the slot was cancelled */
        tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
        tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
        tcg_temp_free(cancelled);
    }
    {
        TCGv address = tcg_temp_local_new();
        tcg_gen_mov_tl(address, hex_store_addr[slot_num]);

        /*
         * If we know the width from the DisasContext, we can
         * generate much cleaner code.
         * Unfortunately, not all instructions execute the fSTORE
         * macro during code generation.  Anything that uses the
         * generic helper will have this problem.  Instructions
         * that use fWRAP to generate proper TCG code will be OK.
         */
        switch (ctx->store_width[slot_num]) {
        case 1:
            gen_check_store_width(ctx, slot_num);
            tcg_gen_qemu_st8(hex_store_val32[slot_num],
                             hex_store_addr[slot_num],
                             ctx->mem_idx);
            break;
        case 2:
            gen_check_store_width(ctx, slot_num);
            tcg_gen_qemu_st16(hex_store_val32[slot_num],
                              hex_store_addr[slot_num],
                              ctx->mem_idx);
            break;
        case 4:
            gen_check_store_width(ctx, slot_num);
            tcg_gen_qemu_st32(hex_store_val32[slot_num],
                              hex_store_addr[slot_num],
                              ctx->mem_idx);
            break;
        case 8:
            gen_check_store_width(ctx, slot_num);
            tcg_gen_qemu_st64(hex_store_val64[slot_num],
                              hex_store_addr[slot_num],
                              ctx->mem_idx);
            break;
        default:
            {
                /*
                 * If we get to here, we don't know the width at
                 * TCG generation time, we'll use a helper to
                 * avoid branching based on the width at runtime.
                 */
                TCGv slot = tcg_constant_tl(slot_num);
                gen_helper_commit_store(cpu_env, slot);
            }
        }
        tcg_temp_free(address);
    }
    if (is_predicated) {
        gen_set_label(label_end);
    }
}
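
/* Generate the scalar stores that were logged during the packet */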
static void process_store_log(DisasContext *ctx, Packet *pkt)
{
    /*
     *  When a packet has two stores, the hardware processes
     *  slot 1 and then slot 0.  This will be important when
     *  the memory accesses overlap.
     */
    if (pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa) {
        process_store(ctx, pkt, 1);
    }
    if (pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa) {
        process_store(ctx, pkt, 0);
    }
}
/* Zero out a 32-byte cache line */
static void process_dczeroa(DisasContext *ctx, Packet *pkt)
{
    if (pkt->pkt_has_dczeroa) {
        /* Store 32 bytes of zero starting at (addr & ~0x1f) */
        TCGv addr = tcg_temp_new();
        TCGv_i64 zero = tcg_constant_i64(0);

        tcg_gen_andi_tl(addr, hex_dczero_addr, ~0x1f);
        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);

        tcg_temp_free(addr);
    }
}
static bool pkt_has_hvx_store(Packet *pkt)
{
    int i;
    for (i = 0; i < pkt->num_insns; i++) {
        int opcode = pkt->insn[i].opcode;
        if (GET_ATTRIB(opcode, A_CVI) && GET_ATTRIB(opcode, A_STORE)) {
            return true;
        }
    }
    return false;
}
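
/* Commit the HVX VReg and QReg writes logged during the packet */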
static void gen_commit_hvx(DisasContext *ctx, Packet *pkt)
{
    int i;

    /*
     *    for (i = 0; i < ctx->vreg_log_idx; i++) {
     *        int rnum = ctx->vreg_log[i];
     *        if (ctx->vreg_is_predicated[i]) {
     *            if (env->VRegs_updated & (1 << rnum)) {
     *                env->VRegs[rnum] = env->future_VRegs[rnum];
     *            }
     *        } else {
     *            env->VRegs[rnum] = env->future_VRegs[rnum];
     *        }
     *    }
     */
    for (i = 0; i < ctx->vreg_log_idx; i++) {
        int rnum = ctx->vreg_log[i];
        bool is_predicated = ctx->vreg_is_predicated[i];
        intptr_t dstoff = offsetof(CPUHexagonState, VRegs[rnum]);
        intptr_t srcoff = ctx_future_vreg_off(ctx, rnum, 1, false);
        size_t size = sizeof(MMVector);

        if (is_predicated) {
            TCGv cmp = tcg_temp_new();
            TCGLabel *label_skip = gen_new_label();

            tcg_gen_andi_tl(cmp, hex_VRegs_updated, 1 << rnum);
            tcg_gen_brcondi_tl(TCG_COND_EQ, cmp, 0, label_skip);
            tcg_temp_free(cmp);
            tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size);
            gen_set_label(label_skip);
        } else {
            tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size);
        }
    }

    /*
     *    for (i = 0; i < ctx->qreg_log_idx; i++) {
     *        int rnum = ctx->qreg_log[i];
     *        if (ctx->qreg_is_predicated[i]) {
     *            if (env->QRegs_updated & (1 << rnum)) {
     *                env->QRegs[rnum] = env->future_QRegs[rnum];
     *            }
     *        } else {
     *            env->QRegs[rnum] = env->future_QRegs[rnum];
     *        }
     *    }
     */
    for (i = 0; i < ctx->qreg_log_idx; i++) {
        int rnum = ctx->qreg_log[i];
        bool is_predicated = ctx->qreg_is_predicated[i];
        intptr_t dstoff = offsetof(CPUHexagonState, QRegs[rnum]);
        intptr_t srcoff = offsetof(CPUHexagonState, future_QRegs[rnum]);
        size_t size = sizeof(MMQReg);

        if (is_predicated) {
            TCGv cmp = tcg_temp_new();
            TCGLabel *label_skip = gen_new_label();

            tcg_gen_andi_tl(cmp, hex_QRegs_updated, 1 << rnum);
            tcg_gen_brcondi_tl(TCG_COND_EQ, cmp, 0, label_skip);
            tcg_temp_free(cmp);
            tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size);
            gen_set_label(label_skip);
        } else {
            tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size);
        }
    }

    if (pkt_has_hvx_store(pkt)) {
        gen_helper_commit_hvx_stores(cpu_env);
    }
}
static void update_exec_counters(DisasContext *ctx, Packet *pkt)
{
    int num_insns = pkt->num_insns;
    int num_real_insns = 0;
    int num_hvx_insns = 0;

    for (int i = 0; i < num_insns; i++) {
        if (!pkt->insn[i].is_endloop &&
            !pkt->insn[i].part1 &&
            !GET_ATTRIB(pkt->insn[i].opcode, A_IT_NOP)) {
            num_real_insns++;
        }
        if (GET_ATTRIB(pkt->insn[i].opcode, A_CVI)) {
            num_hvx_insns++;
        }
    }

    ctx->num_packets++;
    ctx->num_insns += num_real_insns;
    ctx->num_hvx_insns += num_hvx_insns;
}
static void gen_commit_packet(CPUHexagonState *env, DisasContext *ctx,
                              Packet *pkt)
{
    /*
     * If there is more than one store in a packet, make sure they are all OK
     * before proceeding with the rest of the packet commit.
     *
     * dczeroa has to be the only store operation in the packet, so we go
     * ahead and process that first.
     *
     * When there is an HVX store, there can also be a scalar store in either
     * slot 0 or slot 1, so we create a mask for the helper to indicate what
     * work needs to be done.
     *
     * When there are two scalar stores, we probe the one in slot 0.
     *
     * Note that we don't call the probe helper for packets with only one
     * store.  Therefore, we call process_store_log before anything else
     * involved in committing the packet.
     */
    bool has_store_s0 = pkt->pkt_has_store_s0;
    bool has_store_s1 = (pkt->pkt_has_store_s1 && !ctx->s1_store_processed);
    bool has_hvx_store = pkt_has_hvx_store(pkt);
    if (pkt->pkt_has_dczeroa) {
        /*
         * The dczeroa will be the store in slot 0, check that we don't have
         * a store in slot 1 or an HVX store.
         */
        g_assert(has_store_s0 && !has_store_s1 && !has_hvx_store);
        process_dczeroa(ctx, pkt);
    } else if (has_hvx_store) {
        TCGv mem_idx = tcg_constant_tl(ctx->mem_idx);

        if (!has_store_s0 && !has_store_s1) {
            gen_helper_probe_hvx_stores(cpu_env, mem_idx);
        } else {
            int mask = 0;
            TCGv mask_tcgv;

            if (has_store_s0) {
                mask |= (1 << 0);
            }
            if (has_store_s1) {
                mask |= (1 << 1);
            }
            if (has_hvx_store) {
                mask |= (1 << 2);
            }
            mask_tcgv = tcg_constant_tl(mask);
            gen_helper_probe_pkt_scalar_hvx_stores(cpu_env, mask_tcgv, mem_idx);
        }
    } else if (has_store_s0 && has_store_s1) {
        /*
         * process_store_log will execute the slot 1 store first,
         * so we only have to probe the store in slot 0
         */
        TCGv mem_idx = tcg_constant_tl(ctx->mem_idx);
        gen_helper_probe_pkt_scalar_store_s0(cpu_env, mem_idx);
    }

    process_store_log(ctx, pkt);

    gen_reg_writes(ctx);
    gen_pred_writes(ctx, pkt);
    if (pkt->pkt_has_hvx) {
        gen_commit_hvx(ctx, pkt);
    }
    update_exec_counters(ctx, pkt);
    if (HEX_DEBUG) {
        TCGv has_st0 =
            tcg_constant_tl(pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa);
        TCGv has_st1 =
            tcg_constant_tl(pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa);

        /* Handy place to set a breakpoint at the end of execution */
        gen_helper_debug_commit_end(cpu_env, has_st0, has_st1);
    }

    if (pkt->vhist_insn != NULL) {
        ctx->pre_commit = false;
        pkt->vhist_insn->generate(env, ctx, pkt->vhist_insn, pkt);
    }

    if (pkt->pkt_has_cof) {
        gen_end_tb(ctx);
    }
}
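
/*
 * Translate a single packet: read the encoded words, decode them,
 * generate TCG for each instruction, then commit the packet's effects.
 */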
static void decode_and_translate_packet(CPUHexagonState *env, DisasContext *ctx)
{
    uint32_t words[PACKET_WORDS_MAX];
    int nwords;
    Packet pkt;
    int i;

    nwords = read_packet_words(env, ctx, words);
    if (!nwords) {
        gen_exception_end_tb(ctx, HEX_EXCP_INVALID_PACKET);
        return;
    }

    if (decode_packet(nwords, words, &pkt, false) > 0) {
        HEX_DEBUG_PRINT_PKT(&pkt);
        gen_start_packet(ctx, &pkt);
        for (i = 0; i < pkt.num_insns; i++) {
            gen_insn(env, ctx, &pkt.insn[i], &pkt);
        }
        gen_commit_packet(env, ctx, &pkt);
        ctx->base.pc_next += pkt.encod_pkt_size_in_bytes;
    } else {
        gen_exception_end_tb(ctx, HEX_EXCP_INVALID_PACKET);
    }
}
static void hexagon_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->mem_idx = MMU_USER_IDX;
    ctx->num_packets = 0;
    ctx->num_insns = 0;
    ctx->num_hvx_insns = 0;
}
static void hexagon_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
static void hexagon_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}
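
/*
 * Scan forward for the end-of-packet marker and report whether the
 * packet would extend beyond the current page.
 */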
static bool pkt_crosses_page(CPUHexagonState *env, DisasContext *ctx)
{
    target_ulong page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
    bool found_end = false;
    int nwords;

    for (nwords = 0; !found_end && nwords < PACKET_WORDS_MAX; nwords++) {
        uint32_t word = cpu_ldl_code(env,
                                     ctx->base.pc_next + nwords * sizeof(uint32_t));
        found_end = is_packet_end(word);
    }
    uint32_t next_ptr = ctx->base.pc_next + nwords * sizeof(uint32_t);
    return found_end && next_ptr - page_start >= TARGET_PAGE_SIZE;
}
static void hexagon_tr_translate_packet(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHexagonState *env = cpu->env_ptr;

    decode_and_translate_packet(env, ctx);

    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        target_ulong bytes_max = PACKET_WORDS_MAX * sizeof(target_ulong);

        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE ||
            (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE - bytes_max &&
             pkt_crosses_page(env, ctx))) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }

        /*
         * The CPU log is used to compare against LLDB single stepping,
         * so end the TB after every packet.
         */
        HexagonCPU *hex_cpu = env_archcpu(env);
        if (hex_cpu->lldb_compat && qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
static void hexagon_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_exec_counters(ctx);
        tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next);
        tcg_gen_exit_tb(NULL, 0);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}
static void hexagon_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
static const TranslatorOps hexagon_tr_ops = {
    .init_disas_context = hexagon_tr_init_disas_context,
    .tb_start           = hexagon_tr_tb_start,
    .insn_start         = hexagon_tr_insn_start,
    .translate_insn     = hexagon_tr_translate_packet,
    .tb_stop            = hexagon_tr_tb_stop,
    .disas_log          = hexagon_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;

    translator_loop(&hexagon_tr_ops, &ctx.base, cs, tb, max_insns);
}
#define NAME_LEN               64
static char new_value_names[TOTAL_PER_THREAD_REGS][NAME_LEN];
static char reg_written_names[TOTAL_PER_THREAD_REGS][NAME_LEN];
static char new_pred_value_names[NUM_PREGS][NAME_LEN];
static char store_addr_names[STORES_MAX][NAME_LEN];
static char store_width_names[STORES_MAX][NAME_LEN];
static char store_val32_names[STORES_MAX][NAME_LEN];
static char store_val64_names[STORES_MAX][NAME_LEN];
static char vstore_addr_names[VSTORES_MAX][NAME_LEN];
static char vstore_size_names[VSTORES_MAX][NAME_LEN];
static char vstore_pending_names[VSTORES_MAX][NAME_LEN];
void hexagon_translate_init(void)
{
    int i;

    opcode_init();

    if (HEX_DEBUG) {
        if (!qemu_logfile) {
            qemu_set_log(qemu_loglevel);
        }
    }

    for (i = 0; i < TOTAL_PER_THREAD_REGS; i++) {
        hex_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, gpr[i]),
            hexagon_regnames[i]);

        snprintf(new_value_names[i], NAME_LEN, "new_%s", hexagon_regnames[i]);
        hex_new_value[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, new_value[i]),
            new_value_names[i]);

        if (HEX_DEBUG) {
            snprintf(reg_written_names[i], NAME_LEN, "reg_written_%s",
                     hexagon_regnames[i]);
            hex_reg_written[i] = tcg_global_mem_new(cpu_env,
                offsetof(CPUHexagonState, reg_written[i]),
                reg_written_names[i]);
        }
    }
    for (i = 0; i < NUM_PREGS; i++) {
        hex_pred[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, pred[i]),
            hexagon_prednames[i]);

        snprintf(new_pred_value_names[i], NAME_LEN, "new_pred_%s",
                 hexagon_prednames[i]);
        hex_new_pred_value[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, new_pred_value[i]),
            new_pred_value_names[i]);
    }
    hex_pred_written = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, pred_written), "pred_written");
    hex_next_PC = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, next_PC), "next_PC");
    hex_this_PC = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, this_PC), "this_PC");
    hex_slot_cancelled = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, slot_cancelled), "slot_cancelled");
    hex_branch_taken = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, branch_taken), "branch_taken");
    hex_pkt_has_store_s1 = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, pkt_has_store_s1), "pkt_has_store_s1");
    hex_dczero_addr = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, dczero_addr), "dczero_addr");
    hex_llsc_addr = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, llsc_addr), "llsc_addr");
    hex_llsc_val = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, llsc_val), "llsc_val");
    hex_llsc_val_i64 = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUHexagonState, llsc_val_i64), "llsc_val_i64");
    hex_VRegs_updated = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, VRegs_updated), "VRegs_updated");
    hex_QRegs_updated = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, QRegs_updated), "QRegs_updated");
    for (i = 0; i < STORES_MAX; i++) {
        snprintf(store_addr_names[i], NAME_LEN, "store_addr_%d", i);
        hex_store_addr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, mem_log_stores[i].va),
            store_addr_names[i]);

        snprintf(store_width_names[i], NAME_LEN, "store_width_%d", i);
        hex_store_width[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, mem_log_stores[i].width),
            store_width_names[i]);

        snprintf(store_val32_names[i], NAME_LEN, "store_val32_%d", i);
        hex_store_val32[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, mem_log_stores[i].data32),
            store_val32_names[i]);

        snprintf(store_val64_names[i], NAME_LEN, "store_val64_%d", i);
        hex_store_val64[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPUHexagonState, mem_log_stores[i].data64),
            store_val64_names[i]);
    }
    for (int i = 0; i < VSTORES_MAX; i++) {
        snprintf(vstore_addr_names[i], NAME_LEN, "vstore_addr_%d", i);
        hex_vstore_addr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, vstore[i].va),
            vstore_addr_names[i]);

        snprintf(vstore_size_names[i], NAME_LEN, "vstore_size_%d", i);
        hex_vstore_size[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, vstore[i].size),
            vstore_size_names[i]);

        snprintf(vstore_pending_names[i], NAME_LEN, "vstore_pending_%d", i);
        hex_vstore_pending[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, vstore_pending[i]),
            vstore_pending_names[i]);
    }
}