qemu.git: target/hexagon/translate.c
/*
 *  Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define QEMU_GENERATE
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "internal.h"
#include "attribs.h"
#include "insn.h"
#include "decode.h"
#include "translate.h"
#include "printinsn.h"
TCGv hex_gpr[TOTAL_PER_THREAD_REGS];
TCGv hex_pred[NUM_PREGS];
TCGv hex_next_PC;
TCGv hex_this_PC;
TCGv hex_slot_cancelled;
TCGv hex_branch_taken;
TCGv hex_new_value[TOTAL_PER_THREAD_REGS];
#if HEX_DEBUG
TCGv hex_reg_written[TOTAL_PER_THREAD_REGS];
#endif
TCGv hex_new_pred_value[NUM_PREGS];
TCGv hex_pred_written;
TCGv hex_store_addr[STORES_MAX];
TCGv hex_store_width[STORES_MAX];
TCGv hex_store_val32[STORES_MAX];
TCGv_i64 hex_store_val64[STORES_MAX];
TCGv hex_pkt_has_store_s1;
TCGv hex_dczero_addr;
TCGv hex_llsc_addr;
TCGv hex_llsc_val;
TCGv_i64 hex_llsc_val_i64;

static const char * const hexagon_prednames[] = {
    "p0", "p1", "p2", "p3"
};

void gen_exception(int excp)
{
    TCGv_i32 helper_tmp = tcg_const_i32(excp);
    gen_helper_raise_exception(cpu_env, helper_tmp);
    tcg_temp_free_i32(helper_tmp);
}

void gen_exception_debug(void)
{
    gen_exception(EXCP_DEBUG);
}

#if HEX_DEBUG
#define PACKET_BUFFER_LEN 1028
static void print_pkt(Packet *pkt)
{
    GString *buf = g_string_sized_new(PACKET_BUFFER_LEN);
    snprint_a_pkt_debug(buf, pkt);
    HEX_DEBUG_LOG("%s", buf->str);
    g_string_free(buf, true);
}
#define HEX_DEBUG_PRINT_PKT(pkt)  print_pkt(pkt)
#else
#define HEX_DEBUG_PRINT_PKT(pkt)  /* nothing */
#endif
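
/*
 * A Hexagon packet is encoded as 1 to PACKET_WORDS_MAX 32-bit words; the
 * parse bits in each word carry the end-of-packet marker (checked by
 * is_packet_end).  Read words until the end marker is found or the
 * maximum packet size is exceeded.
 */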
static int read_packet_words(CPUHexagonState *env, DisasContext *ctx,
                             uint32_t words[])
{
    bool found_end = false;
    int nwords, max_words;

    memset(words, 0, PACKET_WORDS_MAX * sizeof(uint32_t));
    for (nwords = 0; !found_end && nwords < PACKET_WORDS_MAX; nwords++) {
        words[nwords] = cpu_ldl_code(env,
                            ctx->base.pc_next + nwords * sizeof(uint32_t));
        found_end = is_packet_end(words[nwords]);
    }
    if (!found_end) {
        /* Read too many words without finding the end */
        return 0;
    }

    /* Check for page boundary crossing */
    max_words = -(ctx->base.pc_next | TARGET_PAGE_MASK) / sizeof(uint32_t);
    if (nwords > max_words) {
        /* We can only cross a page boundary at the beginning of a TB */
        g_assert(ctx->base.num_insns == 1);
    }

    HEX_DEBUG_LOG("decode_packet: pc = 0x%x\n", ctx->base.pc_next);
    HEX_DEBUG_LOG("    words = { ");
    for (int i = 0; i < nwords; i++) {
        HEX_DEBUG_LOG("0x%x, ", words[i]);
    }
    HEX_DEBUG_LOG("}\n");

    return nwords;
}

static bool check_for_attrib(Packet *pkt, int attrib)
{
    for (int i = 0; i < pkt->num_insns; i++) {
        if (GET_ATTRIB(pkt->insn[i].opcode, attrib)) {
            return true;
        }
    }
    return false;
}

static bool need_pc(Packet *pkt)
{
    return check_for_attrib(pkt, A_IMPLICIT_READS_PC);
}

static bool need_slot_cancelled(Packet *pkt)
{
    return check_for_attrib(pkt, A_CONDEXEC);
}

static bool need_pred_written(Packet *pkt)
{
    return check_for_attrib(pkt, A_WRITES_PRED_REG);
}
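
/*
 * Reset the per-packet translation state and initialize only the runtime
 * state this packet actually needs, based on its attributes: PC,
 * slot_cancelled, branch_taken/next_PC, and pred_written.
 */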
static void gen_start_packet(DisasContext *ctx, Packet *pkt)
{
    target_ulong next_PC = ctx->base.pc_next + pkt->encod_pkt_size_in_bytes;
    int i;

    /* Clear out the disassembly context */
    ctx->reg_log_idx = 0;
    bitmap_zero(ctx->regs_written, TOTAL_PER_THREAD_REGS);
    ctx->preg_log_idx = 0;
    for (i = 0; i < STORES_MAX; i++) {
        ctx->store_width[i] = 0;
    }
    tcg_gen_movi_tl(hex_pkt_has_store_s1, pkt->pkt_has_store_s1);
    ctx->s1_store_processed = 0;

#if HEX_DEBUG
    /* Handy place to set a breakpoint before the packet executes */
    gen_helper_debug_start_packet(cpu_env);
    tcg_gen_movi_tl(hex_this_PC, ctx->base.pc_next);
#endif

    /* Initialize the runtime state for packet semantics */
    if (need_pc(pkt)) {
        tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next);
    }
    if (need_slot_cancelled(pkt)) {
        tcg_gen_movi_tl(hex_slot_cancelled, 0);
    }
    if (pkt->pkt_has_cof) {
        tcg_gen_movi_tl(hex_branch_taken, 0);
        tcg_gen_movi_tl(hex_next_PC, next_PC);
    }
    if (need_pred_written(pkt)) {
        tcg_gen_movi_tl(hex_pred_written, 0);
    }
}

/*
 * The LOG_*_WRITE macros mark most of the writes in a packet.
 * However, there are some implicit writes marked as attributes
 * of the applicable instructions.
 */
static void mark_implicit_reg_write(DisasContext *ctx, Insn *insn,
                                    int attrib, int rnum)
{
    if (GET_ATTRIB(insn->opcode, attrib)) {
        int is_predicated = GET_ATTRIB(insn->opcode, A_CONDEXEC);
        if (is_predicated && !is_preloaded(ctx, rnum)) {
            tcg_gen_mov_tl(hex_new_value[rnum], hex_gpr[rnum]);
        }

        ctx_log_reg_write(ctx, rnum);
    }
}

static void mark_implicit_pred_write(DisasContext *ctx, Insn *insn,
                                     int attrib, int pnum)
{
    if (GET_ATTRIB(insn->opcode, attrib)) {
        ctx_log_pred_write(ctx, pnum);
    }
}

static void mark_implicit_writes(DisasContext *ctx, Insn *insn)
{
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_FP,  HEX_REG_FP);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SP,  HEX_REG_SP);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_LR,  HEX_REG_LR);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_LC0, HEX_REG_LC0);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SA0, HEX_REG_SA0);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_LC1, HEX_REG_LC1);
    mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SA1, HEX_REG_SA1);

    mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P0, 0);
    mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P1, 1);
    mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P2, 2);
    mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P3, 3);
}

static void gen_insn(CPUHexagonState *env, DisasContext *ctx,
                     Insn *insn, Packet *pkt)
{
    if (insn->generate) {
        mark_implicit_writes(ctx, insn);
        insn->generate(env, ctx, insn, pkt);
    } else {
        gen_exception(HEX_EXCP_INVALID_OPCODE);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}

/*
 * Helpers for generating the packet commit
 */
static void gen_reg_writes(DisasContext *ctx)
{
    int i;

    for (i = 0; i < ctx->reg_log_idx; i++) {
        int reg_num = ctx->reg_log[i];

        tcg_gen_mov_tl(hex_gpr[reg_num], hex_new_value[reg_num]);
    }
}

static void gen_pred_writes(DisasContext *ctx, Packet *pkt)
{
    TCGv zero, control_reg, pval;
    int i;

    /* Early exit if the log is empty */
    if (!ctx->preg_log_idx) {
        return;
    }

    zero = tcg_const_tl(0);
    control_reg = tcg_temp_new();
    pval = tcg_temp_new();

    /*
     * Only endloop instructions will conditionally
     * write a predicate.  If there are no endloop
     * instructions, we can use the non-conditional
     * write of the predicates.
     */
    if (pkt->pkt_has_endloop) {
        TCGv pred_written = tcg_temp_new();
        for (i = 0; i < ctx->preg_log_idx; i++) {
            int pred_num = ctx->preg_log[i];

            tcg_gen_andi_tl(pred_written, hex_pred_written, 1 << pred_num);
            tcg_gen_movcond_tl(TCG_COND_NE, hex_pred[pred_num],
                               pred_written, zero,
                               hex_new_pred_value[pred_num],
                               hex_pred[pred_num]);
        }
        tcg_temp_free(pred_written);
    } else {
        for (i = 0; i < ctx->preg_log_idx; i++) {
            int pred_num = ctx->preg_log[i];
            tcg_gen_mov_tl(hex_pred[pred_num], hex_new_pred_value[pred_num]);
#if HEX_DEBUG
            /* Do this so HELPER(debug_commit_end) will know */
            tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << pred_num);
#endif
        }
    }

    tcg_temp_free(zero);
    tcg_temp_free(control_reg);
    tcg_temp_free(pval);
}

#if HEX_DEBUG
static inline void gen_check_store_width(DisasContext *ctx, int slot_num)
{
    TCGv slot = tcg_const_tl(slot_num);
    TCGv check = tcg_const_tl(ctx->store_width[slot_num]);
    gen_helper_debug_check_store_width(cpu_env, slot, check);
    tcg_temp_free(slot);
    tcg_temp_free(check);
}
#define HEX_DEBUG_GEN_CHECK_STORE_WIDTH(ctx, slot_num) \
    gen_check_store_width(ctx, slot_num)
#else
#define HEX_DEBUG_GEN_CHECK_STORE_WIDTH(ctx, slot_num) /* nothing */
#endif

static bool slot_is_predicated(Packet *pkt, int slot_num)
{
    for (int i = 0; i < pkt->num_insns; i++) {
        if (pkt->insn[i].slot == slot_num) {
            return GET_ATTRIB(pkt->insn[i].opcode, A_CONDEXEC);
        }
    }
    /* If we get to here, we didn't find an instruction in the requested slot */
    g_assert_not_reached();
}
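
/*
 * Stores are logged into hex_store_addr/hex_store_val* during instruction
 * generation and written to memory here at commit time.  If the slot is
 * predicated and was cancelled, the store is skipped.
 */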
void process_store(DisasContext *ctx, Packet *pkt, int slot_num)
{
    bool is_predicated = slot_is_predicated(pkt, slot_num);
    TCGLabel *label_end = NULL;

    /*
     * We may have already processed this store
     * See CHECK_NOSHUF in macros.h
     */
    if (slot_num == 1 && ctx->s1_store_processed) {
        return;
    }
    ctx->s1_store_processed = 1;

    if (is_predicated) {
        TCGv cancelled = tcg_temp_new();
        label_end = gen_new_label();

        /* Don't do anything if the slot was cancelled */
        tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
        tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
        tcg_temp_free(cancelled);
    }
    {
        TCGv address = tcg_temp_local_new();
        tcg_gen_mov_tl(address, hex_store_addr[slot_num]);

        /*
         * If we know the width from the DisasContext, we can
         * generate much cleaner code.
         * Unfortunately, not all instructions execute the fSTORE
         * macro during code generation.  Anything that uses the
         * generic helper will have this problem.  Instructions
         * that use fWRAP to generate proper TCG code will be OK.
         */
        switch (ctx->store_width[slot_num]) {
        case 1:
            HEX_DEBUG_GEN_CHECK_STORE_WIDTH(ctx, slot_num);
            tcg_gen_qemu_st8(hex_store_val32[slot_num],
                             hex_store_addr[slot_num],
                             ctx->mem_idx);
            break;
        case 2:
            HEX_DEBUG_GEN_CHECK_STORE_WIDTH(ctx, slot_num);
            tcg_gen_qemu_st16(hex_store_val32[slot_num],
                              hex_store_addr[slot_num],
                              ctx->mem_idx);
            break;
        case 4:
            HEX_DEBUG_GEN_CHECK_STORE_WIDTH(ctx, slot_num);
            tcg_gen_qemu_st32(hex_store_val32[slot_num],
                              hex_store_addr[slot_num],
                              ctx->mem_idx);
            break;
        case 8:
            HEX_DEBUG_GEN_CHECK_STORE_WIDTH(ctx, slot_num);
            tcg_gen_qemu_st64(hex_store_val64[slot_num],
                              hex_store_addr[slot_num],
                              ctx->mem_idx);
            break;
        default:
            {
                /*
                 * If we get to here, we don't know the width at
                 * TCG generation time, we'll use a helper to
                 * avoid branching based on the width at runtime.
                 */
                TCGv slot = tcg_const_tl(slot_num);
                gen_helper_commit_store(cpu_env, slot);
                tcg_temp_free(slot);
            }
        }
        tcg_temp_free(address);
    }
    if (is_predicated) {
        gen_set_label(label_end);
    }
}

static void process_store_log(DisasContext *ctx, Packet *pkt)
{
    /*
     * When a packet has two stores, the hardware processes
     * slot 1 and then slot 0.  This will be important when
     * the memory accesses overlap.
     */
    if (pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa) {
        process_store(ctx, pkt, 1);
    }
    if (pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa) {
        process_store(ctx, pkt, 0);
    }
}

/* Zero out a 32-byte cache line */
static void process_dczeroa(DisasContext *ctx, Packet *pkt)
{
    if (pkt->pkt_has_dczeroa) {
        /* Store 32 bytes of zero starting at (addr & ~0x1f) */
        TCGv addr = tcg_temp_new();
        TCGv_i64 zero = tcg_const_i64(0);

        tcg_gen_andi_tl(addr, hex_dczero_addr, ~0x1f);
        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);

        tcg_temp_free(addr);
        tcg_temp_free_i64(zero);
    }
}
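
/*
 * Update the QEMU packet/instruction counters.  Endloop markers, nops,
 * and the part1 halves of two-part instructions are not counted as
 * executed instructions.
 */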
static void update_exec_counters(DisasContext *ctx, Packet *pkt)
{
    int num_insns = pkt->num_insns;
    int num_real_insns = 0;

    for (int i = 0; i < num_insns; i++) {
        if (!pkt->insn[i].is_endloop &&
            !pkt->insn[i].part1 &&
            !GET_ATTRIB(pkt->insn[i].opcode, A_IT_NOP)) {
            num_real_insns++;
        }
    }

    ctx->num_packets++;
    ctx->num_insns += num_real_insns;
}

static void gen_exec_counters(DisasContext *ctx)
{
    tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_PKT_CNT],
                    hex_gpr[HEX_REG_QEMU_PKT_CNT], ctx->num_packets);
    tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_INSN_CNT],
                    hex_gpr[HEX_REG_QEMU_INSN_CNT], ctx->num_insns);
}
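
/*
 * Commit the packet: register writes first, then predicate writes, then
 * the store log and dczeroa, and finally the execution counters.  A packet
 * with a change of flow ends the translation block.
 */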
static void gen_commit_packet(DisasContext *ctx, Packet *pkt)
{
    gen_reg_writes(ctx);
    gen_pred_writes(ctx, pkt);
    process_store_log(ctx, pkt);
    process_dczeroa(ctx, pkt);
    update_exec_counters(ctx, pkt);
#if HEX_DEBUG
    {
        TCGv has_st0 =
            tcg_const_tl(pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa);
        TCGv has_st1 =
            tcg_const_tl(pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa);

        /* Handy place to set a breakpoint at the end of execution */
        gen_helper_debug_commit_end(cpu_env, has_st0, has_st1);

        tcg_temp_free(has_st0);
        tcg_temp_free(has_st1);
    }
#endif

    if (pkt->pkt_has_cof) {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}
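
/*
 * Translate one packet: read the encoded words, decode them, generate TCG
 * for each instruction, and commit the packet.  Undecodable packets raise
 * HEX_EXCP_INVALID_PACKET.
 */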
static void decode_and_translate_packet(CPUHexagonState *env, DisasContext *ctx)
{
    uint32_t words[PACKET_WORDS_MAX];
    int nwords;
    Packet pkt;
    int i;

    nwords = read_packet_words(env, ctx, words);
    if (!nwords) {
        gen_exception(HEX_EXCP_INVALID_PACKET);
        ctx->base.is_jmp = DISAS_NORETURN;
        return;
    }

    if (decode_packet(nwords, words, &pkt, false) > 0) {
        HEX_DEBUG_PRINT_PKT(&pkt);
        gen_start_packet(ctx, &pkt);
        for (i = 0; i < pkt.num_insns; i++) {
            gen_insn(env, ctx, &pkt.insn[i], &pkt);
        }
        gen_commit_packet(ctx, &pkt);
        ctx->base.pc_next += pkt.encod_pkt_size_in_bytes;
    } else {
        gen_exception(HEX_EXCP_INVALID_PACKET);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}

static void hexagon_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->mem_idx = MMU_USER_IDX;
    ctx->num_packets = 0;
    ctx->num_insns = 0;
}

static void hexagon_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void hexagon_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}

static bool hexagon_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                        const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next);
    ctx->base.is_jmp = DISAS_NORETURN;
    gen_exception_debug();
    /*
     * The address covered by the breakpoint must be included in
     * [tb->pc, tb->pc + tb->size) in order for it to be
     * properly cleared -- thus we increment the PC here so that
     * the logic setting tb->size below does the right thing.
     */
    ctx->base.pc_next += 4;
    return true;
}
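
/*
 * Check whether the packet starting at pc_next extends past the end of
 * the page containing pc_first, i.e. translating it would cross a page
 * boundary.
 */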
static bool pkt_crosses_page(CPUHexagonState *env, DisasContext *ctx)
{
    target_ulong page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
    bool found_end = false;
    int nwords;

    for (nwords = 0; !found_end && nwords < PACKET_WORDS_MAX; nwords++) {
        uint32_t word = cpu_ldl_code(env,
                            ctx->base.pc_next + nwords * sizeof(uint32_t));
        found_end = is_packet_end(word);
    }
    uint32_t next_ptr = ctx->base.pc_next + nwords * sizeof(uint32_t);
    return found_end && next_ptr - page_start >= TARGET_PAGE_SIZE;
}

static void hexagon_tr_translate_packet(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHexagonState *env = cpu->env_ptr;

    decode_and_translate_packet(env, ctx);

    if (ctx->base.is_jmp == DISAS_NEXT) {
        target_ulong page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
        target_ulong bytes_max = PACKET_WORDS_MAX * sizeof(target_ulong);

        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE ||
            (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE - bytes_max &&
             pkt_crosses_page(env, ctx))) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }

        /*
         * The CPU log is used to compare against LLDB single stepping,
         * so end the TB after every packet.
         */
        HexagonCPU *hex_cpu = container_of(env, HexagonCPU, env);
        if (hex_cpu->lldb_compat && qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            ctx->base.is_jmp = DISAS_TOO_MANY;
        }
#if HEX_DEBUG
        /* When debugging, only put one packet per TB */
        ctx->base.is_jmp = DISAS_TOO_MANY;
#endif
    }
}

static void hexagon_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_exec_counters(ctx);
        tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next);
        if (ctx->base.singlestep_enabled) {
            gen_exception_debug();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    case DISAS_NORETURN:
        gen_exec_counters(ctx);
        tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], hex_next_PC);
        if (ctx->base.singlestep_enabled) {
            gen_exception_debug();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void hexagon_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps hexagon_tr_ops = {
    .init_disas_context = hexagon_tr_init_disas_context,
    .tb_start           = hexagon_tr_tb_start,
    .insn_start         = hexagon_tr_insn_start,
    .breakpoint_check   = hexagon_tr_breakpoint_check,
    .translate_insn     = hexagon_tr_translate_packet,
    .tb_stop            = hexagon_tr_tb_stop,
    .disas_log          = hexagon_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;

    translator_loop(&hexagon_tr_ops, &ctx.base, cs, tb, max_insns);
}

#define NAME_LEN 64
static char new_value_names[TOTAL_PER_THREAD_REGS][NAME_LEN];
#if HEX_DEBUG
static char reg_written_names[TOTAL_PER_THREAD_REGS][NAME_LEN];
#endif
static char new_pred_value_names[NUM_PREGS][NAME_LEN];
static char store_addr_names[STORES_MAX][NAME_LEN];
static char store_width_names[STORES_MAX][NAME_LEN];
static char store_val32_names[STORES_MAX][NAME_LEN];
static char store_val64_names[STORES_MAX][NAME_LEN];
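
/*
 * Allocate the TCG globals for the architectural state.  The generated
 * names are kept in the static buffers above because they must outlive
 * this function.
 */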
void hexagon_translate_init(void)
{
    int i;

    opcode_init();

#if HEX_DEBUG
    if (!qemu_logfile) {
        qemu_set_log(qemu_loglevel);
    }
#endif

    for (i = 0; i < TOTAL_PER_THREAD_REGS; i++) {
        hex_gpr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, gpr[i]),
            hexagon_regnames[i]);

        snprintf(new_value_names[i], NAME_LEN, "new_%s", hexagon_regnames[i]);
        hex_new_value[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, new_value[i]),
            new_value_names[i]);

#if HEX_DEBUG
        snprintf(reg_written_names[i], NAME_LEN, "reg_written_%s",
                 hexagon_regnames[i]);
        hex_reg_written[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, reg_written[i]),
            reg_written_names[i]);
#endif
    }
    for (i = 0; i < NUM_PREGS; i++) {
        hex_pred[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, pred[i]),
            hexagon_prednames[i]);

        snprintf(new_pred_value_names[i], NAME_LEN, "new_pred_%s",
                 hexagon_prednames[i]);
        hex_new_pred_value[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, new_pred_value[i]),
            new_pred_value_names[i]);
    }
    hex_pred_written = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, pred_written), "pred_written");
    hex_next_PC = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, next_PC), "next_PC");
    hex_this_PC = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, this_PC), "this_PC");
    hex_slot_cancelled = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, slot_cancelled), "slot_cancelled");
    hex_branch_taken = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, branch_taken), "branch_taken");
    hex_pkt_has_store_s1 = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, pkt_has_store_s1), "pkt_has_store_s1");
    hex_dczero_addr = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, dczero_addr), "dczero_addr");
    hex_llsc_addr = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, llsc_addr), "llsc_addr");
    hex_llsc_val = tcg_global_mem_new(cpu_env,
        offsetof(CPUHexagonState, llsc_val), "llsc_val");
    hex_llsc_val_i64 = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUHexagonState, llsc_val_i64), "llsc_val_i64");
    for (i = 0; i < STORES_MAX; i++) {
        snprintf(store_addr_names[i], NAME_LEN, "store_addr_%d", i);
        hex_store_addr[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, mem_log_stores[i].va),
            store_addr_names[i]);

        snprintf(store_width_names[i], NAME_LEN, "store_width_%d", i);
        hex_store_width[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, mem_log_stores[i].width),
            store_width_names[i]);

        snprintf(store_val32_names[i], NAME_LEN, "store_val32_%d", i);
        hex_store_val32[i] = tcg_global_mem_new(cpu_env,
            offsetof(CPUHexagonState, mem_log_stores[i].data32),
            store_val32_names[i]);

        snprintf(store_val64_names[i], NAME_LEN, "store_val64_%d", i);
        hex_store_val64[i] = tcg_global_mem_new_i64(cpu_env,
            offsetof(CPUHexagonState, mem_log_stores[i].data64),
            store_val64_names[i]);
    }
}