target-xtensa: raise an exception for invalid and reserved opcodes
[qemu.git] / target-xtensa / translate.c
blob c81450d1a5393a972af622cf8a304d05e91d883f
1 /*
2 * Xtensa ISA:
3 * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
5 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of the Open Source and Linux Lab nor the
16 * names of its contributors may be used to endorse or promote products
17 * derived from this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include <stdio.h>
33 #include "cpu.h"
34 #include "exec-all.h"
35 #include "disas.h"
36 #include "tcg-op.h"
37 #include "qemu-log.h"
38 #include "sysemu.h"
40 #include "helpers.h"
41 #define GEN_HELPER 1
42 #include "helpers.h"
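/*
 * Per-instruction translation state.  pc/next_pc track the instruction
 * being decoded; ring is the static ring taken from the TB flags and
 * cring is the effective ring (forced to 0 while PS.EXCM is set);
 * lbeg/lend cache the zero-overhead-loop registers; the sar_* fields
 * remember what kind of value was last written to SAR so shifts can use
 * cheaper 32-bit operations; ccount_delta counts instructions since
 * CCOUNT was last synchronized; used_window is the highest AR quad
 * already window-checked within this TB.
 */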
44 typedef struct DisasContext {
45 const XtensaConfig *config;
46 TranslationBlock *tb;
47 uint32_t pc;
48 uint32_t next_pc;
49 int cring;
50 int ring;
51 uint32_t lbeg;
52 uint32_t lend;
53 TCGv_i32 litbase;
54 int is_jmp;
55 int singlestep_enabled;
57 bool sar_5bit;
58 bool sar_m32_5bit;
59 bool sar_m32_allocated;
60 TCGv_i32 sar_m32;
62 uint32_t ccount_delta;
63 unsigned used_window;
64 } DisasContext;
66 static TCGv_ptr cpu_env;
67 static TCGv_i32 cpu_pc;
68 static TCGv_i32 cpu_R[16];
69 static TCGv_i32 cpu_SR[256];
70 static TCGv_i32 cpu_UR[256];
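/*
 * TCG globals backing the architectural state: the 16 visible AR
 * registers plus one global per special/user register that has a name
 * in sregnames/uregnames below; unnamed SR/UR indices get no global.
 */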
72 #include "gen-icount.h"
74 static const char * const sregnames[256] = {
75 [LBEG] = "LBEG",
76 [LEND] = "LEND",
77 [LCOUNT] = "LCOUNT",
78 [SAR] = "SAR",
79 [BR] = "BR",
80 [LITBASE] = "LITBASE",
81 [SCOMPARE1] = "SCOMPARE1",
82 [ACCLO] = "ACCLO",
83 [ACCHI] = "ACCHI",
84 [MR] = "MR0",
85 [MR + 1] = "MR1",
86 [MR + 2] = "MR2",
87 [MR + 3] = "MR3",
88 [WINDOW_BASE] = "WINDOW_BASE",
89 [WINDOW_START] = "WINDOW_START",
90 [PTEVADDR] = "PTEVADDR",
91 [RASID] = "RASID",
92 [ITLBCFG] = "ITLBCFG",
93 [DTLBCFG] = "DTLBCFG",
94 [EPC1] = "EPC1",
95 [EPC1 + 1] = "EPC2",
96 [EPC1 + 2] = "EPC3",
97 [EPC1 + 3] = "EPC4",
98 [EPC1 + 4] = "EPC5",
99 [EPC1 + 5] = "EPC6",
100 [EPC1 + 6] = "EPC7",
101 [DEPC] = "DEPC",
102 [EPS2] = "EPS2",
103 [EPS2 + 1] = "EPS3",
104 [EPS2 + 2] = "EPS4",
105 [EPS2 + 3] = "EPS5",
106 [EPS2 + 4] = "EPS6",
107 [EPS2 + 5] = "EPS7",
108 [EXCSAVE1] = "EXCSAVE1",
109 [EXCSAVE1 + 1] = "EXCSAVE2",
110 [EXCSAVE1 + 2] = "EXCSAVE3",
111 [EXCSAVE1 + 3] = "EXCSAVE4",
112 [EXCSAVE1 + 4] = "EXCSAVE5",
113 [EXCSAVE1 + 5] = "EXCSAVE6",
114 [EXCSAVE1 + 6] = "EXCSAVE7",
115 [CPENABLE] = "CPENABLE",
116 [INTSET] = "INTSET",
117 [INTCLEAR] = "INTCLEAR",
118 [INTENABLE] = "INTENABLE",
119 [PS] = "PS",
120 [VECBASE] = "VECBASE",
121 [EXCCAUSE] = "EXCCAUSE",
122 [CCOUNT] = "CCOUNT",
123 [PRID] = "PRID",
124 [EXCVADDR] = "EXCVADDR",
125 [CCOMPARE] = "CCOMPARE0",
126 [CCOMPARE + 1] = "CCOMPARE1",
127 [CCOMPARE + 2] = "CCOMPARE2",
130 static const char * const uregnames[256] = {
131 [THREADPTR] = "THREADPTR",
132 [FCR] = "FCR",
133 [FSR] = "FSR",
136 void xtensa_translate_init(void)
138 static const char * const regnames[] = {
139 "ar0", "ar1", "ar2", "ar3",
140 "ar4", "ar5", "ar6", "ar7",
141 "ar8", "ar9", "ar10", "ar11",
142 "ar12", "ar13", "ar14", "ar15",
144 int i;
146 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
147 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
148 offsetof(CPUState, pc), "pc");
150 for (i = 0; i < 16; i++) {
151 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
152 offsetof(CPUState, regs[i]),
153 regnames[i]);
156 for (i = 0; i < 256; ++i) {
157 if (sregnames[i]) {
158 cpu_SR[i] = tcg_global_mem_new_i32(TCG_AREG0,
159 offsetof(CPUState, sregs[i]),
160 sregnames[i]);
164 for (i = 0; i < 256; ++i) {
165 if (uregnames[i]) {
166 cpu_UR[i] = tcg_global_mem_new_i32(TCG_AREG0,
167 offsetof(CPUState, uregs[i]),
168 uregnames[i]);
171 #define GEN_HELPER 2
172 #include "helpers.h"
175 static inline bool option_bits_enabled(DisasContext *dc, uint64_t opt)
177 return xtensa_option_bits_enabled(dc->config, opt);
180 static inline bool option_enabled(DisasContext *dc, int opt)
182 return xtensa_option_enabled(dc->config, opt);
185 static void init_litbase(DisasContext *dc)
187 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
188 dc->litbase = tcg_temp_local_new_i32();
189 tcg_gen_andi_i32(dc->litbase, cpu_SR[LITBASE], 0xfffff000);
193 static void reset_litbase(DisasContext *dc)
195 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
196 tcg_temp_free(dc->litbase);
200 static void init_sar_tracker(DisasContext *dc)
202 dc->sar_5bit = false;
203 dc->sar_m32_5bit = false;
204 dc->sar_m32_allocated = false;
207 static void reset_sar_tracker(DisasContext *dc)
209 if (dc->sar_m32_allocated) {
210 tcg_temp_free(dc->sar_m32);
214 static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
216 tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
217 if (dc->sar_m32_5bit) {
218 tcg_gen_discard_i32(dc->sar_m32);
220 dc->sar_5bit = true;
221 dc->sar_m32_5bit = false;
224 static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
226 TCGv_i32 tmp = tcg_const_i32(32);
227 if (!dc->sar_m32_allocated) {
228 dc->sar_m32 = tcg_temp_local_new_i32();
229 dc->sar_m32_allocated = true;
231 tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
232 tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
233 dc->sar_5bit = false;
234 dc->sar_m32_5bit = true;
235 tcg_temp_free(tmp);
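/*
 * SAR tracking: right-shift setups (SSR, SSA8L, SSAI) store the 5-bit
 * amount in SAR and set sar_5bit, so SRA/SRL can use plain 32-bit
 * shifts.  Left-shift setups (SSL, SSA8B) store 32 - amount in SAR, as
 * the architecture requires, but also keep the original 5-bit amount in
 * sar_m32 so SLL can shift left directly instead of going through a
 * 64-bit funnel shift.
 */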
238 static void gen_advance_ccount(DisasContext *dc)
240 if (dc->ccount_delta > 0) {
241 TCGv_i32 tmp = tcg_const_i32(dc->ccount_delta);
242 dc->ccount_delta = 0;
243 gen_helper_advance_ccount(tmp);
244 tcg_temp_free(tmp);
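/*
 * CCOUNT is advanced lazily: the translation loop bumps ccount_delta
 * once per instruction, and the accumulated delta is flushed through
 * the advance_ccount helper right before anything that can observe or
 * depend on it (exceptions, jumps, CCOUNT/CCOMPARE accesses, WAITI).
 */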
248 static void reset_used_window(DisasContext *dc)
250 dc->used_window = 0;
253 static void gen_exception(DisasContext *dc, int excp)
255 TCGv_i32 tmp = tcg_const_i32(excp);
256 gen_advance_ccount(dc);
257 gen_helper_exception(tmp);
258 tcg_temp_free(tmp);
261 static void gen_exception_cause(DisasContext *dc, uint32_t cause)
263 TCGv_i32 tpc = tcg_const_i32(dc->pc);
264 TCGv_i32 tcause = tcg_const_i32(cause);
265 gen_advance_ccount(dc);
266 gen_helper_exception_cause(tpc, tcause);
267 tcg_temp_free(tpc);
268 tcg_temp_free(tcause);
269 if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
270 cause == SYSCALL_CAUSE) {
271 dc->is_jmp = DISAS_UPDATE;
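/*
 * Only illegal-instruction and syscall causes unconditionally leave the
 * straight-line code, so only they terminate the TB here; causes raised
 * behind a runtime branch (alignment, integer divide by zero) let
 * translation of the fall-through path continue.
 */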
275 static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
276 TCGv_i32 vaddr)
278 TCGv_i32 tpc = tcg_const_i32(dc->pc);
279 TCGv_i32 tcause = tcg_const_i32(cause);
280 gen_advance_ccount(dc);
281 gen_helper_exception_cause_vaddr(tpc, tcause, vaddr);
282 tcg_temp_free(tpc);
283 tcg_temp_free(tcause);
286 static void gen_check_privilege(DisasContext *dc)
288 if (dc->cring) {
289 gen_exception_cause(dc, PRIVILEGED_CAUSE);
290 dc->is_jmp = DISAS_UPDATE;
294 static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
296 tcg_gen_mov_i32(cpu_pc, dest);
297 if (dc->singlestep_enabled) {
298 gen_exception(dc, EXCP_DEBUG);
299 } else {
300 gen_advance_ccount(dc);
301 if (slot >= 0) {
302 tcg_gen_goto_tb(slot);
303 tcg_gen_exit_tb((tcg_target_long)dc->tb + slot);
304 } else {
305 tcg_gen_exit_tb(0);
308 dc->is_jmp = DISAS_UPDATE;
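/*
 * gen_jump_slot above: slot >= 0 means the jump may be chained to one
 * of the TB's two direct-jump slots with goto_tb; a negative slot
 * (indirect jumps, cross-page targets) forces a plain exit_tb(0), and
 * under single-stepping an EXCP_DEBUG exception is raised instead.
 */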
311 static void gen_jump(DisasContext *dc, TCGv dest)
313 gen_jump_slot(dc, dest, -1);
316 static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
318 TCGv_i32 tmp = tcg_const_i32(dest);
319 if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
320 slot = -1;
322 gen_jump_slot(dc, tmp, slot);
323 tcg_temp_free(tmp);
326 static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
327 int slot)
329 TCGv_i32 tcallinc = tcg_const_i32(callinc);
331 tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
332 tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
333 tcg_temp_free(tcallinc);
334 tcg_gen_movi_i32(cpu_R[callinc << 2],
335 (callinc << 30) | (dc->next_pc & 0x3fffffff));
336 gen_jump_slot(dc, dest, slot);
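/*
 * Windowed call (above): PS.CALLINC records the window increment and
 * the return address is written to a(callinc * 4) with the increment
 * encoded in its two top bits, which is where RETW later finds it.
 */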
339 static void gen_callw(DisasContext *dc, int callinc, TCGv_i32 dest)
341 gen_callw_slot(dc, callinc, dest, -1);
344 static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
346 TCGv_i32 tmp = tcg_const_i32(dest);
347 if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
348 slot = -1;
350 gen_callw_slot(dc, callinc, tmp, slot);
351 tcg_temp_free(tmp);
354 static bool gen_check_loop_end(DisasContext *dc, int slot)
356 if (option_enabled(dc, XTENSA_OPTION_LOOP) &&
357 !(dc->tb->flags & XTENSA_TBFLAG_EXCM) &&
358 dc->next_pc == dc->lend) {
359 int label = gen_new_label();
361 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
362 tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
363 gen_jumpi(dc, dc->lbeg, slot);
364 gen_set_label(label);
365 gen_jumpi(dc, dc->next_pc, -1);
366 return true;
368 return false;
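/*
 * Zero-overhead loop handling: when the instruction just translated
 * ends exactly at LEND (and PS.EXCM is clear), emit code that either
 * decrements LCOUNT and branches back to LBEG, or falls through to
 * next_pc when LCOUNT is already zero.
 */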
371 static void gen_jumpi_check_loop_end(DisasContext *dc, int slot)
373 if (!gen_check_loop_end(dc, slot)) {
374 gen_jumpi(dc, dc->next_pc, slot);
378 static void gen_brcond(DisasContext *dc, TCGCond cond,
379 TCGv_i32 t0, TCGv_i32 t1, uint32_t offset)
381 int label = gen_new_label();
383 tcg_gen_brcond_i32(cond, t0, t1, label);
384 gen_jumpi_check_loop_end(dc, 0);
385 gen_set_label(label);
386 gen_jumpi(dc, dc->pc + offset, 1);
389 static void gen_brcondi(DisasContext *dc, TCGCond cond,
390 TCGv_i32 t0, uint32_t t1, uint32_t offset)
392 TCGv_i32 tmp = tcg_const_i32(t1);
393 gen_brcond(dc, cond, t0, tmp, offset);
394 tcg_temp_free(tmp);
397 static void gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
399 gen_advance_ccount(dc);
400 tcg_gen_mov_i32(d, cpu_SR[sr]);
403 static void gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
405 tcg_gen_shri_i32(d, cpu_SR[EXCVADDR], 10);
406 tcg_gen_or_i32(d, d, cpu_SR[sr]);
407 tcg_gen_andi_i32(d, d, 0xfffffffc);
410 static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
412 static void (* const rsr_handler[256])(DisasContext *dc,
413 TCGv_i32 d, uint32_t sr) = {
414 [CCOUNT] = gen_rsr_ccount,
415 [PTEVADDR] = gen_rsr_ptevaddr,
418 if (sregnames[sr]) {
419 if (rsr_handler[sr]) {
420 rsr_handler[sr](dc, d, sr);
421 } else {
422 tcg_gen_mov_i32(d, cpu_SR[sr]);
424 } else {
425 qemu_log("RSR %d not implemented, ", sr);
429 static void gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s)
431 gen_helper_wsr_lbeg(s);
434 static void gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s)
436 gen_helper_wsr_lend(s);
439 static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s)
441 tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f);
442 if (dc->sar_m32_5bit) {
443 tcg_gen_discard_i32(dc->sar_m32);
445 dc->sar_5bit = false;
446 dc->sar_m32_5bit = false;
449 static void gen_wsr_br(DisasContext *dc, uint32_t sr, TCGv_i32 s)
451 tcg_gen_andi_i32(cpu_SR[sr], s, 0xffff);
454 static void gen_wsr_litbase(DisasContext *dc, uint32_t sr, TCGv_i32 s)
456 tcg_gen_andi_i32(cpu_SR[sr], s, 0xfffff001);
457 /* This can change tb->flags, so exit tb */
458 gen_jumpi_check_loop_end(dc, -1);
461 static void gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s)
463 tcg_gen_ext8s_i32(cpu_SR[sr], s);
466 static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v)
468 gen_helper_wsr_windowbase(v);
469 reset_used_window(dc);
472 static void gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v)
474 tcg_gen_andi_i32(cpu_SR[sr], v, (1 << dc->config->nareg / 4) - 1);
475 reset_used_window(dc);
478 static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v)
480 tcg_gen_andi_i32(cpu_SR[sr], v, 0xffc00000);
483 static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
485 gen_helper_wsr_rasid(v);
486 /* This can change tb->flags, so exit tb */
487 gen_jumpi_check_loop_end(dc, -1);
490 static void gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v)
492 tcg_gen_andi_i32(cpu_SR[sr], v, 0x01130000);
495 static void gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v)
497 tcg_gen_andi_i32(cpu_SR[sr], v,
498 dc->config->inttype_mask[INTTYPE_SOFTWARE]);
499 gen_helper_check_interrupts(cpu_env);
500 gen_jumpi_check_loop_end(dc, 0);
503 static void gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v)
505 TCGv_i32 tmp = tcg_temp_new_i32();
507 tcg_gen_andi_i32(tmp, v,
508 dc->config->inttype_mask[INTTYPE_EDGE] |
509 dc->config->inttype_mask[INTTYPE_NMI] |
510 dc->config->inttype_mask[INTTYPE_SOFTWARE]);
511 tcg_gen_andc_i32(cpu_SR[INTSET], cpu_SR[INTSET], tmp);
512 tcg_temp_free(tmp);
513 gen_helper_check_interrupts(cpu_env);
516 static void gen_wsr_intenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
518 tcg_gen_mov_i32(cpu_SR[sr], v);
519 gen_helper_check_interrupts(cpu_env);
520 gen_jumpi_check_loop_end(dc, 0);
523 static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
525 uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
526 PS_UM | PS_EXCM | PS_INTLEVEL;
528 if (option_enabled(dc, XTENSA_OPTION_MMU)) {
529 mask |= PS_RING;
531 tcg_gen_andi_i32(cpu_SR[sr], v, mask);
532 reset_used_window(dc);
533 gen_helper_check_interrupts(cpu_env);
534 /* This can change mmu index and tb->flags, so exit tb */
535 gen_jumpi_check_loop_end(dc, -1);
538 static void gen_wsr_prid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
542 static void gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v)
544 uint32_t id = sr - CCOMPARE;
545 if (id < dc->config->nccompare) {
546 uint32_t int_bit = 1 << dc->config->timerint[id];
547 gen_advance_ccount(dc);
548 tcg_gen_mov_i32(cpu_SR[sr], v);
549 tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit);
550 gen_helper_check_interrupts(cpu_env);
554 static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
556 static void (* const wsr_handler[256])(DisasContext *dc,
557 uint32_t sr, TCGv_i32 v) = {
558 [LBEG] = gen_wsr_lbeg,
559 [LEND] = gen_wsr_lend,
560 [SAR] = gen_wsr_sar,
561 [BR] = gen_wsr_br,
562 [LITBASE] = gen_wsr_litbase,
563 [ACCHI] = gen_wsr_acchi,
564 [WINDOW_BASE] = gen_wsr_windowbase,
565 [WINDOW_START] = gen_wsr_windowstart,
566 [PTEVADDR] = gen_wsr_ptevaddr,
567 [RASID] = gen_wsr_rasid,
568 [ITLBCFG] = gen_wsr_tlbcfg,
569 [DTLBCFG] = gen_wsr_tlbcfg,
570 [INTSET] = gen_wsr_intset,
571 [INTCLEAR] = gen_wsr_intclear,
572 [INTENABLE] = gen_wsr_intenable,
573 [PS] = gen_wsr_ps,
574 [PRID] = gen_wsr_prid,
575 [CCOMPARE] = gen_wsr_ccompare,
576 [CCOMPARE + 1] = gen_wsr_ccompare,
577 [CCOMPARE + 2] = gen_wsr_ccompare,
580 if (sregnames[sr]) {
581 if (wsr_handler[sr]) {
582 wsr_handler[sr](dc, sr, s);
583 } else {
584 tcg_gen_mov_i32(cpu_SR[sr], s);
586 } else {
587 qemu_log("WSR %d not implemented, ", sr);
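/*
 * RSR/WSR dispatch above: special registers with side effects (loop
 * registers, SAR, window state, MMU configuration, interrupt state, PS,
 * CCOMPARE, ...) go through the handler tables; everything else is a
 * plain register move.  Accesses to SRs that have no name in
 * sregnames[] are merely logged here.
 */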
591 static void gen_load_store_alignment(DisasContext *dc, int shift,
592 TCGv_i32 addr, bool no_hw_alignment)
594 if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
595 tcg_gen_andi_i32(addr, addr, ~0 << shift);
596 } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
597 no_hw_alignment) {
598 int label = gen_new_label();
599 TCGv_i32 tmp = tcg_temp_new_i32();
600 tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
601 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
602 gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
603 gen_set_label(label);
604 tcg_temp_free(tmp);
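/*
 * Alignment handling: without the unaligned-exception option the low
 * address bits are silently masked off; when both the unaligned-
 * exception and hardware-alignment options are configured, accesses
 * flagged no_hw_alignment (those that must be aligned regardless) get
 * an explicit check that raises LoadStoreAlignmentCause with the
 * faulting address.
 */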
608 static void gen_waiti(DisasContext *dc, uint32_t imm4)
610 TCGv_i32 pc = tcg_const_i32(dc->next_pc);
611 TCGv_i32 intlevel = tcg_const_i32(imm4);
612 gen_advance_ccount(dc);
613 gen_helper_waiti(pc, intlevel);
614 tcg_temp_free(pc);
615 tcg_temp_free(intlevel);
618 static void gen_window_check1(DisasContext *dc, unsigned r1)
620 if (dc->tb->flags & XTENSA_TBFLAG_EXCM) {
621 return;
623 if (option_enabled(dc, XTENSA_OPTION_WINDOWED_REGISTER) &&
624 r1 / 4 > dc->used_window) {
625 TCGv_i32 pc = tcg_const_i32(dc->pc);
626 TCGv_i32 w = tcg_const_i32(r1 / 4);
628 dc->used_window = r1 / 4;
629 gen_advance_ccount(dc);
630 gen_helper_window_check(pc, w);
632 tcg_temp_free(w);
633 tcg_temp_free(pc);
637 static void gen_window_check2(DisasContext *dc, unsigned r1, unsigned r2)
639 gen_window_check1(dc, r1 > r2 ? r1 : r2);
642 static void gen_window_check3(DisasContext *dc, unsigned r1, unsigned r2,
643 unsigned r3)
645 gen_window_check2(dc, r1, r2 > r3 ? r2 : r3);
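/*
 * Register-window checks (above): before an instruction touches AR
 * registers, the highest register quad it uses is compared against
 * used_window and a window_check helper call is emitted if it grows;
 * the check is skipped entirely while PS.EXCM is set.
 */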
648 static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned)
650 TCGv_i32 m = tcg_temp_new_i32();
652 if (hi) {
653 (is_unsigned ? tcg_gen_shri_i32 : tcg_gen_sari_i32)(m, v, 16);
654 } else {
655 (is_unsigned ? tcg_gen_ext16u_i32 : tcg_gen_ext16s_i32)(m, v);
657 return m;
660 static void disas_xtensa_insn(DisasContext *dc)
662 #define HAS_OPTION_BITS(opt) do { \
663 if (!option_bits_enabled(dc, opt)) { \
664 qemu_log("Option is not enabled %s:%d\n", \
665 __FILE__, __LINE__); \
666 goto invalid_opcode; \
668 } while (0)
670 #define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt))
672 #define TBD() qemu_log("TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__)
673 #define RESERVED() do { \
674 qemu_log("RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \
675 dc->pc, b0, b1, b2, __FILE__, __LINE__); \
676 goto invalid_opcode; \
677 } while (0)
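/*
 * Both a missing configuration option (HAS_OPTION*) and a reserved
 * encoding (RESERVED) branch to invalid_opcode at the end of this
 * function, which logs the event and generates
 * ILLEGAL_INSTRUCTION_CAUSE instead of silently continuing; TBD() only
 * logs valid-but-unimplemented opcodes.
 */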
680 #ifdef TARGET_WORDS_BIGENDIAN
681 #define OP0 (((b0) & 0xf0) >> 4)
682 #define OP1 (((b2) & 0xf0) >> 4)
683 #define OP2 ((b2) & 0xf)
684 #define RRR_R ((b1) & 0xf)
685 #define RRR_S (((b1) & 0xf0) >> 4)
686 #define RRR_T ((b0) & 0xf)
687 #else
688 #define OP0 (((b0) & 0xf))
689 #define OP1 (((b2) & 0xf))
690 #define OP2 (((b2) & 0xf0) >> 4)
691 #define RRR_R (((b1) & 0xf0) >> 4)
692 #define RRR_S (((b1) & 0xf))
693 #define RRR_T (((b0) & 0xf0) >> 4)
694 #endif
695 #define RRR_X ((RRR_R & 0x4) >> 2)
696 #define RRR_Y ((RRR_T & 0x4) >> 2)
697 #define RRR_W (RRR_R & 0x3)
699 #define RRRN_R RRR_R
700 #define RRRN_S RRR_S
701 #define RRRN_T RRR_T
703 #define RRI8_R RRR_R
704 #define RRI8_S RRR_S
705 #define RRI8_T RRR_T
706 #define RRI8_IMM8 (b2)
707 #define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8)
709 #ifdef TARGET_WORDS_BIGENDIAN
710 #define RI16_IMM16 (((b1) << 8) | (b2))
711 #else
712 #define RI16_IMM16 (((b2) << 8) | (b1))
713 #endif
715 #ifdef TARGET_WORDS_BIGENDIAN
716 #define CALL_N (((b0) & 0xc) >> 2)
717 #define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2))
718 #else
719 #define CALL_N (((b0) & 0x30) >> 4)
720 #define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10))
721 #endif
722 #define CALL_OFFSET_SE \
723 (((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET)
725 #define CALLX_N CALL_N
726 #ifdef TARGET_WORDS_BIGENDIAN
727 #define CALLX_M ((b0) & 0x3)
728 #else
729 #define CALLX_M (((b0) & 0xc0) >> 6)
730 #endif
731 #define CALLX_S RRR_S
733 #define BRI12_M CALLX_M
734 #define BRI12_S RRR_S
735 #ifdef TARGET_WORDS_BIGENDIAN
736 #define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2))
737 #else
738 #define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4))
739 #endif
740 #define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 0xfffff000 : 0) | BRI12_IMM12)
742 #define BRI8_M BRI12_M
743 #define BRI8_R RRI8_R
744 #define BRI8_S RRI8_S
745 #define BRI8_IMM8 RRI8_IMM8
746 #define BRI8_IMM8_SE RRI8_IMM8_SE
748 #define RSR_SR (b1)
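/*
 * The macros above extract the standard Xtensa instruction fields
 * (OP0/OP1/OP2, RRR, RRI8, RI16, CALL, CALLX, BRI12, BRI8, RRRN) from
 * the three instruction bytes b0..b2 for either byte order; OP0 >= 8
 * selects the 2-byte code-density encodings below.
 */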
750 uint8_t b0 = ldub_code(dc->pc);
751 uint8_t b1 = ldub_code(dc->pc + 1);
752 uint8_t b2 = ldub_code(dc->pc + 2);
754 static const uint32_t B4CONST[] = {
755 0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
758 static const uint32_t B4CONSTU[] = {
759 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
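/*
 * b4const/b4constu immediate tables used by BEQI/BNEI/BLTI/BGEI and
 * BLTUI/BGEUI: the 4-bit R field indexes one of these 16 constants.
 */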
762 if (OP0 >= 8) {
763 dc->next_pc = dc->pc + 2;
764 HAS_OPTION(XTENSA_OPTION_CODE_DENSITY);
765 } else {
766 dc->next_pc = dc->pc + 3;
769 switch (OP0) {
770 case 0: /*QRST*/
771 switch (OP1) {
772 case 0: /*RST0*/
773 switch (OP2) {
774 case 0: /*ST0*/
775 if ((RRR_R & 0xc) == 0x8) {
776 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
779 switch (RRR_R) {
780 case 0: /*SNM0*/
781 switch (CALLX_M) {
782 case 0: /*ILL*/
783 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
784 break;
786 case 1: /*reserved*/
787 RESERVED();
788 break;
790 case 2: /*JR*/
791 switch (CALLX_N) {
792 case 0: /*RET*/
793 case 2: /*JX*/
794 gen_window_check1(dc, CALLX_S);
795 gen_jump(dc, cpu_R[CALLX_S]);
796 break;
798 case 1: /*RETWw*/
799 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
801 TCGv_i32 tmp = tcg_const_i32(dc->pc);
802 gen_advance_ccount(dc);
803 gen_helper_retw(tmp, tmp);
804 gen_jump(dc, tmp);
805 tcg_temp_free(tmp);
807 break;
809 case 3: /*reserved*/
810 RESERVED();
811 break;
813 break;
815 case 3: /*CALLX*/
816 gen_window_check2(dc, CALLX_S, CALLX_N << 2);
817 switch (CALLX_N) {
818 case 0: /*CALLX0*/
820 TCGv_i32 tmp = tcg_temp_new_i32();
821 tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
822 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
823 gen_jump(dc, tmp);
824 tcg_temp_free(tmp);
826 break;
828 case 1: /*CALLX4w*/
829 case 2: /*CALLX8w*/
830 case 3: /*CALLX12w*/
831 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
833 TCGv_i32 tmp = tcg_temp_new_i32();
835 tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
836 gen_callw(dc, CALLX_N, tmp);
837 tcg_temp_free(tmp);
839 break;
841 break;
843 break;
845 case 1: /*MOVSPw*/
846 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
847 gen_window_check2(dc, RRR_T, RRR_S);
849 TCGv_i32 pc = tcg_const_i32(dc->pc);
850 gen_advance_ccount(dc);
851 gen_helper_movsp(pc);
852 tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]);
853 tcg_temp_free(pc);
855 break;
857 case 2: /*SYNC*/
858 switch (RRR_T) {
859 case 0: /*ISYNC*/
860 break;
862 case 1: /*RSYNC*/
863 break;
865 case 2: /*ESYNC*/
866 break;
868 case 3: /*DSYNC*/
869 break;
871 case 8: /*EXCW*/
872 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
873 break;
875 case 12: /*MEMW*/
876 break;
878 case 13: /*EXTW*/
879 break;
881 case 15: /*NOP*/
882 break;
884 default: /*reserved*/
885 RESERVED();
886 break;
888 break;
890 case 3: /*RFEIx*/
891 switch (RRR_T) {
892 case 0: /*RFETx*/
893 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
894 switch (RRR_S) {
895 case 0: /*RFEx*/
896 gen_check_privilege(dc);
897 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
898 gen_helper_check_interrupts(cpu_env);
899 gen_jump(dc, cpu_SR[EPC1]);
900 break;
902 case 1: /*RFUEx*/
903 RESERVED();
904 break;
906 case 2: /*RFDEx*/
907 gen_check_privilege(dc);
908 gen_jump(dc, cpu_SR[
909 dc->config->ndepc ? DEPC : EPC1]);
910 break;
912 case 4: /*RFWOw*/
913 case 5: /*RFWUw*/
914 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
915 gen_check_privilege(dc);
917 TCGv_i32 tmp = tcg_const_i32(1);
919 tcg_gen_andi_i32(
920 cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
921 tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
923 if (RRR_S == 4) {
924 tcg_gen_andc_i32(cpu_SR[WINDOW_START],
925 cpu_SR[WINDOW_START], tmp);
926 } else {
927 tcg_gen_or_i32(cpu_SR[WINDOW_START],
928 cpu_SR[WINDOW_START], tmp);
931 gen_helper_restore_owb();
932 gen_helper_check_interrupts(cpu_env);
933 gen_jump(dc, cpu_SR[EPC1]);
935 tcg_temp_free(tmp);
937 break;
939 default: /*reserved*/
940 RESERVED();
941 break;
943 break;
945 case 1: /*RFIx*/
946 HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT);
947 if (RRR_S >= 2 && RRR_S <= dc->config->nlevel) {
948 gen_check_privilege(dc);
949 tcg_gen_mov_i32(cpu_SR[PS],
950 cpu_SR[EPS2 + RRR_S - 2]);
951 gen_helper_check_interrupts(cpu_env);
952 gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]);
953 } else {
954 qemu_log("RFI %d is illegal\n", RRR_S);
955 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
957 break;
959 case 2: /*RFME*/
960 TBD();
961 break;
963 default: /*reserved*/
964 RESERVED();
965 break;
968 break;
970 case 4: /*BREAKx*/
971 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
972 TBD();
973 break;
975 case 5: /*SYSCALLx*/
976 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
977 switch (RRR_S) {
978 case 0: /*SYSCALLx*/
979 gen_exception_cause(dc, SYSCALL_CAUSE);
980 break;
982 case 1: /*SIMCALL*/
983 if (semihosting_enabled) {
984 gen_check_privilege(dc);
985 gen_helper_simcall(cpu_env);
986 } else {
987 qemu_log("SIMCALL but semihosting is disabled\n");
988 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
990 break;
992 default:
993 RESERVED();
994 break;
996 break;
998 case 6: /*RSILx*/
999 HAS_OPTION(XTENSA_OPTION_INTERRUPT);
1000 gen_check_privilege(dc);
1001 gen_window_check1(dc, RRR_T);
1002 tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]);
1003 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL);
1004 tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S);
1005 gen_helper_check_interrupts(cpu_env);
1006 gen_jumpi_check_loop_end(dc, 0);
1007 break;
1009 case 7: /*WAITIx*/
1010 HAS_OPTION(XTENSA_OPTION_INTERRUPT);
1011 gen_check_privilege(dc);
1012 gen_waiti(dc, RRR_S);
1013 break;
1015 case 8: /*ANY4p*/
1016 case 9: /*ALL4p*/
1017 case 10: /*ANY8p*/
1018 case 11: /*ALL8p*/
1019 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1021 const unsigned shift = (RRR_R & 2) ? 8 : 4;
1022 TCGv_i32 mask = tcg_const_i32(
1023 ((1 << shift) - 1) << RRR_S);
1024 TCGv_i32 tmp = tcg_temp_new_i32();
1026 tcg_gen_and_i32(tmp, cpu_SR[BR], mask);
1027 if (RRR_R & 1) { /*ALL*/
1028 tcg_gen_addi_i32(tmp, tmp, 1 << RRR_S);
1029 } else { /*ANY*/
1030 tcg_gen_add_i32(tmp, tmp, mask);
1032 tcg_gen_shri_i32(tmp, tmp, RRR_S + shift);
1033 tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR],
1034 tmp, RRR_T, 1);
1035 tcg_temp_free(mask);
1036 tcg_temp_free(tmp);
1038 break;
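/*
 * The ANY4/ALL4/ANY8/ALL8 sequence above uses a carry trick: with the
 * BR field isolated in tmp, adding 1 << RRR_S propagates a carry into
 * bit RRR_S + shift only when all field bits are set (ALL), while
 * adding the mask itself sets that bit when any field bit is set (ANY);
 * that single bit is then deposited into BR bit RRR_T.
 */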
1040 default: /*reserved*/
1041 RESERVED();
1042 break;
1045 break;
1047 case 1: /*AND*/
1048 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1049 tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1050 break;
1052 case 2: /*OR*/
1053 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1054 tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1055 break;
1057 case 3: /*XOR*/
1058 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1059 tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1060 break;
1062 case 4: /*ST1*/
1063 switch (RRR_R) {
1064 case 0: /*SSR*/
1065 gen_window_check1(dc, RRR_S);
1066 gen_right_shift_sar(dc, cpu_R[RRR_S]);
1067 break;
1069 case 1: /*SSL*/
1070 gen_window_check1(dc, RRR_S);
1071 gen_left_shift_sar(dc, cpu_R[RRR_S]);
1072 break;
1074 case 2: /*SSA8L*/
1075 gen_window_check1(dc, RRR_S);
1077 TCGv_i32 tmp = tcg_temp_new_i32();
1078 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
1079 gen_right_shift_sar(dc, tmp);
1080 tcg_temp_free(tmp);
1082 break;
1084 case 3: /*SSA8B*/
1085 gen_window_check1(dc, RRR_S);
1087 TCGv_i32 tmp = tcg_temp_new_i32();
1088 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
1089 gen_left_shift_sar(dc, tmp);
1090 tcg_temp_free(tmp);
1092 break;
1094 case 4: /*SSAI*/
1096 TCGv_i32 tmp = tcg_const_i32(
1097 RRR_S | ((RRR_T & 1) << 4));
1098 gen_right_shift_sar(dc, tmp);
1099 tcg_temp_free(tmp);
1101 break;
1103 case 6: /*RER*/
1104 TBD();
1105 break;
1107 case 7: /*WER*/
1108 TBD();
1109 break;
1111 case 8: /*ROTWw*/
1112 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1113 gen_check_privilege(dc);
1115 TCGv_i32 tmp = tcg_const_i32(
1116 RRR_T | ((RRR_T & 8) ? 0xfffffff0 : 0));
1117 gen_helper_rotw(tmp);
1118 tcg_temp_free(tmp);
1119 reset_used_window(dc);
1121 break;
1123 case 14: /*NSAu*/
1124 HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
1125 gen_window_check2(dc, RRR_S, RRR_T);
1126 gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]);
1127 break;
1129 case 15: /*NSAUu*/
1130 HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
1131 gen_window_check2(dc, RRR_S, RRR_T);
1132 gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]);
1133 break;
1135 default: /*reserved*/
1136 RESERVED();
1137 break;
1139 break;
1141 case 5: /*TLB*/
1142 HAS_OPTION_BITS(
1143 XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) |
1144 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
1145 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION));
1146 gen_check_privilege(dc);
1147 gen_window_check2(dc, RRR_S, RRR_T);
1149 TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0);
1151 switch (RRR_R & 7) {
1152 case 3: /*RITLB0*/ /*RDTLB0*/
1153 gen_helper_rtlb0(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1154 break;
1156 case 4: /*IITLB*/ /*IDTLB*/
1157 gen_helper_itlb(cpu_R[RRR_S], dtlb);
1158 /* This could change memory mapping, so exit tb */
1159 gen_jumpi_check_loop_end(dc, -1);
1160 break;
1162 case 5: /*PITLB*/ /*PDTLB*/
1163 tcg_gen_movi_i32(cpu_pc, dc->pc);
1164 gen_helper_ptlb(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1165 break;
1167 case 6: /*WITLB*/ /*WDTLB*/
1168 gen_helper_wtlb(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1169 /* This could change memory mapping, so exit tb */
1170 gen_jumpi_check_loop_end(dc, -1);
1171 break;
1173 case 7: /*RITLB1*/ /*RDTLB1*/
1174 gen_helper_rtlb1(cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1175 break;
1177 default:
1178 tcg_temp_free(dtlb);
1179 RESERVED();
1180 break;
1182 tcg_temp_free(dtlb);
1184 break;
1186 case 6: /*RT0*/
1187 gen_window_check2(dc, RRR_R, RRR_T);
1188 switch (RRR_S) {
1189 case 0: /*NEG*/
1190 tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1191 break;
1193 case 1: /*ABS*/
1195 int label = gen_new_label();
1196 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1197 tcg_gen_brcondi_i32(
1198 TCG_COND_GE, cpu_R[RRR_R], 0, label);
1199 tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1200 gen_set_label(label);
1202 break;
1204 default: /*reserved*/
1205 RESERVED();
1206 break;
1208 break;
1210 case 7: /*reserved*/
1211 RESERVED();
1212 break;
1214 case 8: /*ADD*/
1215 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1216 tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1217 break;
1219 case 9: /*ADD**/
1220 case 10:
1221 case 11:
1222 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1224 TCGv_i32 tmp = tcg_temp_new_i32();
1225 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8);
1226 tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
1227 tcg_temp_free(tmp);
1229 break;
1231 case 12: /*SUB*/
1232 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1233 tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1234 break;
1236 case 13: /*SUB**/
1237 case 14:
1238 case 15:
1239 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1241 TCGv_i32 tmp = tcg_temp_new_i32();
1242 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12);
1243 tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
1244 tcg_temp_free(tmp);
1246 break;
1248 break;
1250 case 1: /*RST1*/
1251 switch (OP2) {
1252 case 0: /*SLLI*/
1253 case 1:
1254 gen_window_check2(dc, RRR_R, RRR_S);
1255 tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S],
1256 32 - (RRR_T | ((OP2 & 1) << 4)));
1257 break;
1259 case 2: /*SRAI*/
1260 case 3:
1261 gen_window_check2(dc, RRR_R, RRR_T);
1262 tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T],
1263 RRR_S | ((OP2 & 1) << 4));
1264 break;
1266 case 4: /*SRLI*/
1267 gen_window_check2(dc, RRR_R, RRR_T);
1268 tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S);
1269 break;
1271 case 6: /*XSR*/
1273 TCGv_i32 tmp = tcg_temp_new_i32();
1274 if (RSR_SR >= 64) {
1275 gen_check_privilege(dc);
1277 gen_window_check1(dc, RRR_T);
1278 tcg_gen_mov_i32(tmp, cpu_R[RRR_T]);
1279 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
1280 gen_wsr(dc, RSR_SR, tmp);
1281 tcg_temp_free(tmp);
1282 if (!sregnames[RSR_SR]) {
1283 TBD();
1286 break;
1289 * Note: 64 bit ops are used here solely because SAR values
1290 * have range 0..63
1292 #define gen_shift_reg(cmd, reg) do { \
1293 TCGv_i64 tmp = tcg_temp_new_i64(); \
1294 tcg_gen_extu_i32_i64(tmp, reg); \
1295 tcg_gen_##cmd##_i64(v, v, tmp); \
1296 tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \
1297 tcg_temp_free_i64(v); \
1298 tcg_temp_free_i64(tmp); \
1299 } while (0)
1301 #define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
1303 case 8: /*SRC*/
1304 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1306 TCGv_i64 v = tcg_temp_new_i64();
1307 tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]);
1308 gen_shift(shr);
1310 break;
1312 case 9: /*SRL*/
1313 gen_window_check2(dc, RRR_R, RRR_T);
1314 if (dc->sar_5bit) {
1315 tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
1316 } else {
1317 TCGv_i64 v = tcg_temp_new_i64();
1318 tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]);
1319 gen_shift(shr);
1321 break;
1323 case 10: /*SLL*/
1324 gen_window_check2(dc, RRR_R, RRR_S);
1325 if (dc->sar_m32_5bit) {
1326 tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32);
1327 } else {
1328 TCGv_i64 v = tcg_temp_new_i64();
1329 TCGv_i32 s = tcg_const_i32(32);
1330 tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
1331 tcg_gen_andi_i32(s, s, 0x3f);
1332 tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]);
1333 gen_shift_reg(shl, s);
1334 tcg_temp_free(s);
1336 break;
1338 case 11: /*SRA*/
1339 gen_window_check2(dc, RRR_R, RRR_T);
1340 if (dc->sar_5bit) {
1341 tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
1342 } else {
1343 TCGv_i64 v = tcg_temp_new_i64();
1344 tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]);
1345 gen_shift(sar);
1347 break;
1348 #undef gen_shift
1349 #undef gen_shift_reg
1351 case 12: /*MUL16U*/
1352 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
1353 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1355 TCGv_i32 v1 = tcg_temp_new_i32();
1356 TCGv_i32 v2 = tcg_temp_new_i32();
1357 tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]);
1358 tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]);
1359 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
1360 tcg_temp_free(v2);
1361 tcg_temp_free(v1);
1363 break;
1365 case 13: /*MUL16S*/
1366 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
1367 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1369 TCGv_i32 v1 = tcg_temp_new_i32();
1370 TCGv_i32 v2 = tcg_temp_new_i32();
1371 tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]);
1372 tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]);
1373 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
1374 tcg_temp_free(v2);
1375 tcg_temp_free(v1);
1377 break;
1379 default: /*reserved*/
1380 RESERVED();
1381 break;
1383 break;
1385 case 2: /*RST2*/
1386 if (OP2 >= 8) {
1387 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1390 if (OP2 >= 12) {
1391 HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV);
1392 int label = gen_new_label();
1393 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label);
1394 gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE);
1395 gen_set_label(label);
1398 switch (OP2) {
1399 #define BOOLEAN_LOGIC(fn, r, s, t) \
1400 do { \
1401 HAS_OPTION(XTENSA_OPTION_BOOLEAN); \
1402 TCGv_i32 tmp1 = tcg_temp_new_i32(); \
1403 TCGv_i32 tmp2 = tcg_temp_new_i32(); \
1405 tcg_gen_shri_i32(tmp1, cpu_SR[BR], s); \
1406 tcg_gen_shri_i32(tmp2, cpu_SR[BR], t); \
1407 tcg_gen_##fn##_i32(tmp1, tmp1, tmp2); \
1408 tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp1, r, 1); \
1409 tcg_temp_free(tmp1); \
1410 tcg_temp_free(tmp2); \
1411 } while (0)
1413 case 0: /*ANDBp*/
1414 BOOLEAN_LOGIC(and, RRR_R, RRR_S, RRR_T);
1415 break;
1417 case 1: /*ANDBCp*/
1418 BOOLEAN_LOGIC(andc, RRR_R, RRR_S, RRR_T);
1419 break;
1421 case 2: /*ORBp*/
1422 BOOLEAN_LOGIC(or, RRR_R, RRR_S, RRR_T);
1423 break;
1425 case 3: /*ORBCp*/
1426 BOOLEAN_LOGIC(orc, RRR_R, RRR_S, RRR_T);
1427 break;
1429 case 4: /*XORBp*/
1430 BOOLEAN_LOGIC(xor, RRR_R, RRR_S, RRR_T);
1431 break;
1433 #undef BOOLEAN_LOGIC
1435 case 8: /*MULLi*/
1436 HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL);
1437 tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1438 break;
1440 case 10: /*MULUHi*/
1441 case 11: /*MULSHi*/
1442 HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL_HIGH);
1444 TCGv_i64 r = tcg_temp_new_i64();
1445 TCGv_i64 s = tcg_temp_new_i64();
1446 TCGv_i64 t = tcg_temp_new_i64();
1448 if (OP2 == 10) {
1449 tcg_gen_extu_i32_i64(s, cpu_R[RRR_S]);
1450 tcg_gen_extu_i32_i64(t, cpu_R[RRR_T]);
1451 } else {
1452 tcg_gen_ext_i32_i64(s, cpu_R[RRR_S]);
1453 tcg_gen_ext_i32_i64(t, cpu_R[RRR_T]);
1455 tcg_gen_mul_i64(r, s, t);
1456 tcg_gen_shri_i64(r, r, 32);
1457 tcg_gen_trunc_i64_i32(cpu_R[RRR_R], r);
1459 tcg_temp_free_i64(r);
1460 tcg_temp_free_i64(s);
1461 tcg_temp_free_i64(t);
1463 break;
1465 case 12: /*QUOUi*/
1466 tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1467 break;
1469 case 13: /*QUOSi*/
1470 case 15: /*REMSi*/
1472 int label1 = gen_new_label();
1473 int label2 = gen_new_label();
1475 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000,
1476 label1);
1477 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff,
1478 label1);
1479 tcg_gen_movi_i32(cpu_R[RRR_R],
1480 OP2 == 13 ? 0x80000000 : 0);
1481 tcg_gen_br(label2);
1482 gen_set_label(label1);
1483 if (OP2 == 13) {
1484 tcg_gen_div_i32(cpu_R[RRR_R],
1485 cpu_R[RRR_S], cpu_R[RRR_T]);
1486 } else {
1487 tcg_gen_rem_i32(cpu_R[RRR_R],
1488 cpu_R[RRR_S], cpu_R[RRR_T]);
1490 gen_set_label(label2);
1492 break;
1494 case 14: /*REMUi*/
1495 tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1496 break;
1498 default: /*reserved*/
1499 RESERVED();
1500 break;
1502 break;
1504 case 3: /*RST3*/
1505 switch (OP2) {
1506 case 0: /*RSR*/
1507 if (RSR_SR >= 64) {
1508 gen_check_privilege(dc);
1510 gen_window_check1(dc, RRR_T);
1511 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
1512 if (!sregnames[RSR_SR]) {
1513 TBD();
1515 break;
1517 case 1: /*WSR*/
1518 if (RSR_SR >= 64) {
1519 gen_check_privilege(dc);
1521 gen_window_check1(dc, RRR_T);
1522 gen_wsr(dc, RSR_SR, cpu_R[RRR_T]);
1523 if (!sregnames[RSR_SR]) {
1524 TBD();
1526 break;
1528 case 2: /*SEXTu*/
1529 HAS_OPTION(XTENSA_OPTION_MISC_OP_SEXT);
1530 gen_window_check2(dc, RRR_R, RRR_S);
1532 int shift = 24 - RRR_T;
1534 if (shift == 24) {
1535 tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1536 } else if (shift == 16) {
1537 tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1538 } else {
1539 TCGv_i32 tmp = tcg_temp_new_i32();
1540 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift);
1541 tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift);
1542 tcg_temp_free(tmp);
1545 break;
1547 case 3: /*CLAMPSu*/
1548 HAS_OPTION(XTENSA_OPTION_MISC_OP_CLAMPS);
1549 gen_window_check2(dc, RRR_R, RRR_S);
1551 TCGv_i32 tmp1 = tcg_temp_new_i32();
1552 TCGv_i32 tmp2 = tcg_temp_new_i32();
1553 int label = gen_new_label();
1555 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T);
1556 tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]);
1557 tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7));
1558 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1559 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp2, 0, label);
1561 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31);
1562 tcg_gen_xori_i32(cpu_R[RRR_R], tmp1,
1563 0xffffffff >> (25 - RRR_T));
1565 gen_set_label(label);
1567 tcg_temp_free(tmp1);
1568 tcg_temp_free(tmp2);
1570 break;
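/*
 * CLAMPS above saturates a[s] to the signed range
 * [-2^(t+7), 2^(t+7) - 1]: XORing the value with its own arithmetic
 * shift by 24 - t leaves bits set at or above position t + 7 exactly
 * when the value is out of range, in which case the result is rebuilt
 * from the sign bit.
 */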
1572 case 4: /*MINu*/
1573 case 5: /*MAXu*/
1574 case 6: /*MINUu*/
1575 case 7: /*MAXUu*/
1576 HAS_OPTION(XTENSA_OPTION_MISC_OP_MINMAX);
1577 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1579 static const TCGCond cond[] = {
1580 TCG_COND_LE,
1581 TCG_COND_GE,
1582 TCG_COND_LEU,
1583 TCG_COND_GEU
1585 int label = gen_new_label();
1587 if (RRR_R != RRR_T) {
1588 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1589 tcg_gen_brcond_i32(cond[OP2 - 4],
1590 cpu_R[RRR_S], cpu_R[RRR_T], label);
1591 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1592 } else {
1593 tcg_gen_brcond_i32(cond[OP2 - 4],
1594 cpu_R[RRR_T], cpu_R[RRR_S], label);
1595 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1597 gen_set_label(label);
1599 break;
1601 case 8: /*MOVEQZ*/
1602 case 9: /*MOVNEZ*/
1603 case 10: /*MOVLTZ*/
1604 case 11: /*MOVGEZ*/
1605 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1607 static const TCGCond cond[] = {
1608 TCG_COND_NE,
1609 TCG_COND_EQ,
1610 TCG_COND_GE,
1611 TCG_COND_LT
1613 int label = gen_new_label();
1614 tcg_gen_brcondi_i32(cond[OP2 - 8], cpu_R[RRR_T], 0, label);
1615 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1616 gen_set_label(label);
1618 break;
1620 case 12: /*MOVFp*/
1621 case 13: /*MOVTp*/
1622 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1623 gen_window_check2(dc, RRR_R, RRR_S);
1625 int label = gen_new_label();
1626 TCGv_i32 tmp = tcg_temp_new_i32();
1628 tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
1629 tcg_gen_brcondi_i32(
1630 OP2 & 1 ? TCG_COND_EQ : TCG_COND_NE,
1631 tmp, 0, label);
1632 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1633 gen_set_label(label);
1634 tcg_temp_free(tmp);
1636 break;
1638 case 14: /*RUR*/
1639 gen_window_check1(dc, RRR_R);
1641 int st = (RRR_S << 4) + RRR_T;
1642 if (uregnames[st]) {
1643 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]);
1644 } else {
1645 qemu_log("RUR %d not implemented, ", st);
1646 TBD();
1649 break;
1651 case 15: /*WUR*/
1652 gen_window_check1(dc, RRR_T);
1654 if (uregnames[RSR_SR]) {
1655 tcg_gen_mov_i32(cpu_UR[RSR_SR], cpu_R[RRR_T]);
1656 } else {
1657 qemu_log("WUR %d not implemented, ", RSR_SR);
1658 TBD();
1661 break;
1664 break;
1666 case 4: /*EXTUI*/
1667 case 5:
1668 gen_window_check2(dc, RRR_R, RRR_T);
1670 int shiftimm = RRR_S | (OP1 << 4);
1671 int maskimm = (1 << (OP2 + 1)) - 1;
1673 TCGv_i32 tmp = tcg_temp_new_i32();
1674 tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm);
1675 tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm);
1676 tcg_temp_free(tmp);
1678 break;
1680 case 6: /*CUST0*/
1681 RESERVED();
1682 break;
1684 case 7: /*CUST1*/
1685 RESERVED();
1686 break;
1688 case 8: /*LSCXp*/
1689 HAS_OPTION(XTENSA_OPTION_COPROCESSOR);
1690 TBD();
1691 break;
1693 case 9: /*LSC4*/
1694 gen_window_check2(dc, RRR_S, RRR_T);
1695 switch (OP2) {
1696 case 0: /*L32E*/
1697 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1698 gen_check_privilege(dc);
1700 TCGv_i32 addr = tcg_temp_new_i32();
1701 tcg_gen_addi_i32(addr, cpu_R[RRR_S],
1702 (0xffffffc0 | (RRR_R << 2)));
1703 tcg_gen_qemu_ld32u(cpu_R[RRR_T], addr, dc->ring);
1704 tcg_temp_free(addr);
1706 break;
1708 case 4: /*S32E*/
1709 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1710 gen_check_privilege(dc);
1712 TCGv_i32 addr = tcg_temp_new_i32();
1713 tcg_gen_addi_i32(addr, cpu_R[RRR_S],
1714 (0xffffffc0 | (RRR_R << 2)));
1715 tcg_gen_qemu_st32(cpu_R[RRR_T], addr, dc->ring);
1716 tcg_temp_free(addr);
1718 break;
1720 default:
1721 RESERVED();
1722 break;
1724 break;
1726 case 10: /*FP0*/
1727 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1728 TBD();
1729 break;
1731 case 11: /*FP1*/
1732 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1733 TBD();
1734 break;
1736 default: /*reserved*/
1737 RESERVED();
1738 break;
1740 break;
1742 case 1: /*L32R*/
1743 gen_window_check1(dc, RRR_T);
1745 TCGv_i32 tmp = tcg_const_i32(
1746 ((dc->tb->flags & XTENSA_TBFLAG_LITBASE) ?
1747 0 : ((dc->pc + 3) & ~3)) +
1748 (0xfffc0000 | (RI16_IMM16 << 2)));
1750 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
1751 tcg_gen_add_i32(tmp, tmp, dc->litbase);
1753 tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring);
1754 tcg_temp_free(tmp);
1756 break;
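/*
 * L32R above loads a 32-bit literal: the 16-bit immediate is shifted
 * left by two and sign-extended into a negative offset, applied either
 * to the word-aligned PC + 3 or, when the LITBASE bit is set in the TB
 * flags, to the 4 KB-aligned base held in LITBASE.
 */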
1758 case 2: /*LSAI*/
1759 #define gen_load_store(type, shift) do { \
1760 TCGv_i32 addr = tcg_temp_new_i32(); \
1761 gen_window_check2(dc, RRI8_S, RRI8_T); \
1762 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \
1763 if (shift) { \
1764 gen_load_store_alignment(dc, shift, addr, false); \
1766 tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
1767 tcg_temp_free(addr); \
1768 } while (0)
1770 switch (RRI8_R) {
1771 case 0: /*L8UI*/
1772 gen_load_store(ld8u, 0);
1773 break;
1775 case 1: /*L16UI*/
1776 gen_load_store(ld16u, 1);
1777 break;
1779 case 2: /*L32I*/
1780 gen_load_store(ld32u, 2);
1781 break;
1783 case 4: /*S8I*/
1784 gen_load_store(st8, 0);
1785 break;
1787 case 5: /*S16I*/
1788 gen_load_store(st16, 1);
1789 break;
1791 case 6: /*S32I*/
1792 gen_load_store(st32, 2);
1793 break;
1795 case 7: /*CACHEc*/
1796 if (RRI8_T < 8) {
1797 HAS_OPTION(XTENSA_OPTION_DCACHE);
1800 switch (RRI8_T) {
1801 case 0: /*DPFRc*/
1802 break;
1804 case 1: /*DPFWc*/
1805 break;
1807 case 2: /*DPFROc*/
1808 break;
1810 case 3: /*DPFWOc*/
1811 break;
1813 case 4: /*DHWBc*/
1814 break;
1816 case 5: /*DHWBIc*/
1817 break;
1819 case 6: /*DHIc*/
1820 break;
1822 case 7: /*DIIc*/
1823 break;
1825 case 8: /*DCEc*/
1826 switch (OP1) {
1827 case 0: /*DPFLl*/
1828 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
1829 break;
1831 case 2: /*DHUl*/
1832 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
1833 break;
1835 case 3: /*DIUl*/
1836 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
1837 break;
1839 case 4: /*DIWBc*/
1840 HAS_OPTION(XTENSA_OPTION_DCACHE);
1841 break;
1843 case 5: /*DIWBIc*/
1844 HAS_OPTION(XTENSA_OPTION_DCACHE);
1845 break;
1847 default: /*reserved*/
1848 RESERVED();
1849 break;
1852 break;
1854 case 12: /*IPFc*/
1855 HAS_OPTION(XTENSA_OPTION_ICACHE);
1856 break;
1858 case 13: /*ICEc*/
1859 switch (OP1) {
1860 case 0: /*IPFLl*/
1861 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
1862 break;
1864 case 2: /*IHUl*/
1865 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
1866 break;
1868 case 3: /*IIUl*/
1869 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
1870 break;
1872 default: /*reserved*/
1873 RESERVED();
1874 break;
1876 break;
1878 case 14: /*IHIc*/
1879 HAS_OPTION(XTENSA_OPTION_ICACHE);
1880 break;
1882 case 15: /*IIIc*/
1883 HAS_OPTION(XTENSA_OPTION_ICACHE);
1884 break;
1886 default: /*reserved*/
1887 RESERVED();
1888 break;
1890 break;
1892 case 9: /*L16SI*/
1893 gen_load_store(ld16s, 1);
1894 break;
1895 #undef gen_load_store
1897 case 10: /*MOVI*/
1898 gen_window_check1(dc, RRI8_T);
1899 tcg_gen_movi_i32(cpu_R[RRI8_T],
1900 RRI8_IMM8 | (RRI8_S << 8) |
1901 ((RRI8_S & 0x8) ? 0xfffff000 : 0));
1902 break;
1904 #define gen_load_store_no_hw_align(type) do { \
1905 TCGv_i32 addr = tcg_temp_local_new_i32(); \
1906 gen_window_check2(dc, RRI8_S, RRI8_T); \
1907 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); \
1908 gen_load_store_alignment(dc, 2, addr, true); \
1909 tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
1910 tcg_temp_free(addr); \
1911 } while (0)
1913 case 11: /*L32AIy*/
1914 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
1915 gen_load_store_no_hw_align(ld32u); /*TODO acquire?*/
1916 break;
1918 case 12: /*ADDI*/
1919 gen_window_check2(dc, RRI8_S, RRI8_T);
1920 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE);
1921 break;
1923 case 13: /*ADDMI*/
1924 gen_window_check2(dc, RRI8_S, RRI8_T);
1925 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE << 8);
1926 break;
1928 case 14: /*S32C1Iy*/
1929 HAS_OPTION(XTENSA_OPTION_CONDITIONAL_STORE);
1930 gen_window_check2(dc, RRI8_S, RRI8_T);
1932 int label = gen_new_label();
1933 TCGv_i32 tmp = tcg_temp_local_new_i32();
1934 TCGv_i32 addr = tcg_temp_local_new_i32();
1936 tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]);
1937 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
1938 gen_load_store_alignment(dc, 2, addr, true);
1939 tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring);
1940 tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T],
1941 cpu_SR[SCOMPARE1], label);
1943 tcg_gen_qemu_st32(tmp, addr, dc->cring);
1945 gen_set_label(label);
1946 tcg_temp_free(addr);
1947 tcg_temp_free(tmp);
1949 break;
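/*
 * S32C1I (conditional store) above: the word at the aligned address is
 * loaded into a[t], and the original a[t] value is written back only if
 * the loaded word equals SCOMPARE1, giving the compare-and-swap
 * primitive used for MP synchronization.
 */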
1951 case 15: /*S32RIy*/
1952 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
1953 gen_load_store_no_hw_align(st32); /*TODO release?*/
1954 break;
1955 #undef gen_load_store_no_hw_align
1957 default: /*reserved*/
1958 RESERVED();
1959 break;
1961 break;
1963 case 3: /*LSCIp*/
1964 HAS_OPTION(XTENSA_OPTION_COPROCESSOR);
1965 TBD();
1966 break;
1968 case 4: /*MAC16d*/
1969 HAS_OPTION(XTENSA_OPTION_MAC16);
1971 enum {
1972 MAC16_UMUL = 0x0,
1973 MAC16_MUL = 0x4,
1974 MAC16_MULA = 0x8,
1975 MAC16_MULS = 0xc,
1976 MAC16_NONE = 0xf,
1977 } op = OP1 & 0xc;
1978 bool is_m1_sr = (OP2 & 0x3) == 2;
1979 bool is_m2_sr = (OP2 & 0xc) == 0;
1980 uint32_t ld_offset = 0;
1982 if (OP2 > 9) {
1983 RESERVED();
1986 switch (OP2 & 2) {
1987 case 0: /*MACI?/MACC?*/
1988 is_m1_sr = true;
1989 ld_offset = (OP2 & 1) ? -4 : 4;
1991 if (OP2 >= 8) { /*MACI/MACC*/
1992 if (OP1 == 0) { /*LDINC/LDDEC*/
1993 op = MAC16_NONE;
1994 } else {
1995 RESERVED();
1997 } else if (op != MAC16_MULA) { /*MULA.*.*.LDINC/LDDEC*/
1998 RESERVED();
2000 break;
2002 case 2: /*MACD?/MACA?*/
2003 if (op == MAC16_UMUL && OP2 != 7) { /*UMUL only in MACAA*/
2004 RESERVED();
2006 break;
2009 if (op != MAC16_NONE) {
2010 if (!is_m1_sr) {
2011 gen_window_check1(dc, RRR_S);
2013 if (!is_m2_sr) {
2014 gen_window_check1(dc, RRR_T);
2019 TCGv_i32 vaddr = tcg_temp_new_i32();
2020 TCGv_i32 mem32 = tcg_temp_new_i32();
2022 if (ld_offset) {
2023 gen_window_check1(dc, RRR_S);
2024 tcg_gen_addi_i32(vaddr, cpu_R[RRR_S], ld_offset);
2025 gen_load_store_alignment(dc, 2, vaddr, false);
2026 tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring);
2028 if (op != MAC16_NONE) {
2029 TCGv_i32 m1 = gen_mac16_m(
2030 is_m1_sr ? cpu_SR[MR + RRR_X] : cpu_R[RRR_S],
2031 OP1 & 1, op == MAC16_UMUL);
2032 TCGv_i32 m2 = gen_mac16_m(
2033 is_m2_sr ? cpu_SR[MR + 2 + RRR_Y] : cpu_R[RRR_T],
2034 OP1 & 2, op == MAC16_UMUL);
2036 if (op == MAC16_MUL || op == MAC16_UMUL) {
2037 tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2);
2038 if (op == MAC16_UMUL) {
2039 tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
2040 } else {
2041 tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
2043 } else {
2044 TCGv_i32 res = tcg_temp_new_i32();
2045 TCGv_i64 res64 = tcg_temp_new_i64();
2046 TCGv_i64 tmp = tcg_temp_new_i64();
2048 tcg_gen_mul_i32(res, m1, m2);
2049 tcg_gen_ext_i32_i64(res64, res);
2050 tcg_gen_concat_i32_i64(tmp,
2051 cpu_SR[ACCLO], cpu_SR[ACCHI]);
2052 if (op == MAC16_MULA) {
2053 tcg_gen_add_i64(tmp, tmp, res64);
2054 } else {
2055 tcg_gen_sub_i64(tmp, tmp, res64);
2057 tcg_gen_trunc_i64_i32(cpu_SR[ACCLO], tmp);
2058 tcg_gen_shri_i64(tmp, tmp, 32);
2059 tcg_gen_trunc_i64_i32(cpu_SR[ACCHI], tmp);
2060 tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]);
2062 tcg_temp_free(res);
2063 tcg_temp_free_i64(res64);
2064 tcg_temp_free_i64(tmp);
2066 tcg_temp_free(m1);
2067 tcg_temp_free(m2);
2069 if (ld_offset) {
2070 tcg_gen_mov_i32(cpu_R[RRR_S], vaddr);
2071 tcg_gen_mov_i32(cpu_SR[MR + RRR_W], mem32);
2073 tcg_temp_free(vaddr);
2074 tcg_temp_free(mem32);
2077 break;
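/*
 * MAC16 above: OP2 selects whether the 16-bit operands come from the MR
 * special registers or from AR registers and whether an LDINC/LDDEC
 * auto-incrementing load is fused with the multiply; the 40-bit
 * accumulator is kept in ACCLO/ACCHI, with ACCHI sign-extended to its
 * low 8 bits after each multiply-accumulate.
 */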
2079 case 5: /*CALLN*/
2080 switch (CALL_N) {
2081 case 0: /*CALL0*/
2082 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
2083 gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
2084 break;
2086 case 1: /*CALL4w*/
2087 case 2: /*CALL8w*/
2088 case 3: /*CALL12w*/
2089 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2090 gen_window_check1(dc, CALL_N << 2);
2091 gen_callwi(dc, CALL_N,
2092 (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
2093 break;
2095 break;
2097 case 6: /*SI*/
2098 switch (CALL_N) {
2099 case 0: /*J*/
2100 gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0);
2101 break;
2103 case 1: /*BZ*/
2104 gen_window_check1(dc, BRI12_S);
2106 static const TCGCond cond[] = {
2107 TCG_COND_EQ, /*BEQZ*/
2108 TCG_COND_NE, /*BNEZ*/
2109 TCG_COND_LT, /*BLTZ*/
2110 TCG_COND_GE, /*BGEZ*/
2113 gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0,
2114 4 + BRI12_IMM12_SE);
2116 break;
2118 case 2: /*BI0*/
2119 gen_window_check1(dc, BRI8_S);
2121 static const TCGCond cond[] = {
2122 TCG_COND_EQ, /*BEQI*/
2123 TCG_COND_NE, /*BNEI*/
2124 TCG_COND_LT, /*BLTI*/
2125 TCG_COND_GE, /*BGEI*/
2128 gen_brcondi(dc, cond[BRI8_M & 3],
2129 cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE);
2131 break;
2133 case 3: /*BI1*/
2134 switch (BRI8_M) {
2135 case 0: /*ENTRYw*/
2136 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2138 TCGv_i32 pc = tcg_const_i32(dc->pc);
2139 TCGv_i32 s = tcg_const_i32(BRI12_S);
2140 TCGv_i32 imm = tcg_const_i32(BRI12_IMM12);
2141 gen_advance_ccount(dc);
2142 gen_helper_entry(pc, s, imm);
2143 tcg_temp_free(imm);
2144 tcg_temp_free(s);
2145 tcg_temp_free(pc);
2146 reset_used_window(dc);
2148 break;
2150 case 1: /*B1*/
2151 switch (BRI8_R) {
2152 case 0: /*BFp*/
2153 case 1: /*BTp*/
2154 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
2156 TCGv_i32 tmp = tcg_temp_new_i32();
2157 tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRI8_S);
2158 gen_brcondi(dc,
2159 BRI8_R == 1 ? TCG_COND_NE : TCG_COND_EQ,
2160 tmp, 0, 4 + RRI8_IMM8_SE);
2161 tcg_temp_free(tmp);
2163 break;
2165 case 8: /*LOOP*/
2166 case 9: /*LOOPNEZ*/
2167 case 10: /*LOOPGTZ*/
2168 HAS_OPTION(XTENSA_OPTION_LOOP);
2169 gen_window_check1(dc, RRI8_S);
2171 uint32_t lend = dc->pc + RRI8_IMM8 + 4;
2172 TCGv_i32 tmp = tcg_const_i32(lend);
2174 tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1);
2175 tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc);
2176 gen_wsr_lend(dc, LEND, tmp);
2177 tcg_temp_free(tmp);
2179 if (BRI8_R > 8) {
2180 int label = gen_new_label();
2181 tcg_gen_brcondi_i32(
2182 BRI8_R == 9 ? TCG_COND_NE : TCG_COND_GT,
2183 cpu_R[RRI8_S], 0, label);
2184 gen_jumpi(dc, lend, 1);
2185 gen_set_label(label);
2188 gen_jumpi(dc, dc->next_pc, 0);
2190 break;
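/*
 * LOOP/LOOPNEZ/LOOPGTZ above set up the zero-overhead loop state:
 * LCOUNT = a[s] - 1, LBEG = next_pc, LEND = pc + imm8 + 4 (via the WSR
 * handler so cached loop state stays consistent).  The NEZ/GTZ forms
 * jump straight to LEND when a[s] fails the test, and the TB always
 * ends here so the new loop registers take effect.
 */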
2192 default: /*reserved*/
2193 RESERVED();
2194 break;
2197 break;
2199 case 2: /*BLTUI*/
2200 case 3: /*BGEUI*/
2201 gen_window_check1(dc, BRI8_S);
2202 gen_brcondi(dc, BRI8_M == 2 ? TCG_COND_LTU : TCG_COND_GEU,
2203 cpu_R[BRI8_S], B4CONSTU[BRI8_R], 4 + BRI8_IMM8_SE);
2204 break;
2206 break;
2209 break;
2211 case 7: /*B*/
2213 TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ;
2215 switch (RRI8_R & 7) {
2216 case 0: /*BNONE*/ /*BANY*/
2217 gen_window_check2(dc, RRI8_S, RRI8_T);
2219 TCGv_i32 tmp = tcg_temp_new_i32();
2220 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
2221 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2222 tcg_temp_free(tmp);
2224 break;
2226 case 1: /*BEQ*/ /*BNE*/
2227 case 2: /*BLT*/ /*BGE*/
2228 case 3: /*BLTU*/ /*BGEU*/
2229 gen_window_check2(dc, RRI8_S, RRI8_T);
2231 static const TCGCond cond[] = {
2232 [1] = TCG_COND_EQ,
2233 [2] = TCG_COND_LT,
2234 [3] = TCG_COND_LTU,
2235 [9] = TCG_COND_NE,
2236 [10] = TCG_COND_GE,
2237 [11] = TCG_COND_GEU,
2239 gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T],
2240 4 + RRI8_IMM8_SE);
2242 break;
2244 case 4: /*BALL*/ /*BNALL*/
2245 gen_window_check2(dc, RRI8_S, RRI8_T);
2247 TCGv_i32 tmp = tcg_temp_new_i32();
2248 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
2249 gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T],
2250 4 + RRI8_IMM8_SE);
2251 tcg_temp_free(tmp);
2253 break;
2255 case 5: /*BBC*/ /*BBS*/
2256 gen_window_check2(dc, RRI8_S, RRI8_T);
2258 TCGv_i32 bit = tcg_const_i32(1);
2259 TCGv_i32 tmp = tcg_temp_new_i32();
2260 tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f);
2261 tcg_gen_shl_i32(bit, bit, tmp);
2262 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit);
2263 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2264 tcg_temp_free(tmp);
2265 tcg_temp_free(bit);
2267 break;
2269 case 6: /*BBCI*/ /*BBSI*/
2270 case 7:
2271 gen_window_check1(dc, RRI8_S);
2273 TCGv_i32 tmp = tcg_temp_new_i32();
2274 tcg_gen_andi_i32(tmp, cpu_R[RRI8_S],
2275 1 << (((RRI8_R & 1) << 4) | RRI8_T));
2276 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2277 tcg_temp_free(tmp);
2279 break;
2283 break;
2285 #define gen_narrow_load_store(type) do { \
2286 TCGv_i32 addr = tcg_temp_new_i32(); \
2287 gen_window_check2(dc, RRRN_S, RRRN_T); \
2288 tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \
2289 gen_load_store_alignment(dc, 2, addr, false); \
2290 tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \
2291 tcg_temp_free(addr); \
2292 } while (0)
2294 case 8: /*L32I.Nn*/
2295 gen_narrow_load_store(ld32u);
2296 break;
2298 case 9: /*S32I.Nn*/
2299 gen_narrow_load_store(st32);
2300 break;
2301 #undef gen_narrow_load_store
2303 case 10: /*ADD.Nn*/
2304 gen_window_check3(dc, RRRN_R, RRRN_S, RRRN_T);
2305 tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]);
2306 break;
2308 case 11: /*ADDI.Nn*/
2309 gen_window_check2(dc, RRRN_R, RRRN_S);
2310 tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], RRRN_T ? RRRN_T : -1);
2311 break;
2313 case 12: /*ST2n*/
2314 gen_window_check1(dc, RRRN_S);
2315 if (RRRN_T < 8) { /*MOVI.Nn*/
2316 tcg_gen_movi_i32(cpu_R[RRRN_S],
2317 RRRN_R | (RRRN_T << 4) |
2318 ((RRRN_T & 6) == 6 ? 0xffffff80 : 0));
2319 } else { /*BEQZ.Nn*/ /*BNEZ.Nn*/
2320 TCGCond eq_ne = (RRRN_T & 4) ? TCG_COND_NE : TCG_COND_EQ;
2322 gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0,
2323 4 + (RRRN_R | ((RRRN_T & 3) << 4)));
2325 break;
2327 case 13: /*ST3n*/
2328 switch (RRRN_R) {
2329 case 0: /*MOV.Nn*/
2330 gen_window_check2(dc, RRRN_S, RRRN_T);
2331 tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]);
2332 break;
2334 case 15: /*S3*/
2335 switch (RRRN_T) {
2336 case 0: /*RET.Nn*/
2337 gen_jump(dc, cpu_R[0]);
2338 break;
2340 case 1: /*RETW.Nn*/
2341 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2343 TCGv_i32 tmp = tcg_const_i32(dc->pc);
2344 gen_advance_ccount(dc);
2345 gen_helper_retw(tmp, tmp);
2346 gen_jump(dc, tmp);
2347 tcg_temp_free(tmp);
2349 break;
2351 case 2: /*BREAK.Nn*/
2352 TBD();
2353 break;
2355 case 3: /*NOP.Nn*/
2356 break;
2358 case 6: /*ILL.Nn*/
2359 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
2360 break;
2362 default: /*reserved*/
2363 RESERVED();
2364 break;
2366 break;
2368 default: /*reserved*/
2369 RESERVED();
2370 break;
2372 break;
2374 default: /*reserved*/
2375 RESERVED();
2376 break;
2379 gen_check_loop_end(dc, 0);
2380 dc->pc = dc->next_pc;
2382 return;
2384 invalid_opcode:
2385 qemu_log("INVALID(pc = %08x)\n", dc->pc);
2386 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
2387 #undef HAS_OPTION
2390 static void check_breakpoint(CPUState *env, DisasContext *dc)
2392 CPUBreakpoint *bp;
2394 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
2395 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
2396 if (bp->pc == dc->pc) {
2397 tcg_gen_movi_i32(cpu_pc, dc->pc);
2398 gen_exception(dc, EXCP_DEBUG);
2399 dc->is_jmp = DISAS_UPDATE;
2405 static void gen_intermediate_code_internal(
2406 CPUState *env, TranslationBlock *tb, int search_pc)
2408 DisasContext dc;
2409 int insn_count = 0;
2410 int j, lj = -1;
2411 uint16_t *gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2412 int max_insns = tb->cflags & CF_COUNT_MASK;
2413 uint32_t pc_start = tb->pc;
2414 uint32_t next_page_start =
2415 (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2417 if (max_insns == 0) {
2418 max_insns = CF_COUNT_MASK;
2421 dc.config = env->config;
2422 dc.singlestep_enabled = env->singlestep_enabled;
2423 dc.tb = tb;
2424 dc.pc = pc_start;
2425 dc.ring = tb->flags & XTENSA_TBFLAG_RING_MASK;
2426 dc.cring = (tb->flags & XTENSA_TBFLAG_EXCM) ? 0 : dc.ring;
2427 dc.lbeg = env->sregs[LBEG];
2428 dc.lend = env->sregs[LEND];
2429 dc.is_jmp = DISAS_NEXT;
2430 dc.ccount_delta = 0;
2432 init_litbase(&dc);
2433 init_sar_tracker(&dc);
2434 reset_used_window(&dc);
2436 gen_icount_start();
2438 if (env->singlestep_enabled && env->exception_taken) {
2439 env->exception_taken = 0;
2440 tcg_gen_movi_i32(cpu_pc, dc.pc);
2441 gen_exception(&dc, EXCP_DEBUG);
2444 do {
2445 check_breakpoint(env, &dc);
2447 if (search_pc) {
2448 j = gen_opc_ptr - gen_opc_buf;
2449 if (lj < j) {
2450 lj++;
2451 while (lj < j) {
2452 gen_opc_instr_start[lj++] = 0;
2455 gen_opc_pc[lj] = dc.pc;
2456 gen_opc_instr_start[lj] = 1;
2457 gen_opc_icount[lj] = insn_count;
2460 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
2461 tcg_gen_debug_insn_start(dc.pc);
2464 ++dc.ccount_delta;
2466 if (insn_count + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
2467 gen_io_start();
2470 disas_xtensa_insn(&dc);
2471 ++insn_count;
2472 if (env->singlestep_enabled) {
2473 tcg_gen_movi_i32(cpu_pc, dc.pc);
2474 gen_exception(&dc, EXCP_DEBUG);
2475 break;
2477 } while (dc.is_jmp == DISAS_NEXT &&
2478 insn_count < max_insns &&
2479 dc.pc < next_page_start &&
2480 gen_opc_ptr < gen_opc_end);
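/*
 * The main translation loop above stops at the first instruction that
 * ends the TB (jump, exception, TB-flag change), at max_insns, at a
 * page boundary, or when the opcode buffer is full; in search_pc mode
 * it also records pc/icount per TCG op so restore_state_to_opc() can
 * map a retranslation position back to a guest PC.
 */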
2482 reset_litbase(&dc);
2483 reset_sar_tracker(&dc);
2485 if (tb->cflags & CF_LAST_IO) {
2486 gen_io_end();
2489 if (dc.is_jmp == DISAS_NEXT) {
2490 gen_jumpi(&dc, dc.pc, 0);
2492 gen_icount_end(tb, insn_count);
2493 *gen_opc_ptr = INDEX_op_end;
2495 if (!search_pc) {
2496 tb->size = dc.pc - pc_start;
2497 tb->icount = insn_count;
2501 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2503 gen_intermediate_code_internal(env, tb, 0);
2506 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2508 gen_intermediate_code_internal(env, tb, 1);
2511 void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
2512 int flags)
2514 int i, j;
2516 cpu_fprintf(f, "PC=%08x\n\n", env->pc);
2518 for (i = j = 0; i < 256; ++i) {
2519 if (sregnames[i]) {
2520 cpu_fprintf(f, "%s=%08x%c", sregnames[i], env->sregs[i],
2521 (j++ % 4) == 3 ? '\n' : ' ');
2525 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
2527 for (i = j = 0; i < 256; ++i) {
2528 if (uregnames[i]) {
2529 cpu_fprintf(f, "%s=%08x%c", uregnames[i], env->uregs[i],
2530 (j++ % 4) == 3 ? '\n' : ' ');
2534 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
2536 for (i = 0; i < 16; ++i) {
2537 cpu_fprintf(f, "A%02d=%08x%c", i, env->regs[i],
2538 (i % 4) == 3 ? '\n' : ' ');
2541 cpu_fprintf(f, "\n");
2543 for (i = 0; i < env->config->nareg; ++i) {
2544 cpu_fprintf(f, "AR%02d=%08x%c", i, env->phys_regs[i],
2545 (i % 4) == 3 ? '\n' : ' ');
2549 void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
2551 env->pc = gen_opc_pc[pc_pos];