[qemu-kvm.git] / target-xtensa / translate.c
blob 82e8cccadcb863c0069962061092fffc12672be0
1 /*
2 * Xtensa ISA:
3 * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
5 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of the Open Source and Linux Lab nor the
16 * names of its contributors may be used to endorse or promote products
17 * derived from this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
31 #include <stdio.h>
33 #include "cpu.h"
34 #include "exec-all.h"
35 #include "disas.h"
36 #include "tcg-op.h"
37 #include "qemu-log.h"
38 #include "sysemu.h"
40 #include "helper.h"
41 #define GEN_HELPER 1
42 #include "helper.h"
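/*
 * helper.h is included twice: the first pass declares the helper
 * prototypes, the second, with GEN_HELPER defined, expands into the
 * gen_helper_* wrappers that emit TCG calls to those helpers.
 *
 * DisasContext below carries the per-translation-block decoder state:
 * current and next PC, privilege rings, cached LBEG/LEND for the
 * zero-overhead loop option, SAR value tracking, the pending CCOUNT
 * delta and the register-window usage seen so far.
 */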
44 typedef struct DisasContext {
45 const XtensaConfig *config;
46 TranslationBlock *tb;
47 uint32_t pc;
48 uint32_t next_pc;
49 int cring;
50 int ring;
51 uint32_t lbeg;
52 uint32_t lend;
53 TCGv_i32 litbase;
54 int is_jmp;
55 int singlestep_enabled;
57 bool sar_5bit;
58 bool sar_m32_5bit;
59 bool sar_m32_allocated;
60 TCGv_i32 sar_m32;
62 uint32_t ccount_delta;
63 unsigned used_window;
65 bool debug;
66 bool icount;
67 TCGv_i32 next_icount;
69 unsigned cpenable;
70 } DisasContext;
72 static TCGv_ptr cpu_env;
73 static TCGv_i32 cpu_pc;
74 static TCGv_i32 cpu_R[16];
75 static TCGv_i32 cpu_FR[16];
76 static TCGv_i32 cpu_SR[256];
77 static TCGv_i32 cpu_UR[256];
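/*
 * TCG globals backing the architectural state: the 16 visible address
 * registers of the current window (cpu_R), the FP registers, and one
 * global per implemented special/user register, indexed by SR/UR number.
 */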
79 #include "gen-icount.h"
81 static const char * const sregnames[256] = {
82 [LBEG] = "LBEG",
83 [LEND] = "LEND",
84 [LCOUNT] = "LCOUNT",
85 [SAR] = "SAR",
86 [BR] = "BR",
87 [LITBASE] = "LITBASE",
88 [SCOMPARE1] = "SCOMPARE1",
89 [ACCLO] = "ACCLO",
90 [ACCHI] = "ACCHI",
91 [MR] = "MR0",
92 [MR + 1] = "MR1",
93 [MR + 2] = "MR2",
94 [MR + 3] = "MR3",
95 [WINDOW_BASE] = "WINDOW_BASE",
96 [WINDOW_START] = "WINDOW_START",
97 [PTEVADDR] = "PTEVADDR",
98 [RASID] = "RASID",
99 [ITLBCFG] = "ITLBCFG",
100 [DTLBCFG] = "DTLBCFG",
101 [IBREAKENABLE] = "IBREAKENABLE",
102 [IBREAKA] = "IBREAKA0",
103 [IBREAKA + 1] = "IBREAKA1",
104 [DBREAKA] = "DBREAKA0",
105 [DBREAKA + 1] = "DBREAKA1",
106 [DBREAKC] = "DBREAKC0",
107 [DBREAKC + 1] = "DBREAKC1",
108 [EPC1] = "EPC1",
109 [EPC1 + 1] = "EPC2",
110 [EPC1 + 2] = "EPC3",
111 [EPC1 + 3] = "EPC4",
112 [EPC1 + 4] = "EPC5",
113 [EPC1 + 5] = "EPC6",
114 [EPC1 + 6] = "EPC7",
115 [DEPC] = "DEPC",
116 [EPS2] = "EPS2",
117 [EPS2 + 1] = "EPS3",
118 [EPS2 + 2] = "EPS4",
119 [EPS2 + 3] = "EPS5",
120 [EPS2 + 4] = "EPS6",
121 [EPS2 + 5] = "EPS7",
122 [EXCSAVE1] = "EXCSAVE1",
123 [EXCSAVE1 + 1] = "EXCSAVE2",
124 [EXCSAVE1 + 2] = "EXCSAVE3",
125 [EXCSAVE1 + 3] = "EXCSAVE4",
126 [EXCSAVE1 + 4] = "EXCSAVE5",
127 [EXCSAVE1 + 5] = "EXCSAVE6",
128 [EXCSAVE1 + 6] = "EXCSAVE7",
129 [CPENABLE] = "CPENABLE",
130 [INTSET] = "INTSET",
131 [INTCLEAR] = "INTCLEAR",
132 [INTENABLE] = "INTENABLE",
133 [PS] = "PS",
134 [VECBASE] = "VECBASE",
135 [EXCCAUSE] = "EXCCAUSE",
136 [DEBUGCAUSE] = "DEBUGCAUSE",
137 [CCOUNT] = "CCOUNT",
138 [PRID] = "PRID",
139 [ICOUNT] = "ICOUNT",
140 [ICOUNTLEVEL] = "ICOUNTLEVEL",
141 [EXCVADDR] = "EXCVADDR",
142 [CCOMPARE] = "CCOMPARE0",
143 [CCOMPARE + 1] = "CCOMPARE1",
144 [CCOMPARE + 2] = "CCOMPARE2",
147 static const char * const uregnames[256] = {
148 [THREADPTR] = "THREADPTR",
149 [FCR] = "FCR",
150 [FSR] = "FSR",
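/*
 * Besides naming the TCG globals, sregnames/uregnames double as the
 * "is this register implemented" map: gen_rsr/gen_wsr and the RUR/WUR
 * decoders only touch cpu_SR/cpu_UR entries that have a name here and
 * just log the access otherwise.
 */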
153 void xtensa_translate_init(void)
155 static const char * const regnames[] = {
156 "ar0", "ar1", "ar2", "ar3",
157 "ar4", "ar5", "ar6", "ar7",
158 "ar8", "ar9", "ar10", "ar11",
159 "ar12", "ar13", "ar14", "ar15",
161 static const char * const fregnames[] = {
162 "f0", "f1", "f2", "f3",
163 "f4", "f5", "f6", "f7",
164 "f8", "f9", "f10", "f11",
165 "f12", "f13", "f14", "f15",
167 int i;
169 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
170 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
171 offsetof(CPUXtensaState, pc), "pc");
173 for (i = 0; i < 16; i++) {
174 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
175 offsetof(CPUXtensaState, regs[i]),
176 regnames[i]);
179 for (i = 0; i < 16; i++) {
180 cpu_FR[i] = tcg_global_mem_new_i32(TCG_AREG0,
181 offsetof(CPUXtensaState, fregs[i]),
182 fregnames[i]);
185 for (i = 0; i < 256; ++i) {
186 if (sregnames[i]) {
187 cpu_SR[i] = tcg_global_mem_new_i32(TCG_AREG0,
188 offsetof(CPUXtensaState, sregs[i]),
189 sregnames[i]);
193 for (i = 0; i < 256; ++i) {
194 if (uregnames[i]) {
195 cpu_UR[i] = tcg_global_mem_new_i32(TCG_AREG0,
196 offsetof(CPUXtensaState, uregs[i]),
197 uregnames[i]);
200 #define GEN_HELPER 2
201 #include "helper.h"
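/*
 * xtensa_translate_init() binds the TCG globals above to their fields
 * in CPUXtensaState; the trailing include of helper.h with GEN_HELPER
 * set to 2 performs the second expansion pass over the helper list.
 */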
204 static inline bool option_bits_enabled(DisasContext *dc, uint64_t opt)
206 return xtensa_option_bits_enabled(dc->config, opt);
209 static inline bool option_enabled(DisasContext *dc, int opt)
211 return xtensa_option_enabled(dc->config, opt);
214 static void init_litbase(DisasContext *dc)
216 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
217 dc->litbase = tcg_temp_local_new_i32();
218 tcg_gen_andi_i32(dc->litbase, cpu_SR[LITBASE], 0xfffff000);
222 static void reset_litbase(DisasContext *dc)
224 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
225 tcg_temp_free(dc->litbase);
229 static void init_sar_tracker(DisasContext *dc)
231 dc->sar_5bit = false;
232 dc->sar_m32_5bit = false;
233 dc->sar_m32_allocated = false;
236 static void reset_sar_tracker(DisasContext *dc)
238 if (dc->sar_m32_allocated) {
239 tcg_temp_free(dc->sar_m32);
243 static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
245 tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
246 if (dc->sar_m32_5bit) {
247 tcg_gen_discard_i32(dc->sar_m32);
249 dc->sar_5bit = true;
250 dc->sar_m32_5bit = false;
253 static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
255 TCGv_i32 tmp = tcg_const_i32(32);
256 if (!dc->sar_m32_allocated) {
257 dc->sar_m32 = tcg_temp_local_new_i32();
258 dc->sar_m32_allocated = true;
260 tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
261 tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
262 dc->sar_5bit = false;
263 dc->sar_m32_5bit = true;
264 tcg_temp_free(tmp);
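/*
 * SAR tracking: sar_5bit is set when SAR is known to hold a plain
 * 0..31 right-shift amount (SSR/SSAI/SSA8L); sar_m32_5bit is set when
 * SSL/SSA8B stored 32 - amount in SAR and kept the original 5-bit
 * amount in sar_m32. SRL/SRA/SLL use these to emit 32-bit shifts
 * instead of the generic 64-bit fallback.
 */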
267 static void gen_advance_ccount(DisasContext *dc)
269 if (dc->ccount_delta > 0) {
270 TCGv_i32 tmp = tcg_const_i32(dc->ccount_delta);
271 dc->ccount_delta = 0;
272 gen_helper_advance_ccount(cpu_env, tmp);
273 tcg_temp_free(tmp);
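/*
 * ccount_delta counts instructions translated since CCOUNT was last
 * brought up to date; it is flushed through helper_advance_ccount
 * before anything that can observe CCOUNT or raise an exception.
 */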
277 static void reset_used_window(DisasContext *dc)
279 dc->used_window = 0;
282 static void gen_exception(DisasContext *dc, int excp)
284 TCGv_i32 tmp = tcg_const_i32(excp);
285 gen_advance_ccount(dc);
286 gen_helper_exception(cpu_env, tmp);
287 tcg_temp_free(tmp);
290 static void gen_exception_cause(DisasContext *dc, uint32_t cause)
292 TCGv_i32 tpc = tcg_const_i32(dc->pc);
293 TCGv_i32 tcause = tcg_const_i32(cause);
294 gen_advance_ccount(dc);
295 gen_helper_exception_cause(cpu_env, tpc, tcause);
296 tcg_temp_free(tpc);
297 tcg_temp_free(tcause);
298 if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
299 cause == SYSCALL_CAUSE) {
300 dc->is_jmp = DISAS_UPDATE;
304 static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
305 TCGv_i32 vaddr)
307 TCGv_i32 tpc = tcg_const_i32(dc->pc);
308 TCGv_i32 tcause = tcg_const_i32(cause);
309 gen_advance_ccount(dc);
310 gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr);
311 tcg_temp_free(tpc);
312 tcg_temp_free(tcause);
315 static void gen_debug_exception(DisasContext *dc, uint32_t cause)
317 TCGv_i32 tpc = tcg_const_i32(dc->pc);
318 TCGv_i32 tcause = tcg_const_i32(cause);
319 gen_advance_ccount(dc);
320 gen_helper_debug_exception(cpu_env, tpc, tcause);
321 tcg_temp_free(tpc);
322 tcg_temp_free(tcause);
323 if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
324 dc->is_jmp = DISAS_UPDATE;
328 static void gen_check_privilege(DisasContext *dc)
330 if (dc->cring) {
331 gen_exception_cause(dc, PRIVILEGED_CAUSE);
332 dc->is_jmp = DISAS_UPDATE;
336 static void gen_check_cpenable(DisasContext *dc, unsigned cp)
338 if (option_enabled(dc, XTENSA_OPTION_COPROCESSOR) &&
339 !(dc->cpenable & (1 << cp))) {
340 gen_exception_cause(dc, COPROCESSOR0_DISABLED + cp);
341 dc->is_jmp = DISAS_UPDATE;
345 static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
347 tcg_gen_mov_i32(cpu_pc, dest);
348 gen_advance_ccount(dc);
349 if (dc->icount) {
350 tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
352 if (dc->singlestep_enabled) {
353 gen_exception(dc, EXCP_DEBUG);
354 } else {
355 if (slot >= 0) {
356 tcg_gen_goto_tb(slot);
357 tcg_gen_exit_tb((tcg_target_long)dc->tb + slot);
358 } else {
359 tcg_gen_exit_tb(0);
362 dc->is_jmp = DISAS_UPDATE;
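/*
 * slot >= 0 requests direct TB chaining: goto_tb plus exit_tb with the
 * TB pointer and slot number so the jump can be patched later. slot -1
 * (computed jumps, cross-page targets) falls back to exit_tb(0), and
 * single-stepping raises EXCP_DEBUG instead of chaining.
 */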
365 static void gen_jump(DisasContext *dc, TCGv dest)
367 gen_jump_slot(dc, dest, -1);
370 static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
372 TCGv_i32 tmp = tcg_const_i32(dest);
373 if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
374 slot = -1;
376 gen_jump_slot(dc, tmp, slot);
377 tcg_temp_free(tmp);
380 static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
381 int slot)
383 TCGv_i32 tcallinc = tcg_const_i32(callinc);
385 tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
386 tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
387 tcg_temp_free(tcallinc);
388 tcg_gen_movi_i32(cpu_R[callinc << 2],
389 (callinc << 30) | (dc->next_pc & 0x3fffffff));
390 gen_jump_slot(dc, dest, slot);
393 static void gen_callw(DisasContext *dc, int callinc, TCGv_i32 dest)
395 gen_callw_slot(dc, callinc, dest, -1);
398 static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
400 TCGv_i32 tmp = tcg_const_i32(dest);
401 if (((dc->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
402 slot = -1;
404 gen_callw_slot(dc, callinc, tmp, slot);
405 tcg_temp_free(tmp);
408 static bool gen_check_loop_end(DisasContext *dc, int slot)
410 if (option_enabled(dc, XTENSA_OPTION_LOOP) &&
411 !(dc->tb->flags & XTENSA_TBFLAG_EXCM) &&
412 dc->next_pc == dc->lend) {
413 int label = gen_new_label();
415 gen_advance_ccount(dc);
416 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
417 tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
418 gen_jumpi(dc, dc->lbeg, slot);
419 gen_set_label(label);
420 gen_jumpi(dc, dc->next_pc, -1);
421 return true;
423 return false;
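/*
 * Zero-overhead loops: when the next instruction address equals LEND
 * (and EXCM is clear), LCOUNT is tested; if non-zero it is decremented
 * and control goes back to LBEG, otherwise execution falls through to
 * next_pc. Returns true if the loop-end code was emitted.
 */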
426 static void gen_jumpi_check_loop_end(DisasContext *dc, int slot)
428 if (!gen_check_loop_end(dc, slot)) {
429 gen_jumpi(dc, dc->next_pc, slot);
433 static void gen_brcond(DisasContext *dc, TCGCond cond,
434 TCGv_i32 t0, TCGv_i32 t1, uint32_t offset)
436 int label = gen_new_label();
438 gen_advance_ccount(dc);
439 tcg_gen_brcond_i32(cond, t0, t1, label);
440 gen_jumpi_check_loop_end(dc, 0);
441 gen_set_label(label);
442 gen_jumpi(dc, dc->pc + offset, 1);
445 static void gen_brcondi(DisasContext *dc, TCGCond cond,
446 TCGv_i32 t0, uint32_t t1, uint32_t offset)
448 TCGv_i32 tmp = tcg_const_i32(t1);
449 gen_brcond(dc, cond, t0, tmp, offset);
450 tcg_temp_free(tmp);
453 static void gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
455 gen_advance_ccount(dc);
456 tcg_gen_mov_i32(d, cpu_SR[sr]);
459 static void gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
461 tcg_gen_shri_i32(d, cpu_SR[EXCVADDR], 10);
462 tcg_gen_or_i32(d, d, cpu_SR[sr]);
463 tcg_gen_andi_i32(d, d, 0xfffffffc);
466 static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
468 static void (* const rsr_handler[256])(DisasContext *dc,
469 TCGv_i32 d, uint32_t sr) = {
470 [CCOUNT] = gen_rsr_ccount,
471 [PTEVADDR] = gen_rsr_ptevaddr,
474 if (sregnames[sr]) {
475 if (rsr_handler[sr]) {
476 rsr_handler[sr](dc, d, sr);
477 } else {
478 tcg_gen_mov_i32(d, cpu_SR[sr]);
480 } else {
481 qemu_log("RSR %d not implemented, ", sr);
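/*
 * RSR/WSR are table-driven: special registers with side effects get a
 * dedicated handler, everything else is a plain move to/from cpu_SR[sr].
 */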
485 static void gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s)
487 gen_helper_wsr_lbeg(cpu_env, s);
488 gen_jumpi_check_loop_end(dc, 0);
491 static void gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s)
493 gen_helper_wsr_lend(cpu_env, s);
494 gen_jumpi_check_loop_end(dc, 0);
497 static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s)
499 tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f);
500 if (dc->sar_m32_5bit) {
501 tcg_gen_discard_i32(dc->sar_m32);
503 dc->sar_5bit = false;
504 dc->sar_m32_5bit = false;
507 static void gen_wsr_br(DisasContext *dc, uint32_t sr, TCGv_i32 s)
509 tcg_gen_andi_i32(cpu_SR[sr], s, 0xffff);
512 static void gen_wsr_litbase(DisasContext *dc, uint32_t sr, TCGv_i32 s)
514 tcg_gen_andi_i32(cpu_SR[sr], s, 0xfffff001);
515 /* This can change tb->flags, so exit tb */
516 gen_jumpi_check_loop_end(dc, -1);
519 static void gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s)
521 tcg_gen_ext8s_i32(cpu_SR[sr], s);
524 static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v)
526 gen_helper_wsr_windowbase(cpu_env, v);
527 reset_used_window(dc);
530 static void gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v)
532 tcg_gen_andi_i32(cpu_SR[sr], v, (1 << dc->config->nareg / 4) - 1);
533 reset_used_window(dc);
536 static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v)
538 tcg_gen_andi_i32(cpu_SR[sr], v, 0xffc00000);
541 static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
543 gen_helper_wsr_rasid(cpu_env, v);
544 /* This can change tb->flags, so exit tb */
545 gen_jumpi_check_loop_end(dc, -1);
548 static void gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v)
550 tcg_gen_andi_i32(cpu_SR[sr], v, 0x01130000);
553 static void gen_wsr_ibreakenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
555 gen_helper_wsr_ibreakenable(cpu_env, v);
556 gen_jumpi_check_loop_end(dc, 0);
559 static void gen_wsr_ibreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
561 unsigned id = sr - IBREAKA;
563 if (id < dc->config->nibreak) {
564 TCGv_i32 tmp = tcg_const_i32(id);
565 gen_helper_wsr_ibreaka(cpu_env, tmp, v);
566 tcg_temp_free(tmp);
567 gen_jumpi_check_loop_end(dc, 0);
571 static void gen_wsr_dbreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
573 unsigned id = sr - DBREAKA;
575 if (id < dc->config->ndbreak) {
576 TCGv_i32 tmp = tcg_const_i32(id);
577 gen_helper_wsr_dbreaka(cpu_env, tmp, v);
578 tcg_temp_free(tmp);
582 static void gen_wsr_dbreakc(DisasContext *dc, uint32_t sr, TCGv_i32 v)
584 unsigned id = sr - DBREAKC;
586 if (id < dc->config->ndbreak) {
587 TCGv_i32 tmp = tcg_const_i32(id);
588 gen_helper_wsr_dbreakc(cpu_env, tmp, v);
589 tcg_temp_free(tmp);
593 static void gen_wsr_cpenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
595 tcg_gen_andi_i32(cpu_SR[sr], v, 0xff);
596 /* This can change tb->flags, so exit tb */
597 gen_jumpi_check_loop_end(dc, -1);
600 static void gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v)
602 tcg_gen_andi_i32(cpu_SR[sr], v,
603 dc->config->inttype_mask[INTTYPE_SOFTWARE]);
604 gen_helper_check_interrupts(cpu_env);
605 gen_jumpi_check_loop_end(dc, 0);
608 static void gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v)
610 TCGv_i32 tmp = tcg_temp_new_i32();
612 tcg_gen_andi_i32(tmp, v,
613 dc->config->inttype_mask[INTTYPE_EDGE] |
614 dc->config->inttype_mask[INTTYPE_NMI] |
615 dc->config->inttype_mask[INTTYPE_SOFTWARE]);
616 tcg_gen_andc_i32(cpu_SR[INTSET], cpu_SR[INTSET], tmp);
617 tcg_temp_free(tmp);
618 gen_helper_check_interrupts(cpu_env);
621 static void gen_wsr_intenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
623 tcg_gen_mov_i32(cpu_SR[sr], v);
624 gen_helper_check_interrupts(cpu_env);
625 gen_jumpi_check_loop_end(dc, 0);
628 static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
630 uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
631 PS_UM | PS_EXCM | PS_INTLEVEL;
633 if (option_enabled(dc, XTENSA_OPTION_MMU)) {
634 mask |= PS_RING;
636 tcg_gen_andi_i32(cpu_SR[sr], v, mask);
637 reset_used_window(dc);
638 gen_helper_check_interrupts(cpu_env);
639 /* This can change mmu index and tb->flags, so exit tb */
640 gen_jumpi_check_loop_end(dc, -1);
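/*
 * Handlers such as LITBASE, RASID, CPENABLE and PS above (and
 * ICOUNTLEVEL below) end with gen_jumpi_check_loop_end(dc, -1): these
 * writes change bits cached in tb->flags or the MMU index, so the
 * current TB must end and the next one be looked up with fresh flags.
 */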
643 static void gen_wsr_debugcause(DisasContext *dc, uint32_t sr, TCGv_i32 v)
647 static void gen_wsr_prid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
651 static void gen_wsr_icount(DisasContext *dc, uint32_t sr, TCGv_i32 v)
653 if (dc->icount) {
654 tcg_gen_mov_i32(dc->next_icount, v);
655 } else {
656 tcg_gen_mov_i32(cpu_SR[sr], v);
660 static void gen_wsr_icountlevel(DisasContext *dc, uint32_t sr, TCGv_i32 v)
662 tcg_gen_andi_i32(cpu_SR[sr], v, 0xf);
663 /* This can change tb->flags, so exit tb */
664 gen_jumpi_check_loop_end(dc, -1);
667 static void gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v)
669 uint32_t id = sr - CCOMPARE;
670 if (id < dc->config->nccompare) {
671 uint32_t int_bit = 1 << dc->config->timerint[id];
672 gen_advance_ccount(dc);
673 tcg_gen_mov_i32(cpu_SR[sr], v);
674 tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit);
675 gen_helper_check_interrupts(cpu_env);
679 static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
681 static void (* const wsr_handler[256])(DisasContext *dc,
682 uint32_t sr, TCGv_i32 v) = {
683 [LBEG] = gen_wsr_lbeg,
684 [LEND] = gen_wsr_lend,
685 [SAR] = gen_wsr_sar,
686 [BR] = gen_wsr_br,
687 [LITBASE] = gen_wsr_litbase,
688 [ACCHI] = gen_wsr_acchi,
689 [WINDOW_BASE] = gen_wsr_windowbase,
690 [WINDOW_START] = gen_wsr_windowstart,
691 [PTEVADDR] = gen_wsr_ptevaddr,
692 [RASID] = gen_wsr_rasid,
693 [ITLBCFG] = gen_wsr_tlbcfg,
694 [DTLBCFG] = gen_wsr_tlbcfg,
695 [IBREAKENABLE] = gen_wsr_ibreakenable,
696 [IBREAKA] = gen_wsr_ibreaka,
697 [IBREAKA + 1] = gen_wsr_ibreaka,
698 [DBREAKA] = gen_wsr_dbreaka,
699 [DBREAKA + 1] = gen_wsr_dbreaka,
700 [DBREAKC] = gen_wsr_dbreakc,
701 [DBREAKC + 1] = gen_wsr_dbreakc,
702 [CPENABLE] = gen_wsr_cpenable,
703 [INTSET] = gen_wsr_intset,
704 [INTCLEAR] = gen_wsr_intclear,
705 [INTENABLE] = gen_wsr_intenable,
706 [PS] = gen_wsr_ps,
707 [DEBUGCAUSE] = gen_wsr_debugcause,
708 [PRID] = gen_wsr_prid,
709 [ICOUNT] = gen_wsr_icount,
710 [ICOUNTLEVEL] = gen_wsr_icountlevel,
711 [CCOMPARE] = gen_wsr_ccompare,
712 [CCOMPARE + 1] = gen_wsr_ccompare,
713 [CCOMPARE + 2] = gen_wsr_ccompare,
716 if (sregnames[sr]) {
717 if (wsr_handler[sr]) {
718 wsr_handler[sr](dc, sr, s);
719 } else {
720 tcg_gen_mov_i32(cpu_SR[sr], s);
722 } else {
723 qemu_log("WSR %d not implemented, ", sr);
727 static void gen_wur(uint32_t ur, TCGv_i32 s)
729 switch (ur) {
730 case FCR:
731 gen_helper_wur_fcr(cpu_env, s);
732 break;
734 case FSR:
735 tcg_gen_andi_i32(cpu_UR[ur], s, 0xffffff80);
736 break;
738 default:
739 tcg_gen_mov_i32(cpu_UR[ur], s);
740 break;
744 static void gen_load_store_alignment(DisasContext *dc, int shift,
745 TCGv_i32 addr, bool no_hw_alignment)
747 if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
748 tcg_gen_andi_i32(addr, addr, ~0 << shift);
749 } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
750 no_hw_alignment) {
751 int label = gen_new_label();
752 TCGv_i32 tmp = tcg_temp_new_i32();
753 tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
754 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
755 gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
756 gen_set_label(label);
757 tcg_temp_free(tmp);
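/*
 * Alignment handling: cores without the unaligned-exception option
 * simply mask off the low address bits. On cores with hardware
 * alignment, only the instructions that forbid it (no_hw_alignment:
 * L32AI, S32C1I, S32RI) get an explicit check that raises
 * LoadStoreAlignmentCause for a misaligned address.
 */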
761 static void gen_waiti(DisasContext *dc, uint32_t imm4)
763 TCGv_i32 pc = tcg_const_i32(dc->next_pc);
764 TCGv_i32 intlevel = tcg_const_i32(imm4);
765 gen_advance_ccount(dc);
766 gen_helper_waiti(cpu_env, pc, intlevel);
767 tcg_temp_free(pc);
768 tcg_temp_free(intlevel);
771 static void gen_window_check1(DisasContext *dc, unsigned r1)
773 if (dc->tb->flags & XTENSA_TBFLAG_EXCM) {
774 return;
776 if (option_enabled(dc, XTENSA_OPTION_WINDOWED_REGISTER) &&
777 r1 / 4 > dc->used_window) {
778 TCGv_i32 pc = tcg_const_i32(dc->pc);
779 TCGv_i32 w = tcg_const_i32(r1 / 4);
781 dc->used_window = r1 / 4;
782 gen_advance_ccount(dc);
783 gen_helper_window_check(cpu_env, pc, w);
785 tcg_temp_free(w);
786 tcg_temp_free(pc);
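/*
 * Lazy window overflow check: used_window remembers the highest AR
 * quad referenced since the last window change, so helper_window_check
 * is only called when an instruction touches a higher register than
 * anything seen before in this TB.
 */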
790 static void gen_window_check2(DisasContext *dc, unsigned r1, unsigned r2)
792 gen_window_check1(dc, r1 > r2 ? r1 : r2);
795 static void gen_window_check3(DisasContext *dc, unsigned r1, unsigned r2,
796 unsigned r3)
798 gen_window_check2(dc, r1, r2 > r3 ? r2 : r3);
801 static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned)
803 TCGv_i32 m = tcg_temp_new_i32();
805 if (hi) {
806 (is_unsigned ? tcg_gen_shri_i32 : tcg_gen_sari_i32)(m, v, 16);
807 } else {
808 (is_unsigned ? tcg_gen_ext16u_i32 : tcg_gen_ext16s_i32)(m, v);
810 return m;
813 static void disas_xtensa_insn(DisasContext *dc)
815 #define HAS_OPTION_BITS(opt) do { \
816 if (!option_bits_enabled(dc, opt)) { \
817 qemu_log("Option is not enabled %s:%d\n", \
818 __FILE__, __LINE__); \
819 goto invalid_opcode; \
821 } while (0)
823 #define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt))
825 #define TBD() qemu_log("TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__)
826 #define RESERVED() do { \
827 qemu_log("RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \
828 dc->pc, b0, b1, b2, __FILE__, __LINE__); \
829 goto invalid_opcode; \
830 } while (0)
833 #ifdef TARGET_WORDS_BIGENDIAN
834 #define OP0 (((b0) & 0xf0) >> 4)
835 #define OP1 (((b2) & 0xf0) >> 4)
836 #define OP2 ((b2) & 0xf)
837 #define RRR_R ((b1) & 0xf)
838 #define RRR_S (((b1) & 0xf0) >> 4)
839 #define RRR_T ((b0) & 0xf)
840 #else
841 #define OP0 (((b0) & 0xf))
842 #define OP1 (((b2) & 0xf))
843 #define OP2 (((b2) & 0xf0) >> 4)
844 #define RRR_R (((b1) & 0xf0) >> 4)
845 #define RRR_S (((b1) & 0xf))
846 #define RRR_T (((b0) & 0xf0) >> 4)
847 #endif
848 #define RRR_X ((RRR_R & 0x4) >> 2)
849 #define RRR_Y ((RRR_T & 0x4) >> 2)
850 #define RRR_W (RRR_R & 0x3)
852 #define RRRN_R RRR_R
853 #define RRRN_S RRR_S
854 #define RRRN_T RRR_T
856 #define RRI8_R RRR_R
857 #define RRI8_S RRR_S
858 #define RRI8_T RRR_T
859 #define RRI8_IMM8 (b2)
860 #define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8)
862 #ifdef TARGET_WORDS_BIGENDIAN
863 #define RI16_IMM16 (((b1) << 8) | (b2))
864 #else
865 #define RI16_IMM16 (((b2) << 8) | (b1))
866 #endif
868 #ifdef TARGET_WORDS_BIGENDIAN
869 #define CALL_N (((b0) & 0xc) >> 2)
870 #define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2))
871 #else
872 #define CALL_N (((b0) & 0x30) >> 4)
873 #define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10))
874 #endif
875 #define CALL_OFFSET_SE \
876 (((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET)
878 #define CALLX_N CALL_N
879 #ifdef TARGET_WORDS_BIGENDIAN
880 #define CALLX_M ((b0) & 0x3)
881 #else
882 #define CALLX_M (((b0) & 0xc0) >> 6)
883 #endif
884 #define CALLX_S RRR_S
886 #define BRI12_M CALLX_M
887 #define BRI12_S RRR_S
888 #ifdef TARGET_WORDS_BIGENDIAN
889 #define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2))
890 #else
891 #define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4))
892 #endif
893 #define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 0xfffff000 : 0) | BRI12_IMM12)
895 #define BRI8_M BRI12_M
896 #define BRI8_R RRI8_R
897 #define BRI8_S RRI8_S
898 #define BRI8_IMM8 RRI8_IMM8
899 #define BRI8_IMM8_SE RRI8_IMM8_SE
901 #define RSR_SR (b1)
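/*
 * Field extraction macros for the Xtensa instruction formats (RRR,
 * RRRN, RRI8, RI16, CALL/CALLX, BRI12/BRI8), defined for both byte
 * orders; b0..b2 are the raw instruction bytes fetched below.
 */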
903 uint8_t b0 = cpu_ldub_code(cpu_single_env, dc->pc);
904 uint8_t b1 = cpu_ldub_code(cpu_single_env, dc->pc + 1);
905 uint8_t b2 = 0;
907 static const uint32_t B4CONST[] = {
908 0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
911 static const uint32_t B4CONSTU[] = {
912 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
915 if (OP0 >= 8) {
916 dc->next_pc = dc->pc + 2;
917 HAS_OPTION(XTENSA_OPTION_CODE_DENSITY);
918 } else {
919 dc->next_pc = dc->pc + 3;
920 b2 = cpu_ldub_code(cpu_single_env, dc->pc + 2);
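/*
 * OP0 values 8..15 select the 2-byte density-option encodings; all
 * other instructions are 3 bytes long and need the third byte b2.
 */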
923 switch (OP0) {
924 case 0: /*QRST*/
925 switch (OP1) {
926 case 0: /*RST0*/
927 switch (OP2) {
928 case 0: /*ST0*/
929 if ((RRR_R & 0xc) == 0x8) {
930 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
933 switch (RRR_R) {
934 case 0: /*SNM0*/
935 switch (CALLX_M) {
936 case 0: /*ILL*/
937 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
938 break;
940 case 1: /*reserved*/
941 RESERVED();
942 break;
944 case 2: /*JR*/
945 switch (CALLX_N) {
946 case 0: /*RET*/
947 case 2: /*JX*/
948 gen_window_check1(dc, CALLX_S);
949 gen_jump(dc, cpu_R[CALLX_S]);
950 break;
952 case 1: /*RETWw*/
953 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
955 TCGv_i32 tmp = tcg_const_i32(dc->pc);
956 gen_advance_ccount(dc);
957 gen_helper_retw(tmp, cpu_env, tmp);
958 gen_jump(dc, tmp);
959 tcg_temp_free(tmp);
961 break;
963 case 3: /*reserved*/
964 RESERVED();
965 break;
967 break;
969 case 3: /*CALLX*/
970 gen_window_check2(dc, CALLX_S, CALLX_N << 2);
971 switch (CALLX_N) {
972 case 0: /*CALLX0*/
974 TCGv_i32 tmp = tcg_temp_new_i32();
975 tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
976 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
977 gen_jump(dc, tmp);
978 tcg_temp_free(tmp);
980 break;
982 case 1: /*CALLX4w*/
983 case 2: /*CALLX8w*/
984 case 3: /*CALLX12w*/
985 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
987 TCGv_i32 tmp = tcg_temp_new_i32();
989 tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
990 gen_callw(dc, CALLX_N, tmp);
991 tcg_temp_free(tmp);
993 break;
995 break;
997 break;
999 case 1: /*MOVSPw*/
1000 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1001 gen_window_check2(dc, RRR_T, RRR_S);
1003 TCGv_i32 pc = tcg_const_i32(dc->pc);
1004 gen_advance_ccount(dc);
1005 gen_helper_movsp(cpu_env, pc);
1006 tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]);
1007 tcg_temp_free(pc);
1009 break;
1011 case 2: /*SYNC*/
1012 switch (RRR_T) {
1013 case 0: /*ISYNC*/
1014 break;
1016 case 1: /*RSYNC*/
1017 break;
1019 case 2: /*ESYNC*/
1020 break;
1022 case 3: /*DSYNC*/
1023 break;
1025 case 8: /*EXCW*/
1026 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
1027 break;
1029 case 12: /*MEMW*/
1030 break;
1032 case 13: /*EXTW*/
1033 break;
1035 case 15: /*NOP*/
1036 break;
1038 default: /*reserved*/
1039 RESERVED();
1040 break;
1042 break;
1044 case 3: /*RFEIx*/
1045 switch (RRR_T) {
1046 case 0: /*RFETx*/
1047 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
1048 switch (RRR_S) {
1049 case 0: /*RFEx*/
1050 gen_check_privilege(dc);
1051 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
1052 gen_helper_check_interrupts(cpu_env);
1053 gen_jump(dc, cpu_SR[EPC1]);
1054 break;
1056 case 1: /*RFUEx*/
1057 RESERVED();
1058 break;
1060 case 2: /*RFDEx*/
1061 gen_check_privilege(dc);
1062 gen_jump(dc, cpu_SR[
1063 dc->config->ndepc ? DEPC : EPC1]);
1064 break;
1066 case 4: /*RFWOw*/
1067 case 5: /*RFWUw*/
1068 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1069 gen_check_privilege(dc);
1071 TCGv_i32 tmp = tcg_const_i32(1);
1073 tcg_gen_andi_i32(
1074 cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
1075 tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
1077 if (RRR_S == 4) {
1078 tcg_gen_andc_i32(cpu_SR[WINDOW_START],
1079 cpu_SR[WINDOW_START], tmp);
1080 } else {
1081 tcg_gen_or_i32(cpu_SR[WINDOW_START],
1082 cpu_SR[WINDOW_START], tmp);
1085 gen_helper_restore_owb(cpu_env);
1086 gen_helper_check_interrupts(cpu_env);
1087 gen_jump(dc, cpu_SR[EPC1]);
1089 tcg_temp_free(tmp);
1091 break;
1093 default: /*reserved*/
1094 RESERVED();
1095 break;
1097 break;
1099 case 1: /*RFIx*/
1100 HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT);
1101 if (RRR_S >= 2 && RRR_S <= dc->config->nlevel) {
1102 gen_check_privilege(dc);
1103 tcg_gen_mov_i32(cpu_SR[PS],
1104 cpu_SR[EPS2 + RRR_S - 2]);
1105 gen_helper_check_interrupts(cpu_env);
1106 gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]);
1107 } else {
1108 qemu_log("RFI %d is illegal\n", RRR_S);
1109 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
1111 break;
1113 case 2: /*RFME*/
1114 TBD();
1115 break;
1117 default: /*reserved*/
1118 RESERVED();
1119 break;
1122 break;
1124 case 4: /*BREAKx*/
1125 HAS_OPTION(XTENSA_OPTION_DEBUG);
1126 if (dc->debug) {
1127 gen_debug_exception(dc, DEBUGCAUSE_BI);
1129 break;
1131 case 5: /*SYSCALLx*/
1132 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
1133 switch (RRR_S) {
1134 case 0: /*SYSCALLx*/
1135 gen_exception_cause(dc, SYSCALL_CAUSE);
1136 break;
1138 case 1: /*SIMCALL*/
1139 if (semihosting_enabled) {
1140 gen_check_privilege(dc);
1141 gen_helper_simcall(cpu_env);
1142 } else {
1143 qemu_log("SIMCALL but semihosting is disabled\n");
1144 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
1146 break;
1148 default:
1149 RESERVED();
1150 break;
1152 break;
1154 case 6: /*RSILx*/
1155 HAS_OPTION(XTENSA_OPTION_INTERRUPT);
1156 gen_check_privilege(dc);
1157 gen_window_check1(dc, RRR_T);
1158 tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]);
1159 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL);
1160 tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S);
1161 gen_helper_check_interrupts(cpu_env);
1162 gen_jumpi_check_loop_end(dc, 0);
1163 break;
1165 case 7: /*WAITIx*/
1166 HAS_OPTION(XTENSA_OPTION_INTERRUPT);
1167 gen_check_privilege(dc);
1168 gen_waiti(dc, RRR_S);
1169 break;
1171 case 8: /*ANY4p*/
1172 case 9: /*ALL4p*/
1173 case 10: /*ANY8p*/
1174 case 11: /*ALL8p*/
1175 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1177 const unsigned shift = (RRR_R & 2) ? 8 : 4;
1178 TCGv_i32 mask = tcg_const_i32(
1179 ((1 << shift) - 1) << RRR_S);
1180 TCGv_i32 tmp = tcg_temp_new_i32();
1182 tcg_gen_and_i32(tmp, cpu_SR[BR], mask);
1183 if (RRR_R & 1) { /*ALL*/
1184 tcg_gen_addi_i32(tmp, tmp, 1 << RRR_S);
1185 } else { /*ANY*/
1186 tcg_gen_add_i32(tmp, tmp, mask);
1188 tcg_gen_shri_i32(tmp, tmp, RRR_S + shift);
1189 tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR],
1190 tmp, RRR_T, 1);
1191 tcg_temp_free(mask);
1192 tcg_temp_free(tmp);
1194 break;
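/*
 * ANY/ALL above use a carry trick: for ALL, adding 1 << RRR_S to the
 * masked field carries into bit RRR_S + shift only if every bit of the
 * field is set; for ANY, adding the mask itself carries there if at
 * least one bit is set. That carry bit is shifted down and deposited
 * into BR[RRR_T].
 */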
1196 default: /*reserved*/
1197 RESERVED();
1198 break;
1201 break;
1203 case 1: /*AND*/
1204 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1205 tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1206 break;
1208 case 2: /*OR*/
1209 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1210 tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1211 break;
1213 case 3: /*XOR*/
1214 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1215 tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1216 break;
1218 case 4: /*ST1*/
1219 switch (RRR_R) {
1220 case 0: /*SSR*/
1221 gen_window_check1(dc, RRR_S);
1222 gen_right_shift_sar(dc, cpu_R[RRR_S]);
1223 break;
1225 case 1: /*SSL*/
1226 gen_window_check1(dc, RRR_S);
1227 gen_left_shift_sar(dc, cpu_R[RRR_S]);
1228 break;
1230 case 2: /*SSA8L*/
1231 gen_window_check1(dc, RRR_S);
1233 TCGv_i32 tmp = tcg_temp_new_i32();
1234 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
1235 gen_right_shift_sar(dc, tmp);
1236 tcg_temp_free(tmp);
1238 break;
1240 case 3: /*SSA8B*/
1241 gen_window_check1(dc, RRR_S);
1243 TCGv_i32 tmp = tcg_temp_new_i32();
1244 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
1245 gen_left_shift_sar(dc, tmp);
1246 tcg_temp_free(tmp);
1248 break;
1250 case 4: /*SSAI*/
1252 TCGv_i32 tmp = tcg_const_i32(
1253 RRR_S | ((RRR_T & 1) << 4));
1254 gen_right_shift_sar(dc, tmp);
1255 tcg_temp_free(tmp);
1257 break;
1259 case 6: /*RER*/
1260 TBD();
1261 break;
1263 case 7: /*WER*/
1264 TBD();
1265 break;
1267 case 8: /*ROTWw*/
1268 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1269 gen_check_privilege(dc);
1271 TCGv_i32 tmp = tcg_const_i32(
1272 RRR_T | ((RRR_T & 8) ? 0xfffffff0 : 0));
1273 gen_helper_rotw(cpu_env, tmp);
1274 tcg_temp_free(tmp);
1275 reset_used_window(dc);
1277 break;
1279 case 14: /*NSAu*/
1280 HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
1281 gen_window_check2(dc, RRR_S, RRR_T);
1282 gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]);
1283 break;
1285 case 15: /*NSAUu*/
1286 HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
1287 gen_window_check2(dc, RRR_S, RRR_T);
1288 gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]);
1289 break;
1291 default: /*reserved*/
1292 RESERVED();
1293 break;
1295 break;
1297 case 5: /*TLB*/
1298 HAS_OPTION_BITS(
1299 XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) |
1300 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
1301 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION));
1302 gen_check_privilege(dc);
1303 gen_window_check2(dc, RRR_S, RRR_T);
1305 TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0);
1307 switch (RRR_R & 7) {
1308 case 3: /*RITLB0*/ /*RDTLB0*/
1309 gen_helper_rtlb0(cpu_R[RRR_T],
1310 cpu_env, cpu_R[RRR_S], dtlb);
1311 break;
1313 case 4: /*IITLB*/ /*IDTLB*/
1314 gen_helper_itlb(cpu_env, cpu_R[RRR_S], dtlb);
1315 /* This could change memory mapping, so exit tb */
1316 gen_jumpi_check_loop_end(dc, -1);
1317 break;
1319 case 5: /*PITLB*/ /*PDTLB*/
1320 tcg_gen_movi_i32(cpu_pc, dc->pc);
1321 gen_helper_ptlb(cpu_R[RRR_T],
1322 cpu_env, cpu_R[RRR_S], dtlb);
1323 break;
1325 case 6: /*WITLB*/ /*WDTLB*/
1326 gen_helper_wtlb(
1327 cpu_env, cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1328 /* This could change memory mapping, so exit tb */
1329 gen_jumpi_check_loop_end(dc, -1);
1330 break;
1332 case 7: /*RITLB1*/ /*RDTLB1*/
1333 gen_helper_rtlb1(cpu_R[RRR_T],
1334 cpu_env, cpu_R[RRR_S], dtlb);
1335 break;
1337 default:
1338 tcg_temp_free(dtlb);
1339 RESERVED();
1340 break;
1342 tcg_temp_free(dtlb);
1344 break;
1346 case 6: /*RT0*/
1347 gen_window_check2(dc, RRR_R, RRR_T);
1348 switch (RRR_S) {
1349 case 0: /*NEG*/
1350 tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1351 break;
1353 case 1: /*ABS*/
1355 int label = gen_new_label();
1356 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1357 tcg_gen_brcondi_i32(
1358 TCG_COND_GE, cpu_R[RRR_R], 0, label);
1359 tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1360 gen_set_label(label);
1362 break;
1364 default: /*reserved*/
1365 RESERVED();
1366 break;
1368 break;
1370 case 7: /*reserved*/
1371 RESERVED();
1372 break;
1374 case 8: /*ADD*/
1375 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1376 tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1377 break;
1379 case 9: /*ADD**/
1380 case 10:
1381 case 11:
1382 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1384 TCGv_i32 tmp = tcg_temp_new_i32();
1385 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8);
1386 tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
1387 tcg_temp_free(tmp);
1389 break;
1391 case 12: /*SUB*/
1392 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1393 tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1394 break;
1396 case 13: /*SUB**/
1397 case 14:
1398 case 15:
1399 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1401 TCGv_i32 tmp = tcg_temp_new_i32();
1402 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12);
1403 tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
1404 tcg_temp_free(tmp);
1406 break;
1408 break;
1410 case 1: /*RST1*/
1411 switch (OP2) {
1412 case 0: /*SLLI*/
1413 case 1:
1414 gen_window_check2(dc, RRR_R, RRR_S);
1415 tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S],
1416 32 - (RRR_T | ((OP2 & 1) << 4)));
1417 break;
1419 case 2: /*SRAI*/
1420 case 3:
1421 gen_window_check2(dc, RRR_R, RRR_T);
1422 tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T],
1423 RRR_S | ((OP2 & 1) << 4));
1424 break;
1426 case 4: /*SRLI*/
1427 gen_window_check2(dc, RRR_R, RRR_T);
1428 tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S);
1429 break;
1431 case 6: /*XSR*/
1433 TCGv_i32 tmp = tcg_temp_new_i32();
1434 if (RSR_SR >= 64) {
1435 gen_check_privilege(dc);
1437 gen_window_check1(dc, RRR_T);
1438 tcg_gen_mov_i32(tmp, cpu_R[RRR_T]);
1439 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
1440 gen_wsr(dc, RSR_SR, tmp);
1441 tcg_temp_free(tmp);
1442 if (!sregnames[RSR_SR]) {
1443 TBD();
1446 break;
1448 /*
1449 * Note: 64 bit ops are used here solely because SAR values
1450 * have range 0..63
1451 */
1452 #define gen_shift_reg(cmd, reg) do { \
1453 TCGv_i64 tmp = tcg_temp_new_i64(); \
1454 tcg_gen_extu_i32_i64(tmp, reg); \
1455 tcg_gen_##cmd##_i64(v, v, tmp); \
1456 tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \
1457 tcg_temp_free_i64(v); \
1458 tcg_temp_free_i64(tmp); \
1459 } while (0)
1461 #define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
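/*
 * The shift fallbacks work on 64 bits: SRC shifts the concatenation of
 * two 32-bit registers, and for the other shifts SAR may hold any
 * value in 0..63 when the 5-bit trackers above do not apply; the i64
 * result is truncated back into cpu_R[RRR_R].
 */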
1463 case 8: /*SRC*/
1464 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1466 TCGv_i64 v = tcg_temp_new_i64();
1467 tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]);
1468 gen_shift(shr);
1470 break;
1472 case 9: /*SRL*/
1473 gen_window_check2(dc, RRR_R, RRR_T);
1474 if (dc->sar_5bit) {
1475 tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
1476 } else {
1477 TCGv_i64 v = tcg_temp_new_i64();
1478 tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]);
1479 gen_shift(shr);
1481 break;
1483 case 10: /*SLL*/
1484 gen_window_check2(dc, RRR_R, RRR_S);
1485 if (dc->sar_m32_5bit) {
1486 tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32);
1487 } else {
1488 TCGv_i64 v = tcg_temp_new_i64();
1489 TCGv_i32 s = tcg_const_i32(32);
1490 tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
1491 tcg_gen_andi_i32(s, s, 0x3f);
1492 tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]);
1493 gen_shift_reg(shl, s);
1494 tcg_temp_free(s);
1496 break;
1498 case 11: /*SRA*/
1499 gen_window_check2(dc, RRR_R, RRR_T);
1500 if (dc->sar_5bit) {
1501 tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
1502 } else {
1503 TCGv_i64 v = tcg_temp_new_i64();
1504 tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]);
1505 gen_shift(sar);
1507 break;
1508 #undef gen_shift
1509 #undef gen_shift_reg
1511 case 12: /*MUL16U*/
1512 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
1513 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1515 TCGv_i32 v1 = tcg_temp_new_i32();
1516 TCGv_i32 v2 = tcg_temp_new_i32();
1517 tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]);
1518 tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]);
1519 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
1520 tcg_temp_free(v2);
1521 tcg_temp_free(v1);
1523 break;
1525 case 13: /*MUL16S*/
1526 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
1527 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1529 TCGv_i32 v1 = tcg_temp_new_i32();
1530 TCGv_i32 v2 = tcg_temp_new_i32();
1531 tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]);
1532 tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]);
1533 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
1534 tcg_temp_free(v2);
1535 tcg_temp_free(v1);
1537 break;
1539 default: /*reserved*/
1540 RESERVED();
1541 break;
1543 break;
1545 case 2: /*RST2*/
1546 if (OP2 >= 8) {
1547 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1550 if (OP2 >= 12) {
1551 HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV);
1552 int label = gen_new_label();
1553 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label);
1554 gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE);
1555 gen_set_label(label);
1558 switch (OP2) {
1559 #define BOOLEAN_LOGIC(fn, r, s, t) \
1560 do { \
1561 HAS_OPTION(XTENSA_OPTION_BOOLEAN); \
1562 TCGv_i32 tmp1 = tcg_temp_new_i32(); \
1563 TCGv_i32 tmp2 = tcg_temp_new_i32(); \
1565 tcg_gen_shri_i32(tmp1, cpu_SR[BR], s); \
1566 tcg_gen_shri_i32(tmp2, cpu_SR[BR], t); \
1567 tcg_gen_##fn##_i32(tmp1, tmp1, tmp2); \
1568 tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp1, r, 1); \
1569 tcg_temp_free(tmp1); \
1570 tcg_temp_free(tmp2); \
1571 } while (0)
1573 case 0: /*ANDBp*/
1574 BOOLEAN_LOGIC(and, RRR_R, RRR_S, RRR_T);
1575 break;
1577 case 1: /*ANDBCp*/
1578 BOOLEAN_LOGIC(andc, RRR_R, RRR_S, RRR_T);
1579 break;
1581 case 2: /*ORBp*/
1582 BOOLEAN_LOGIC(or, RRR_R, RRR_S, RRR_T);
1583 break;
1585 case 3: /*ORBCp*/
1586 BOOLEAN_LOGIC(orc, RRR_R, RRR_S, RRR_T);
1587 break;
1589 case 4: /*XORBp*/
1590 BOOLEAN_LOGIC(xor, RRR_R, RRR_S, RRR_T);
1591 break;
1593 #undef BOOLEAN_LOGIC
1595 case 8: /*MULLi*/
1596 HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL);
1597 tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1598 break;
1600 case 10: /*MULUHi*/
1601 case 11: /*MULSHi*/
1602 HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL_HIGH);
1604 TCGv_i64 r = tcg_temp_new_i64();
1605 TCGv_i64 s = tcg_temp_new_i64();
1606 TCGv_i64 t = tcg_temp_new_i64();
1608 if (OP2 == 10) {
1609 tcg_gen_extu_i32_i64(s, cpu_R[RRR_S]);
1610 tcg_gen_extu_i32_i64(t, cpu_R[RRR_T]);
1611 } else {
1612 tcg_gen_ext_i32_i64(s, cpu_R[RRR_S]);
1613 tcg_gen_ext_i32_i64(t, cpu_R[RRR_T]);
1615 tcg_gen_mul_i64(r, s, t);
1616 tcg_gen_shri_i64(r, r, 32);
1617 tcg_gen_trunc_i64_i32(cpu_R[RRR_R], r);
1619 tcg_temp_free_i64(r);
1620 tcg_temp_free_i64(s);
1621 tcg_temp_free_i64(t);
1623 break;
1625 case 12: /*QUOUi*/
1626 tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1627 break;
1629 case 13: /*QUOSi*/
1630 case 15: /*REMSi*/
1632 int label1 = gen_new_label();
1633 int label2 = gen_new_label();
1635 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000,
1636 label1);
1637 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff,
1638 label1);
1639 tcg_gen_movi_i32(cpu_R[RRR_R],
1640 OP2 == 13 ? 0x80000000 : 0);
1641 tcg_gen_br(label2);
1642 gen_set_label(label1);
1643 if (OP2 == 13) {
1644 tcg_gen_div_i32(cpu_R[RRR_R],
1645 cpu_R[RRR_S], cpu_R[RRR_T]);
1646 } else {
1647 tcg_gen_rem_i32(cpu_R[RRR_R],
1648 cpu_R[RRR_S], cpu_R[RRR_T]);
1650 gen_set_label(label2);
1652 break;
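/*
 * QUOS/REMS special-case 0x80000000 / -1 above (quotient 0x80000000,
 * remainder 0) so the TCG div/rem ops are never asked to perform the
 * one signed division that overflows.
 */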
1654 case 14: /*REMUi*/
1655 tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1656 break;
1658 default: /*reserved*/
1659 RESERVED();
1660 break;
1662 break;
1664 case 3: /*RST3*/
1665 switch (OP2) {
1666 case 0: /*RSR*/
1667 if (RSR_SR >= 64) {
1668 gen_check_privilege(dc);
1670 gen_window_check1(dc, RRR_T);
1671 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
1672 if (!sregnames[RSR_SR]) {
1673 TBD();
1675 break;
1677 case 1: /*WSR*/
1678 if (RSR_SR >= 64) {
1679 gen_check_privilege(dc);
1681 gen_window_check1(dc, RRR_T);
1682 gen_wsr(dc, RSR_SR, cpu_R[RRR_T]);
1683 if (!sregnames[RSR_SR]) {
1684 TBD();
1686 break;
1688 case 2: /*SEXTu*/
1689 HAS_OPTION(XTENSA_OPTION_MISC_OP_SEXT);
1690 gen_window_check2(dc, RRR_R, RRR_S);
1692 int shift = 24 - RRR_T;
1694 if (shift == 24) {
1695 tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1696 } else if (shift == 16) {
1697 tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1698 } else {
1699 TCGv_i32 tmp = tcg_temp_new_i32();
1700 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift);
1701 tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift);
1702 tcg_temp_free(tmp);
1705 break;
1707 case 3: /*CLAMPSu*/
1708 HAS_OPTION(XTENSA_OPTION_MISC_OP_CLAMPS);
1709 gen_window_check2(dc, RRR_R, RRR_S);
1711 TCGv_i32 tmp1 = tcg_temp_new_i32();
1712 TCGv_i32 tmp2 = tcg_temp_new_i32();
1713 int label = gen_new_label();
1715 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T);
1716 tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]);
1717 tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7));
1718 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1719 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp2, 0, label);
1721 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31);
1722 tcg_gen_xori_i32(cpu_R[RRR_R], tmp1,
1723 0xffffffff >> (25 - RRR_T));
1725 gen_set_label(label);
1727 tcg_temp_free(tmp1);
1728 tcg_temp_free(tmp2);
1730 break;
1732 case 4: /*MINu*/
1733 case 5: /*MAXu*/
1734 case 6: /*MINUu*/
1735 case 7: /*MAXUu*/
1736 HAS_OPTION(XTENSA_OPTION_MISC_OP_MINMAX);
1737 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1739 static const TCGCond cond[] = {
1740 TCG_COND_LE,
1741 TCG_COND_GE,
1742 TCG_COND_LEU,
1743 TCG_COND_GEU
1745 int label = gen_new_label();
1747 if (RRR_R != RRR_T) {
1748 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1749 tcg_gen_brcond_i32(cond[OP2 - 4],
1750 cpu_R[RRR_S], cpu_R[RRR_T], label);
1751 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1752 } else {
1753 tcg_gen_brcond_i32(cond[OP2 - 4],
1754 cpu_R[RRR_T], cpu_R[RRR_S], label);
1755 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1757 gen_set_label(label);
1759 break;
1761 case 8: /*MOVEQZ*/
1762 case 9: /*MOVNEZ*/
1763 case 10: /*MOVLTZ*/
1764 case 11: /*MOVGEZ*/
1765 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1767 static const TCGCond cond[] = {
1768 TCG_COND_NE,
1769 TCG_COND_EQ,
1770 TCG_COND_GE,
1771 TCG_COND_LT
1773 int label = gen_new_label();
1774 tcg_gen_brcondi_i32(cond[OP2 - 8], cpu_R[RRR_T], 0, label);
1775 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1776 gen_set_label(label);
1778 break;
1780 case 12: /*MOVFp*/
1781 case 13: /*MOVTp*/
1782 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1783 gen_window_check2(dc, RRR_R, RRR_S);
1785 int label = gen_new_label();
1786 TCGv_i32 tmp = tcg_temp_new_i32();
1788 tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
1789 tcg_gen_brcondi_i32(
1790 OP2 & 1 ? TCG_COND_EQ : TCG_COND_NE,
1791 tmp, 0, label);
1792 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1793 gen_set_label(label);
1794 tcg_temp_free(tmp);
1796 break;
1798 case 14: /*RUR*/
1799 gen_window_check1(dc, RRR_R);
1801 int st = (RRR_S << 4) + RRR_T;
1802 if (uregnames[st]) {
1803 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]);
1804 } else {
1805 qemu_log("RUR %d not implemented, ", st);
1806 TBD();
1809 break;
1811 case 15: /*WUR*/
1812 gen_window_check1(dc, RRR_T);
1813 if (uregnames[RSR_SR]) {
1814 gen_wur(RSR_SR, cpu_R[RRR_T]);
1815 } else {
1816 qemu_log("WUR %d not implemented, ", RSR_SR);
1817 TBD();
1819 break;
1822 break;
1824 case 4: /*EXTUI*/
1825 case 5:
1826 gen_window_check2(dc, RRR_R, RRR_T);
1828 int shiftimm = RRR_S | ((OP1 & 1) << 4);
1829 int maskimm = (1 << (OP2 + 1)) - 1;
1831 TCGv_i32 tmp = tcg_temp_new_i32();
1832 tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm);
1833 tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm);
1834 tcg_temp_free(tmp);
1836 break;
1838 case 6: /*CUST0*/
1839 RESERVED();
1840 break;
1842 case 7: /*CUST1*/
1843 RESERVED();
1844 break;
1846 case 8: /*LSCXp*/
1847 switch (OP2) {
1848 case 0: /*LSXf*/
1849 case 1: /*LSXUf*/
1850 case 4: /*SSXf*/
1851 case 5: /*SSXUf*/
1852 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1853 gen_window_check2(dc, RRR_S, RRR_T);
1854 gen_check_cpenable(dc, 0);
1856 TCGv_i32 addr = tcg_temp_new_i32();
1857 tcg_gen_add_i32(addr, cpu_R[RRR_S], cpu_R[RRR_T]);
1858 gen_load_store_alignment(dc, 2, addr, false);
1859 if (OP2 & 0x4) {
1860 tcg_gen_qemu_st32(cpu_FR[RRR_R], addr, dc->cring);
1861 } else {
1862 tcg_gen_qemu_ld32u(cpu_FR[RRR_R], addr, dc->cring);
1864 if (OP2 & 0x1) {
1865 tcg_gen_mov_i32(cpu_R[RRR_S], addr);
1867 tcg_temp_free(addr);
1869 break;
1871 default: /*reserved*/
1872 RESERVED();
1873 break;
1875 break;
1877 case 9: /*LSC4*/
1878 gen_window_check2(dc, RRR_S, RRR_T);
1879 switch (OP2) {
1880 case 0: /*L32E*/
1881 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1882 gen_check_privilege(dc);
1884 TCGv_i32 addr = tcg_temp_new_i32();
1885 tcg_gen_addi_i32(addr, cpu_R[RRR_S],
1886 (0xffffffc0 | (RRR_R << 2)));
1887 tcg_gen_qemu_ld32u(cpu_R[RRR_T], addr, dc->ring);
1888 tcg_temp_free(addr);
1890 break;
1892 case 4: /*S32E*/
1893 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1894 gen_check_privilege(dc);
1896 TCGv_i32 addr = tcg_temp_new_i32();
1897 tcg_gen_addi_i32(addr, cpu_R[RRR_S],
1898 (0xffffffc0 | (RRR_R << 2)));
1899 tcg_gen_qemu_st32(cpu_R[RRR_T], addr, dc->ring);
1900 tcg_temp_free(addr);
1902 break;
1904 default:
1905 RESERVED();
1906 break;
1908 break;
1910 case 10: /*FP0*/
1911 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1912 switch (OP2) {
1913 case 0: /*ADD.Sf*/
1914 gen_check_cpenable(dc, 0);
1915 gen_helper_add_s(cpu_FR[RRR_R], cpu_env,
1916 cpu_FR[RRR_S], cpu_FR[RRR_T]);
1917 break;
1919 case 1: /*SUB.Sf*/
1920 gen_check_cpenable(dc, 0);
1921 gen_helper_sub_s(cpu_FR[RRR_R], cpu_env,
1922 cpu_FR[RRR_S], cpu_FR[RRR_T]);
1923 break;
1925 case 2: /*MUL.Sf*/
1926 gen_check_cpenable(dc, 0);
1927 gen_helper_mul_s(cpu_FR[RRR_R], cpu_env,
1928 cpu_FR[RRR_S], cpu_FR[RRR_T]);
1929 break;
1931 case 4: /*MADD.Sf*/
1932 gen_check_cpenable(dc, 0);
1933 gen_helper_madd_s(cpu_FR[RRR_R], cpu_env,
1934 cpu_FR[RRR_R], cpu_FR[RRR_S], cpu_FR[RRR_T]);
1935 break;
1937 case 5: /*MSUB.Sf*/
1938 gen_check_cpenable(dc, 0);
1939 gen_helper_msub_s(cpu_FR[RRR_R], cpu_env,
1940 cpu_FR[RRR_R], cpu_FR[RRR_S], cpu_FR[RRR_T]);
1941 break;
1943 case 8: /*ROUND.Sf*/
1944 case 9: /*TRUNC.Sf*/
1945 case 10: /*FLOOR.Sf*/
1946 case 11: /*CEIL.Sf*/
1947 case 14: /*UTRUNC.Sf*/
1948 gen_window_check1(dc, RRR_R);
1949 gen_check_cpenable(dc, 0);
1951 static const unsigned rounding_mode_const[] = {
1952 float_round_nearest_even,
1953 float_round_to_zero,
1954 float_round_down,
1955 float_round_up,
1956 [6] = float_round_to_zero,
1958 TCGv_i32 rounding_mode = tcg_const_i32(
1959 rounding_mode_const[OP2 & 7]);
1960 TCGv_i32 scale = tcg_const_i32(RRR_T);
1962 if (OP2 == 14) {
1963 gen_helper_ftoui(cpu_R[RRR_R], cpu_FR[RRR_S],
1964 rounding_mode, scale);
1965 } else {
1966 gen_helper_ftoi(cpu_R[RRR_R], cpu_FR[RRR_S],
1967 rounding_mode, scale);
1970 tcg_temp_free(rounding_mode);
1971 tcg_temp_free(scale);
1973 break;
1975 case 12: /*FLOAT.Sf*/
1976 case 13: /*UFLOAT.Sf*/
1977 gen_window_check1(dc, RRR_S);
1978 gen_check_cpenable(dc, 0);
1980 TCGv_i32 scale = tcg_const_i32(-RRR_T);
1982 if (OP2 == 13) {
1983 gen_helper_uitof(cpu_FR[RRR_R], cpu_env,
1984 cpu_R[RRR_S], scale);
1985 } else {
1986 gen_helper_itof(cpu_FR[RRR_R], cpu_env,
1987 cpu_R[RRR_S], scale);
1989 tcg_temp_free(scale);
1991 break;
1993 case 15: /*FP1OP*/
1994 switch (RRR_T) {
1995 case 0: /*MOV.Sf*/
1996 gen_check_cpenable(dc, 0);
1997 tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_FR[RRR_S]);
1998 break;
2000 case 1: /*ABS.Sf*/
2001 gen_check_cpenable(dc, 0);
2002 gen_helper_abs_s(cpu_FR[RRR_R], cpu_FR[RRR_S]);
2003 break;
2005 case 4: /*RFRf*/
2006 gen_window_check1(dc, RRR_R);
2007 gen_check_cpenable(dc, 0);
2008 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_FR[RRR_S]);
2009 break;
2011 case 5: /*WFRf*/
2012 gen_window_check1(dc, RRR_S);
2013 gen_check_cpenable(dc, 0);
2014 tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_R[RRR_S]);
2015 break;
2017 case 6: /*NEG.Sf*/
2018 gen_check_cpenable(dc, 0);
2019 gen_helper_neg_s(cpu_FR[RRR_R], cpu_FR[RRR_S]);
2020 break;
2022 default: /*reserved*/
2023 RESERVED();
2024 break;
2026 break;
2028 default: /*reserved*/
2029 RESERVED();
2030 break;
2032 break;
2034 case 11: /*FP1*/
2035 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
2037 #define gen_compare(rel, br, a, b) \
2038 do { \
2039 TCGv_i32 bit = tcg_const_i32(1 << br); \
2041 gen_check_cpenable(dc, 0); \
2042 gen_helper_##rel(cpu_env, bit, cpu_FR[a], cpu_FR[b]); \
2043 tcg_temp_free(bit); \
2044 } while (0)
2046 switch (OP2) {
2047 case 1: /*UN.Sf*/
2048 gen_compare(un_s, RRR_R, RRR_S, RRR_T);
2049 break;
2051 case 2: /*OEQ.Sf*/
2052 gen_compare(oeq_s, RRR_R, RRR_S, RRR_T);
2053 break;
2055 case 3: /*UEQ.Sf*/
2056 gen_compare(ueq_s, RRR_R, RRR_S, RRR_T);
2057 break;
2059 case 4: /*OLT.Sf*/
2060 gen_compare(olt_s, RRR_R, RRR_S, RRR_T);
2061 break;
2063 case 5: /*ULT.Sf*/
2064 gen_compare(ult_s, RRR_R, RRR_S, RRR_T);
2065 break;
2067 case 6: /*OLE.Sf*/
2068 gen_compare(ole_s, RRR_R, RRR_S, RRR_T);
2069 break;
2071 case 7: /*ULE.Sf*/
2072 gen_compare(ule_s, RRR_R, RRR_S, RRR_T);
2073 break;
2075 #undef gen_compare
2077 case 8: /*MOVEQZ.Sf*/
2078 case 9: /*MOVNEZ.Sf*/
2079 case 10: /*MOVLTZ.Sf*/
2080 case 11: /*MOVGEZ.Sf*/
2081 gen_window_check1(dc, RRR_T);
2082 gen_check_cpenable(dc, 0);
2084 static const TCGCond cond[] = {
2085 TCG_COND_NE,
2086 TCG_COND_EQ,
2087 TCG_COND_GE,
2088 TCG_COND_LT
2090 int label = gen_new_label();
2091 tcg_gen_brcondi_i32(cond[OP2 - 8], cpu_R[RRR_T], 0, label);
2092 tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_FR[RRR_S]);
2093 gen_set_label(label);
2095 break;
2097 case 12: /*MOVF.Sf*/
2098 case 13: /*MOVT.Sf*/
2099 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
2100 gen_check_cpenable(dc, 0);
2102 int label = gen_new_label();
2103 TCGv_i32 tmp = tcg_temp_new_i32();
2105 tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
2106 tcg_gen_brcondi_i32(
2107 OP2 & 1 ? TCG_COND_EQ : TCG_COND_NE,
2108 tmp, 0, label);
2109 tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_FR[RRR_S]);
2110 gen_set_label(label);
2111 tcg_temp_free(tmp);
2113 break;
2115 default: /*reserved*/
2116 RESERVED();
2117 break;
2119 break;
2121 default: /*reserved*/
2122 RESERVED();
2123 break;
2125 break;
2127 case 1: /*L32R*/
2128 gen_window_check1(dc, RRR_T);
2130 TCGv_i32 tmp = tcg_const_i32(
2131 ((dc->tb->flags & XTENSA_TBFLAG_LITBASE) ?
2132 0 : ((dc->pc + 3) & ~3)) +
2133 (0xfffc0000 | (RI16_IMM16 << 2)));
2135 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
2136 tcg_gen_add_i32(tmp, tmp, dc->litbase);
2138 tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring);
2139 tcg_temp_free(tmp);
2141 break;
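/*
 * L32R: the 16-bit immediate is a word offset, sign-extended so the
 * literal always lies below the base address; the base is
 * (PC + 3) & ~3, or the 4 KB-aligned literal base prepared in
 * init_litbase() when the LITBASE TB flag is set.
 */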
2143 case 2: /*LSAI*/
2144 #define gen_load_store(type, shift) do { \
2145 TCGv_i32 addr = tcg_temp_new_i32(); \
2146 gen_window_check2(dc, RRI8_S, RRI8_T); \
2147 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \
2148 if (shift) { \
2149 gen_load_store_alignment(dc, shift, addr, false); \
2151 tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
2152 tcg_temp_free(addr); \
2153 } while (0)
2155 switch (RRI8_R) {
2156 case 0: /*L8UI*/
2157 gen_load_store(ld8u, 0);
2158 break;
2160 case 1: /*L16UI*/
2161 gen_load_store(ld16u, 1);
2162 break;
2164 case 2: /*L32I*/
2165 gen_load_store(ld32u, 2);
2166 break;
2168 case 4: /*S8I*/
2169 gen_load_store(st8, 0);
2170 break;
2172 case 5: /*S16I*/
2173 gen_load_store(st16, 1);
2174 break;
2176 case 6: /*S32I*/
2177 gen_load_store(st32, 2);
2178 break;
2180 case 7: /*CACHEc*/
2181 if (RRI8_T < 8) {
2182 HAS_OPTION(XTENSA_OPTION_DCACHE);
2185 switch (RRI8_T) {
2186 case 0: /*DPFRc*/
2187 break;
2189 case 1: /*DPFWc*/
2190 break;
2192 case 2: /*DPFROc*/
2193 break;
2195 case 3: /*DPFWOc*/
2196 break;
2198 case 4: /*DHWBc*/
2199 break;
2201 case 5: /*DHWBIc*/
2202 break;
2204 case 6: /*DHIc*/
2205 break;
2207 case 7: /*DIIc*/
2208 break;
2210 case 8: /*DCEc*/
2211 switch (OP1) {
2212 case 0: /*DPFLl*/
2213 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
2214 break;
2216 case 2: /*DHUl*/
2217 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
2218 break;
2220 case 3: /*DIUl*/
2221 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
2222 break;
2224 case 4: /*DIWBc*/
2225 HAS_OPTION(XTENSA_OPTION_DCACHE);
2226 break;
2228 case 5: /*DIWBIc*/
2229 HAS_OPTION(XTENSA_OPTION_DCACHE);
2230 break;
2232 default: /*reserved*/
2233 RESERVED();
2234 break;
2237 break;
2239 case 12: /*IPFc*/
2240 HAS_OPTION(XTENSA_OPTION_ICACHE);
2241 break;
2243 case 13: /*ICEc*/
2244 switch (OP1) {
2245 case 0: /*IPFLl*/
2246 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
2247 break;
2249 case 2: /*IHUl*/
2250 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
2251 break;
2253 case 3: /*IIUl*/
2254 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
2255 break;
2257 default: /*reserved*/
2258 RESERVED();
2259 break;
2261 break;
2263 case 14: /*IHIc*/
2264 HAS_OPTION(XTENSA_OPTION_ICACHE);
2265 break;
2267 case 15: /*IIIc*/
2268 HAS_OPTION(XTENSA_OPTION_ICACHE);
2269 break;
2271 default: /*reserved*/
2272 RESERVED();
2273 break;
2275 break;
2277 case 9: /*L16SI*/
2278 gen_load_store(ld16s, 1);
2279 break;
2280 #undef gen_load_store
2282 case 10: /*MOVI*/
2283 gen_window_check1(dc, RRI8_T);
2284 tcg_gen_movi_i32(cpu_R[RRI8_T],
2285 RRI8_IMM8 | (RRI8_S << 8) |
2286 ((RRI8_S & 0x8) ? 0xfffff000 : 0));
2287 break;
2289 #define gen_load_store_no_hw_align(type) do { \
2290 TCGv_i32 addr = tcg_temp_local_new_i32(); \
2291 gen_window_check2(dc, RRI8_S, RRI8_T); \
2292 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); \
2293 gen_load_store_alignment(dc, 2, addr, true); \
2294 tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
2295 tcg_temp_free(addr); \
2296 } while (0)
2298 case 11: /*L32AIy*/
2299 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
2300 gen_load_store_no_hw_align(ld32u); /*TODO acquire?*/
2301 break;
2303 case 12: /*ADDI*/
2304 gen_window_check2(dc, RRI8_S, RRI8_T);
2305 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE);
2306 break;
2308 case 13: /*ADDMI*/
2309 gen_window_check2(dc, RRI8_S, RRI8_T);
2310 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE << 8);
2311 break;
2313 case 14: /*S32C1Iy*/
2314 HAS_OPTION(XTENSA_OPTION_CONDITIONAL_STORE);
2315 gen_window_check2(dc, RRI8_S, RRI8_T);
2317 int label = gen_new_label();
2318 TCGv_i32 tmp = tcg_temp_local_new_i32();
2319 TCGv_i32 addr = tcg_temp_local_new_i32();
2321 tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]);
2322 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
2323 gen_load_store_alignment(dc, 2, addr, true);
2324 tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring);
2325 tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T],
2326 cpu_SR[SCOMPARE1], label);
2328 tcg_gen_qemu_st32(tmp, addr, dc->cring);
2330 gen_set_label(label);
2331 tcg_temp_free(addr);
2332 tcg_temp_free(tmp);
2334 break;
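/*
 * S32C1I conditional store: the word at the address is loaded into
 * a_t, and the original a_t value is stored back only if the loaded
 * word equals SCOMPARE1; otherwise memory is left untouched.
 */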
2336 case 15: /*S32RIy*/
2337 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
2338 gen_load_store_no_hw_align(st32); /*TODO release?*/
2339 break;
2340 #undef gen_load_store_no_hw_align
2342 default: /*reserved*/
2343 RESERVED();
2344 break;
2346 break;
2348 case 3: /*LSCIp*/
2349 switch (RRI8_R) {
2350 case 0: /*LSIf*/
2351 case 4: /*SSIf*/
2352 case 8: /*LSIUf*/
2353 case 12: /*SSIUf*/
2354 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
2355 gen_window_check1(dc, RRI8_S);
2356 gen_check_cpenable(dc, 0);
2357 {
2358 TCGv_i32 addr = tcg_temp_new_i32();
2359 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
2360 gen_load_store_alignment(dc, 2, addr, false);
2361 if (RRI8_R & 0x4) {
2362 tcg_gen_qemu_st32(cpu_FR[RRI8_T], addr, dc->cring);
2363 } else {
2364 tcg_gen_qemu_ld32u(cpu_FR[RRI8_T], addr, dc->cring);
2365 }
2366 if (RRI8_R & 0x8) {
2367 tcg_gen_mov_i32(cpu_R[RRI8_S], addr);
2368 }
2369 tcg_temp_free(addr);
2370 }
2371 break;
2373 default: /*reserved*/
2374 RESERVED();
2375 break;
2376 }
2377 break;
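/*
 * MAC16: as decoded here, OP2 selects the addressing form and whether the
 * multiplier operands come from the MR registers or from AR[s]/AR[t]
 * (is_m1_sr/is_m2_sr); OP1 selects the half-words and the UMUL/MUL/MULA/MULS
 * operation.  The LDINC/LDDEC forms also load a 32-bit word into an MR
 * register from AR[s] +/- 4 and write the updated address back to AR[s].
 * Results are accumulated in ACCLO, with ACCHI holding the sign-extended
 * top byte of the 40-bit accumulator.
 */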
2379 case 4: /*MAC16d*/
2380 HAS_OPTION(XTENSA_OPTION_MAC16);
2381 {
2382 enum {
2383 MAC16_UMUL = 0x0,
2384 MAC16_MUL = 0x4,
2385 MAC16_MULA = 0x8,
2386 MAC16_MULS = 0xc,
2387 MAC16_NONE = 0xf,
2388 } op = OP1 & 0xc;
2389 bool is_m1_sr = (OP2 & 0x3) == 2;
2390 bool is_m2_sr = (OP2 & 0xc) == 0;
2391 uint32_t ld_offset = 0;
2393 if (OP2 > 9) {
2394 RESERVED();
2395 }
2397 switch (OP2 & 2) {
2398 case 0: /*MACI?/MACC?*/
2399 is_m1_sr = true;
2400 ld_offset = (OP2 & 1) ? -4 : 4;
2402 if (OP2 >= 8) { /*MACI/MACC*/
2403 if (OP1 == 0) { /*LDINC/LDDEC*/
2404 op = MAC16_NONE;
2405 } else {
2406 RESERVED();
2407 }
2408 } else if (op != MAC16_MULA) { /*MULA.*.*.LDINC/LDDEC*/
2409 RESERVED();
2410 }
2411 break;
2413 case 2: /*MACD?/MACA?*/
2414 if (op == MAC16_UMUL && OP2 != 7) { /*UMUL only in MACAA*/
2415 RESERVED();
2416 }
2417 break;
2418 }
2420 if (op != MAC16_NONE) {
2421 if (!is_m1_sr) {
2422 gen_window_check1(dc, RRR_S);
2423 }
2424 if (!is_m2_sr) {
2425 gen_window_check1(dc, RRR_T);
2426 }
2427 }
2429 {
2430 TCGv_i32 vaddr = tcg_temp_new_i32();
2431 TCGv_i32 mem32 = tcg_temp_new_i32();
2433 if (ld_offset) {
2434 gen_window_check1(dc, RRR_S);
2435 tcg_gen_addi_i32(vaddr, cpu_R[RRR_S], ld_offset);
2436 gen_load_store_alignment(dc, 2, vaddr, false);
2437 tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring);
2438 }
2439 if (op != MAC16_NONE) {
2440 TCGv_i32 m1 = gen_mac16_m(
2441 is_m1_sr ? cpu_SR[MR + RRR_X] : cpu_R[RRR_S],
2442 OP1 & 1, op == MAC16_UMUL);
2443 TCGv_i32 m2 = gen_mac16_m(
2444 is_m2_sr ? cpu_SR[MR + 2 + RRR_Y] : cpu_R[RRR_T],
2445 OP1 & 2, op == MAC16_UMUL);
2447 if (op == MAC16_MUL || op == MAC16_UMUL) {
2448 tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2);
2449 if (op == MAC16_UMUL) {
2450 tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
2451 } else {
2452 tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
2453 }
2454 } else {
2455 TCGv_i32 res = tcg_temp_new_i32();
2456 TCGv_i64 res64 = tcg_temp_new_i64();
2457 TCGv_i64 tmp = tcg_temp_new_i64();
2459 tcg_gen_mul_i32(res, m1, m2);
2460 tcg_gen_ext_i32_i64(res64, res);
2461 tcg_gen_concat_i32_i64(tmp,
2462 cpu_SR[ACCLO], cpu_SR[ACCHI]);
2463 if (op == MAC16_MULA) {
2464 tcg_gen_add_i64(tmp, tmp, res64);
2465 } else {
2466 tcg_gen_sub_i64(tmp, tmp, res64);
2467 }
2468 tcg_gen_trunc_i64_i32(cpu_SR[ACCLO], tmp);
2469 tcg_gen_shri_i64(tmp, tmp, 32);
2470 tcg_gen_trunc_i64_i32(cpu_SR[ACCHI], tmp);
2471 tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]);
2473 tcg_temp_free(res);
2474 tcg_temp_free_i64(res64);
2475 tcg_temp_free_i64(tmp);
2476 }
2477 tcg_temp_free(m1);
2478 tcg_temp_free(m2);
2479 }
2480 if (ld_offset) {
2481 tcg_gen_mov_i32(cpu_R[RRR_S], vaddr);
2482 tcg_gen_mov_i32(cpu_SR[MR + RRR_W], mem32);
2483 }
2484 tcg_temp_free(vaddr);
2485 tcg_temp_free(mem32);
2486 }
2487 }
2488 break;
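/*
 * CALLN: CALL0 places the return address (the next instruction) in a0 and
 * jumps to (PC & ~3) + (offset << 2) + 4.  CALL4/CALL8/CALL12 use the same
 * target but go through gen_callwi() for the windowed-call bookkeeping;
 * the register window itself only rotates when the callee executes ENTRY.
 */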
2490 case 5: /*CALLN*/
2491 switch (CALL_N) {
2492 case 0: /*CALL0*/
2493 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
2494 gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
2495 break;
2497 case 1: /*CALL4w*/
2498 case 2: /*CALL8w*/
2499 case 3: /*CALL12w*/
2500 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2501 gen_window_check1(dc, CALL_N << 2);
2502 gen_callwi(dc, CALL_N,
2503 (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
2504 break;
2505 }
2506 break;
2508 case 6: /*SI*/
2509 switch (CALL_N) {
2510 case 0: /*J*/
2511 gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0);
2512 break;
2514 case 1: /*BZ*/
2515 gen_window_check1(dc, BRI12_S);
2516 {
2517 static const TCGCond cond[] = {
2518 TCG_COND_EQ, /*BEQZ*/
2519 TCG_COND_NE, /*BNEZ*/
2520 TCG_COND_LT, /*BLTZ*/
2521 TCG_COND_GE, /*BGEZ*/
2522 };
2524 gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0,
2525 4 + BRI12_IMM12_SE);
2526 }
2527 break;
2529 case 2: /*BI0*/
2530 gen_window_check1(dc, BRI8_S);
2531 {
2532 static const TCGCond cond[] = {
2533 TCG_COND_EQ, /*BEQI*/
2534 TCG_COND_NE, /*BNEI*/
2535 TCG_COND_LT, /*BLTI*/
2536 TCG_COND_GE, /*BGEI*/
2537 };
2539 gen_brcondi(dc, cond[BRI8_M & 3],
2540 cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE);
2541 }
2542 break;
2544 case 3: /*BI1*/
2545 switch (BRI8_M) {
2546 case 0: /*ENTRYw*/
2547 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2548 {
2549 TCGv_i32 pc = tcg_const_i32(dc->pc);
2550 TCGv_i32 s = tcg_const_i32(BRI12_S);
2551 TCGv_i32 imm = tcg_const_i32(BRI12_IMM12);
2552 gen_advance_ccount(dc);
2553 gen_helper_entry(cpu_env, pc, s, imm);
2554 tcg_temp_free(imm);
2555 tcg_temp_free(s);
2556 tcg_temp_free(pc);
2557 reset_used_window(dc);
2558 }
2559 break;
2561 case 1: /*B1*/
2562 switch (BRI8_R) {
2563 case 0: /*BFp*/
2564 case 1: /*BTp*/
2565 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
2566 {
2567 TCGv_i32 tmp = tcg_temp_new_i32();
2568 tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRI8_S);
2569 gen_brcondi(dc,
2570 BRI8_R == 1 ? TCG_COND_NE : TCG_COND_EQ,
2571 tmp, 0, 4 + RRI8_IMM8_SE);
2572 tcg_temp_free(tmp);
2573 }
2574 break;
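/*
 * LOOP/LOOPNEZ/LOOPGTZ: set up a zero-overhead loop with LCOUNT = AR[s] - 1,
 * LBEG = the address of the next instruction and LEND = PC + imm8 + 4
 * (written through the wsr_lend helper).  LOOPNEZ and LOOPGTZ additionally
 * branch straight to LEND when AR[s] is zero, respectively not greater
 * than zero.
 */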
2576 case 8: /*LOOP*/
2577 case 9: /*LOOPNEZ*/
2578 case 10: /*LOOPGTZ*/
2579 HAS_OPTION(XTENSA_OPTION_LOOP);
2580 gen_window_check1(dc, RRI8_S);
2581 {
2582 uint32_t lend = dc->pc + RRI8_IMM8 + 4;
2583 TCGv_i32 tmp = tcg_const_i32(lend);
2585 tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1);
2586 tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc);
2587 gen_helper_wsr_lend(cpu_env, tmp);
2588 tcg_temp_free(tmp);
2590 if (BRI8_R > 8) {
2591 int label = gen_new_label();
2592 tcg_gen_brcondi_i32(
2593 BRI8_R == 9 ? TCG_COND_NE : TCG_COND_GT,
2594 cpu_R[RRI8_S], 0, label);
2595 gen_jumpi(dc, lend, 1);
2596 gen_set_label(label);
2597 }
2599 gen_jumpi(dc, dc->next_pc, 0);
2600 }
2601 break;
2603 default: /*reserved*/
2604 RESERVED();
2605 break;
2607 }
2608 break;
2610 case 2: /*BLTUI*/
2611 case 3: /*BGEUI*/
2612 gen_window_check1(dc, BRI8_S);
2613 gen_brcondi(dc, BRI8_M == 2 ? TCG_COND_LTU : TCG_COND_GEU,
2614 cpu_R[BRI8_S], B4CONSTU[BRI8_R], 4 + BRI8_IMM8_SE);
2615 break;
2616 }
2617 break;
2619 }
2620 break;
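/*
 * B-format conditional branches: bit 3 of the r field selects the negated
 * condition (eq_ne below); the low three bits select the test:
 * BNONE/BANY, BEQ/BNE, BLT/BGE, BLTU/BGEU, BALL/BNALL, BBC/BBS and
 * BBCI/BBSI.  All of them branch to PC + 4 + sign-extended imm8.
 */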
2622 case 7: /*B*/
2623 {
2624 TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ;
2626 switch (RRI8_R & 7) {
2627 case 0: /*BNONE*/ /*BANY*/
2628 gen_window_check2(dc, RRI8_S, RRI8_T);
2629 {
2630 TCGv_i32 tmp = tcg_temp_new_i32();
2631 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
2632 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2633 tcg_temp_free(tmp);
2634 }
2635 break;
2637 case 1: /*BEQ*/ /*BNE*/
2638 case 2: /*BLT*/ /*BGE*/
2639 case 3: /*BLTU*/ /*BGEU*/
2640 gen_window_check2(dc, RRI8_S, RRI8_T);
2641 {
2642 static const TCGCond cond[] = {
2643 [1] = TCG_COND_EQ,
2644 [2] = TCG_COND_LT,
2645 [3] = TCG_COND_LTU,
2646 [9] = TCG_COND_NE,
2647 [10] = TCG_COND_GE,
2648 [11] = TCG_COND_GEU,
2649 };
2650 gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T],
2651 4 + RRI8_IMM8_SE);
2652 }
2653 break;
2655 case 4: /*BALL*/ /*BNALL*/
2656 gen_window_check2(dc, RRI8_S, RRI8_T);
2657 {
2658 TCGv_i32 tmp = tcg_temp_new_i32();
2659 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
2660 gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T],
2661 4 + RRI8_IMM8_SE);
2662 tcg_temp_free(tmp);
2663 }
2664 break;
2666 case 5: /*BBC*/ /*BBS*/
2667 gen_window_check2(dc, RRI8_S, RRI8_T);
2668 {
2669 #ifdef TARGET_WORDS_BIGENDIAN
2670 TCGv_i32 bit = tcg_const_i32(0x80000000);
2671 #else
2672 TCGv_i32 bit = tcg_const_i32(0x00000001);
2673 #endif
2674 TCGv_i32 tmp = tcg_temp_new_i32();
2675 tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f);
2676 #ifdef TARGET_WORDS_BIGENDIAN
2677 tcg_gen_shr_i32(bit, bit, tmp);
2678 #else
2679 tcg_gen_shl_i32(bit, bit, tmp);
2680 #endif
2681 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit);
2682 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2683 tcg_temp_free(tmp);
2684 tcg_temp_free(bit);
2685 }
2686 break;
2688 case 6: /*BBCI*/ /*BBSI*/
2689 case 7:
2690 gen_window_check1(dc, RRI8_S);
2691 {
2692 TCGv_i32 tmp = tcg_temp_new_i32();
2693 tcg_gen_andi_i32(tmp, cpu_R[RRI8_S],
2694 #ifdef TARGET_WORDS_BIGENDIAN
2695 0x80000000 >> (((RRI8_R & 1) << 4) | RRI8_T));
2696 #else
2697 0x00000001 << (((RRI8_R & 1) << 4) | RRI8_T));
2698 #endif
2699 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2700 tcg_temp_free(tmp);
2701 }
2702 break;
2704 }
2705 }
2706 break;
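/*
 * Narrow (2-byte) loads and stores: L32I.N and S32I.N access the 32-bit
 * word at AR[s] + (r << 2), i.e. a 4-bit offset scaled by 4; for example
 * "l32i.n a3, a2, 12" reads mem32[a2 + 12] into a3.
 */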
2708 #define gen_narrow_load_store(type) do { \
2709 TCGv_i32 addr = tcg_temp_new_i32(); \
2710 gen_window_check2(dc, RRRN_S, RRRN_T); \
2711 tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \
2712 gen_load_store_alignment(dc, 2, addr, false); \
2713 tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \
2714 tcg_temp_free(addr); \
2715 } while (0)
2717 case 8: /*L32I.Nn*/
2718 gen_narrow_load_store(ld32u);
2719 break;
2721 case 9: /*S32I.Nn*/
2722 gen_narrow_load_store(st32);
2723 break;
2724 #undef gen_narrow_load_store
2726 case 10: /*ADD.Nn*/
2727 gen_window_check3(dc, RRRN_R, RRRN_S, RRRN_T);
2728 tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]);
2729 break;
2731 case 11: /*ADDI.Nn*/
2732 gen_window_check2(dc, RRRN_R, RRRN_S);
2733 tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], RRRN_T ? RRRN_T : -1);
2734 break;
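/*
 * ST2: t < 8 encodes MOVI.N, whose 7-bit immediate r | (t << 4) is
 * sign-extended when the top two bits of t are set, giving the range
 * -32..95 (e.g. r = 0xf, t = 0x7 yields -1).  Otherwise this is
 * BEQZ.N/BNEZ.N with a 6-bit unsigned branch offset.
 */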
2736 case 12: /*ST2n*/
2737 gen_window_check1(dc, RRRN_S);
2738 if (RRRN_T < 8) { /*MOVI.Nn*/
2739 tcg_gen_movi_i32(cpu_R[RRRN_S],
2740 RRRN_R | (RRRN_T << 4) |
2741 ((RRRN_T & 6) == 6 ? 0xffffff80 : 0));
2742 } else { /*BEQZ.Nn*/ /*BNEZ.Nn*/
2743 TCGCond eq_ne = (RRRN_T & 4) ? TCG_COND_NE : TCG_COND_EQ;
2745 gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0,
2746 4 + (RRRN_R | ((RRRN_T & 3) << 4)));
2747 }
2748 break;
2750 case 13: /*ST3n*/
2751 switch (RRRN_R) {
2752 case 0: /*MOV.Nn*/
2753 gen_window_check2(dc, RRRN_S, RRRN_T);
2754 tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]);
2755 break;
2757 case 15: /*S3*/
2758 switch (RRRN_T) {
2759 case 0: /*RET.Nn*/
2760 gen_jump(dc, cpu_R[0]);
2761 break;
2763 case 1: /*RETW.Nn*/
2764 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2765 {
2766 TCGv_i32 tmp = tcg_const_i32(dc->pc);
2767 gen_advance_ccount(dc);
2768 gen_helper_retw(tmp, cpu_env, tmp);
2769 gen_jump(dc, tmp);
2770 tcg_temp_free(tmp);
2771 }
2772 break;
2774 case 2: /*BREAK.Nn*/
2775 HAS_OPTION(XTENSA_OPTION_DEBUG);
2776 if (dc->debug) {
2777 gen_debug_exception(dc, DEBUGCAUSE_BN);
2778 }
2779 break;
2781 case 3: /*NOP.Nn*/
2782 break;
2784 case 6: /*ILL.Nn*/
2785 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
2786 break;
2788 default: /*reserved*/
2789 RESERVED();
2790 break;
2791 }
2792 break;
2794 default: /*reserved*/
2795 RESERVED();
2796 break;
2797 }
2798 break;
2800 default: /*reserved*/
2801 RESERVED();
2802 break;
2803 }
2805 if (dc->is_jmp == DISAS_NEXT) {
2806 gen_check_loop_end(dc, 0);
2807 }
2808 dc->pc = dc->next_pc;
2810 return;
2812 invalid_opcode:
2813 qemu_log("INVALID(pc = %08x)\n", dc->pc);
2814 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
2815 #undef HAS_OPTION
2816 }
2818 static void check_breakpoint(CPUXtensaState *env, DisasContext *dc)
2819 {
2820 CPUBreakpoint *bp;
2822 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
2823 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
2824 if (bp->pc == dc->pc) {
2825 tcg_gen_movi_i32(cpu_pc, dc->pc);
2826 gen_exception(dc, EXCP_DEBUG);
2827 dc->is_jmp = DISAS_UPDATE;
2828 }
2829 }
2830 }
2831 }
2833 static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
2834 {
2835 unsigned i;
2837 for (i = 0; i < dc->config->nibreak; ++i) {
2838 if ((env->sregs[IBREAKENABLE] & (1 << i)) &&
2839 env->sregs[IBREAKA + i] == dc->pc) {
2840 gen_debug_exception(dc, DEBUGCAUSE_IB);
2841 break;
2842 }
2843 }
2844 }
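/*
 * Translation loop: instructions are translated one at a time until a
 * branch/jump ends the block, the guest page boundary is crossed,
 * max_insns is reached or the opcode buffer fills up.  With search_pc set,
 * the pc and icount of every instruction are recorded so that
 * restore_state_to_opc() can recover the guest PC for a given opcode
 * position.
 */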
2846 static void gen_intermediate_code_internal(
2847 CPUXtensaState *env, TranslationBlock *tb, int search_pc)
2848 {
2849 DisasContext dc;
2850 int insn_count = 0;
2851 int j, lj = -1;
2852 uint16_t *gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2853 int max_insns = tb->cflags & CF_COUNT_MASK;
2854 uint32_t pc_start = tb->pc;
2855 uint32_t next_page_start =
2856 (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2858 if (max_insns == 0) {
2859 max_insns = CF_COUNT_MASK;
2860 }
2862 dc.config = env->config;
2863 dc.singlestep_enabled = env->singlestep_enabled;
2864 dc.tb = tb;
2865 dc.pc = pc_start;
2866 dc.ring = tb->flags & XTENSA_TBFLAG_RING_MASK;
2867 dc.cring = (tb->flags & XTENSA_TBFLAG_EXCM) ? 0 : dc.ring;
2868 dc.lbeg = env->sregs[LBEG];
2869 dc.lend = env->sregs[LEND];
2870 dc.is_jmp = DISAS_NEXT;
2871 dc.ccount_delta = 0;
2872 dc.debug = tb->flags & XTENSA_TBFLAG_DEBUG;
2873 dc.icount = tb->flags & XTENSA_TBFLAG_ICOUNT;
2874 dc.cpenable = (tb->flags & XTENSA_TBFLAG_CPENABLE_MASK) >>
2875 XTENSA_TBFLAG_CPENABLE_SHIFT;
2877 init_litbase(&dc);
2878 init_sar_tracker(&dc);
2879 reset_used_window(&dc);
2880 if (dc.icount) {
2881 dc.next_icount = tcg_temp_local_new_i32();
2882 }
2884 gen_icount_start();
2886 if (env->singlestep_enabled && env->exception_taken) {
2887 env->exception_taken = 0;
2888 tcg_gen_movi_i32(cpu_pc, dc.pc);
2889 gen_exception(&dc, EXCP_DEBUG);
2890 }
2892 do {
2893 check_breakpoint(env, &dc);
2895 if (search_pc) {
2896 j = gen_opc_ptr - gen_opc_buf;
2897 if (lj < j) {
2898 lj++;
2899 while (lj < j) {
2900 gen_opc_instr_start[lj++] = 0;
2901 }
2902 }
2903 gen_opc_pc[lj] = dc.pc;
2904 gen_opc_instr_start[lj] = 1;
2905 gen_opc_icount[lj] = insn_count;
2906 }
2908 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2909 tcg_gen_debug_insn_start(dc.pc);
2910 }
2912 ++dc.ccount_delta;
2914 if (insn_count + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
2915 gen_io_start();
2916 }
2918 if (dc.icount) {
2919 int label = gen_new_label();
2921 tcg_gen_addi_i32(dc.next_icount, cpu_SR[ICOUNT], 1);
2922 tcg_gen_brcondi_i32(TCG_COND_NE, dc.next_icount, 0, label);
2923 tcg_gen_mov_i32(dc.next_icount, cpu_SR[ICOUNT]);
2924 if (dc.debug) {
2925 gen_debug_exception(&dc, DEBUGCAUSE_IC);
2926 }
2927 gen_set_label(label);
2928 }
2930 if (dc.debug) {
2931 gen_ibreak_check(env, &dc);
2932 }
2934 disas_xtensa_insn(&dc);
2935 ++insn_count;
2936 if (dc.icount) {
2937 tcg_gen_mov_i32(cpu_SR[ICOUNT], dc.next_icount);
2938 }
2939 if (env->singlestep_enabled) {
2940 tcg_gen_movi_i32(cpu_pc, dc.pc);
2941 gen_exception(&dc, EXCP_DEBUG);
2942 break;
2943 }
2944 } while (dc.is_jmp == DISAS_NEXT &&
2945 insn_count < max_insns &&
2946 dc.pc < next_page_start &&
2947 gen_opc_ptr < gen_opc_end);
2949 reset_litbase(&dc);
2950 reset_sar_tracker(&dc);
2951 if (dc.icount) {
2952 tcg_temp_free(dc.next_icount);
2953 }
2955 if (tb->cflags & CF_LAST_IO) {
2956 gen_io_end();
2957 }
2959 if (dc.is_jmp == DISAS_NEXT) {
2960 gen_jumpi(&dc, dc.pc, 0);
2961 }
2962 gen_icount_end(tb, insn_count);
2963 *gen_opc_ptr = INDEX_op_end;
2965 if (!search_pc) {
2966 tb->size = dc.pc - pc_start;
2967 tb->icount = insn_count;
2968 }
2969 }
2971 void gen_intermediate_code(CPUXtensaState *env, TranslationBlock *tb)
2972 {
2973 gen_intermediate_code_internal(env, tb, 0);
2974 }
2976 void gen_intermediate_code_pc(CPUXtensaState *env, TranslationBlock *tb)
2977 {
2978 gen_intermediate_code_internal(env, tb, 1);
2979 }
2981 void cpu_dump_state(CPUXtensaState *env, FILE *f, fprintf_function cpu_fprintf,
2982 int flags)
2983 {
2984 int i, j;
2986 cpu_fprintf(f, "PC=%08x\n\n", env->pc);
2988 for (i = j = 0; i < 256; ++i) {
2989 if (sregnames[i]) {
2990 cpu_fprintf(f, "%s=%08x%c", sregnames[i], env->sregs[i],
2991 (j++ % 4) == 3 ? '\n' : ' ');
2992 }
2993 }
2995 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
2997 for (i = j = 0; i < 256; ++i) {
2998 if (uregnames[i]) {
2999 cpu_fprintf(f, "%s=%08x%c", uregnames[i], env->uregs[i],
3000 (j++ % 4) == 3 ? '\n' : ' ');
3001 }
3002 }
3004 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
3006 for (i = 0; i < 16; ++i) {
3007 cpu_fprintf(f, "A%02d=%08x%c", i, env->regs[i],
3008 (i % 4) == 3 ? '\n' : ' ');
3009 }
3011 cpu_fprintf(f, "\n");
3013 for (i = 0; i < env->config->nareg; ++i) {
3014 cpu_fprintf(f, "AR%02d=%08x%c", i, env->phys_regs[i],
3015 (i % 4) == 3 ? '\n' : ' ');
3016 }
3018 if (xtensa_option_enabled(env->config, XTENSA_OPTION_FP_COPROCESSOR)) {
3019 cpu_fprintf(f, "\n");
3021 for (i = 0; i < 16; ++i) {
3022 cpu_fprintf(f, "F%02d=%08x (%+10.8e)%c", i,
3023 float32_val(env->fregs[i]),
3024 *(float *)&env->fregs[i], (i % 2) == 1 ? '\n' : ' ');
3025 }
3026 }
3027 }
3029 void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb, int pc_pos)
3030 {
3031 env->pc = gen_opc_pc[pc_pos];