target-xtensa/translate.c
1 /*
2 * Xtensa ISA:
3 * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
5 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of the Open Source and Linux Lab nor the
16 * names of its contributors may be used to endorse or promote products
17 * derived from this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include <stdio.h>
33 #include "cpu.h"
34 #include "exec/exec-all.h"
35 #include "disas/disas.h"
36 #include "tcg-op.h"
37 #include "qemu/log.h"
38 #include "sysemu/sysemu.h"
39 #include "exec/cpu_ldst.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
44 #include "trace-tcg.h"
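/* Per-instruction and per-translation-block state used by the Xtensa translator. */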
47 typedef struct DisasContext {
48 const XtensaConfig *config;
49 TranslationBlock *tb;
50 uint32_t pc;
51 uint32_t next_pc;
52 int cring;
53 int ring;
54 uint32_t lbeg;
55 uint32_t lend;
56 TCGv_i32 litbase;
57 int is_jmp;
58 int singlestep_enabled;
60 bool sar_5bit;
61 bool sar_m32_5bit;
62 bool sar_m32_allocated;
63 TCGv_i32 sar_m32;
65 uint32_t ccount_delta;
66 unsigned used_window;
68 bool debug;
69 bool icount;
70 TCGv_i32 next_icount;
72 unsigned cpenable;
73 } DisasContext;
75 static TCGv_ptr cpu_env;
76 static TCGv_i32 cpu_pc;
77 static TCGv_i32 cpu_R[16];
78 static TCGv_i32 cpu_FR[16];
79 static TCGv_i32 cpu_SR[256];
80 static TCGv_i32 cpu_UR[256];
82 #include "exec/gen-icount.h"
84 typedef struct XtensaReg {
85 const char *name;
86 uint64_t opt_bits;
87 enum {
88 SR_R = 1,
89 SR_W = 2,
90 SR_X = 4,
91 SR_RW = 3,
92 SR_RWX = 7,
93 } access;
94 } XtensaReg;
96 #define XTENSA_REG_ACCESS(regname, opt, acc) { \
97 .name = (regname), \
98 .opt_bits = XTENSA_OPTION_BIT(opt), \
99 .access = (acc), \
102 #define XTENSA_REG(regname, opt) XTENSA_REG_ACCESS(regname, opt, SR_RWX)
104 #define XTENSA_REG_BITS_ACCESS(regname, opt, acc) { \
105 .name = (regname), \
106 .opt_bits = (opt), \
107 .access = (acc), \
110 #define XTENSA_REG_BITS(regname, opt) \
111 XTENSA_REG_BITS_ACCESS(regname, opt, SR_RWX)
113 static const XtensaReg sregnames[256] = {
114 [LBEG] = XTENSA_REG("LBEG", XTENSA_OPTION_LOOP),
115 [LEND] = XTENSA_REG("LEND", XTENSA_OPTION_LOOP),
116 [LCOUNT] = XTENSA_REG("LCOUNT", XTENSA_OPTION_LOOP),
117 [SAR] = XTENSA_REG_BITS("SAR", XTENSA_OPTION_ALL),
118 [BR] = XTENSA_REG("BR", XTENSA_OPTION_BOOLEAN),
119 [LITBASE] = XTENSA_REG("LITBASE", XTENSA_OPTION_EXTENDED_L32R),
120 [SCOMPARE1] = XTENSA_REG("SCOMPARE1", XTENSA_OPTION_CONDITIONAL_STORE),
121 [ACCLO] = XTENSA_REG("ACCLO", XTENSA_OPTION_MAC16),
122 [ACCHI] = XTENSA_REG("ACCHI", XTENSA_OPTION_MAC16),
123 [MR] = XTENSA_REG("MR0", XTENSA_OPTION_MAC16),
124 [MR + 1] = XTENSA_REG("MR1", XTENSA_OPTION_MAC16),
125 [MR + 2] = XTENSA_REG("MR2", XTENSA_OPTION_MAC16),
126 [MR + 3] = XTENSA_REG("MR3", XTENSA_OPTION_MAC16),
127 [WINDOW_BASE] = XTENSA_REG("WINDOW_BASE", XTENSA_OPTION_WINDOWED_REGISTER),
128 [WINDOW_START] = XTENSA_REG("WINDOW_START",
129 XTENSA_OPTION_WINDOWED_REGISTER),
130 [PTEVADDR] = XTENSA_REG("PTEVADDR", XTENSA_OPTION_MMU),
131 [RASID] = XTENSA_REG("RASID", XTENSA_OPTION_MMU),
132 [ITLBCFG] = XTENSA_REG("ITLBCFG", XTENSA_OPTION_MMU),
133 [DTLBCFG] = XTENSA_REG("DTLBCFG", XTENSA_OPTION_MMU),
134 [IBREAKENABLE] = XTENSA_REG("IBREAKENABLE", XTENSA_OPTION_DEBUG),
135 [CACHEATTR] = XTENSA_REG("CACHEATTR", XTENSA_OPTION_CACHEATTR),
136 [ATOMCTL] = XTENSA_REG("ATOMCTL", XTENSA_OPTION_ATOMCTL),
137 [IBREAKA] = XTENSA_REG("IBREAKA0", XTENSA_OPTION_DEBUG),
138 [IBREAKA + 1] = XTENSA_REG("IBREAKA1", XTENSA_OPTION_DEBUG),
139 [DBREAKA] = XTENSA_REG("DBREAKA0", XTENSA_OPTION_DEBUG),
140 [DBREAKA + 1] = XTENSA_REG("DBREAKA1", XTENSA_OPTION_DEBUG),
141 [DBREAKC] = XTENSA_REG("DBREAKC0", XTENSA_OPTION_DEBUG),
142 [DBREAKC + 1] = XTENSA_REG("DBREAKC1", XTENSA_OPTION_DEBUG),
143 [CONFIGID0] = XTENSA_REG_BITS_ACCESS("CONFIGID0", XTENSA_OPTION_ALL, SR_R),
144 [EPC1] = XTENSA_REG("EPC1", XTENSA_OPTION_EXCEPTION),
145 [EPC1 + 1] = XTENSA_REG("EPC2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
146 [EPC1 + 2] = XTENSA_REG("EPC3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
147 [EPC1 + 3] = XTENSA_REG("EPC4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
148 [EPC1 + 4] = XTENSA_REG("EPC5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
149 [EPC1 + 5] = XTENSA_REG("EPC6", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
150 [EPC1 + 6] = XTENSA_REG("EPC7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
151 [DEPC] = XTENSA_REG("DEPC", XTENSA_OPTION_EXCEPTION),
152 [EPS2] = XTENSA_REG("EPS2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
153 [EPS2 + 1] = XTENSA_REG("EPS3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
154 [EPS2 + 2] = XTENSA_REG("EPS4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
155 [EPS2 + 3] = XTENSA_REG("EPS5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
156 [EPS2 + 4] = XTENSA_REG("EPS6", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
157 [EPS2 + 5] = XTENSA_REG("EPS7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
158 [CONFIGID1] = XTENSA_REG_BITS_ACCESS("CONFIGID1", XTENSA_OPTION_ALL, SR_R),
159 [EXCSAVE1] = XTENSA_REG("EXCSAVE1", XTENSA_OPTION_EXCEPTION),
160 [EXCSAVE1 + 1] = XTENSA_REG("EXCSAVE2",
161 XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
162 [EXCSAVE1 + 2] = XTENSA_REG("EXCSAVE3",
163 XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
164 [EXCSAVE1 + 3] = XTENSA_REG("EXCSAVE4",
165 XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
166 [EXCSAVE1 + 4] = XTENSA_REG("EXCSAVE5",
167 XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
168 [EXCSAVE1 + 5] = XTENSA_REG("EXCSAVE6",
169 XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
170 [EXCSAVE1 + 6] = XTENSA_REG("EXCSAVE7",
171 XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
172 [CPENABLE] = XTENSA_REG("CPENABLE", XTENSA_OPTION_COPROCESSOR),
173 [INTSET] = XTENSA_REG_ACCESS("INTSET", XTENSA_OPTION_INTERRUPT, SR_RW),
174 [INTCLEAR] = XTENSA_REG_ACCESS("INTCLEAR", XTENSA_OPTION_INTERRUPT, SR_W),
175 [INTENABLE] = XTENSA_REG("INTENABLE", XTENSA_OPTION_INTERRUPT),
176 [PS] = XTENSA_REG_BITS("PS", XTENSA_OPTION_ALL),
177 [VECBASE] = XTENSA_REG("VECBASE", XTENSA_OPTION_RELOCATABLE_VECTOR),
178 [EXCCAUSE] = XTENSA_REG("EXCCAUSE", XTENSA_OPTION_EXCEPTION),
179 [DEBUGCAUSE] = XTENSA_REG_ACCESS("DEBUGCAUSE", XTENSA_OPTION_DEBUG, SR_R),
180 [CCOUNT] = XTENSA_REG("CCOUNT", XTENSA_OPTION_TIMER_INTERRUPT),
181 [PRID] = XTENSA_REG_ACCESS("PRID", XTENSA_OPTION_PROCESSOR_ID, SR_R),
182 [ICOUNT] = XTENSA_REG("ICOUNT", XTENSA_OPTION_DEBUG),
183 [ICOUNTLEVEL] = XTENSA_REG("ICOUNTLEVEL", XTENSA_OPTION_DEBUG),
184 [EXCVADDR] = XTENSA_REG("EXCVADDR", XTENSA_OPTION_EXCEPTION),
185 [CCOMPARE] = XTENSA_REG("CCOMPARE0", XTENSA_OPTION_TIMER_INTERRUPT),
186 [CCOMPARE + 1] = XTENSA_REG("CCOMPARE1",
187 XTENSA_OPTION_TIMER_INTERRUPT),
188 [CCOMPARE + 2] = XTENSA_REG("CCOMPARE2",
189 XTENSA_OPTION_TIMER_INTERRUPT),
190 [MISC] = XTENSA_REG("MISC0", XTENSA_OPTION_MISC_SR),
191 [MISC + 1] = XTENSA_REG("MISC1", XTENSA_OPTION_MISC_SR),
192 [MISC + 2] = XTENSA_REG("MISC2", XTENSA_OPTION_MISC_SR),
193 [MISC + 3] = XTENSA_REG("MISC3", XTENSA_OPTION_MISC_SR),
196 static const XtensaReg uregnames[256] = {
197 [THREADPTR] = XTENSA_REG("THREADPTR", XTENSA_OPTION_THREAD_POINTER),
198 [FCR] = XTENSA_REG("FCR", XTENSA_OPTION_FP_COPROCESSOR),
199 [FSR] = XTENSA_REG("FSR", XTENSA_OPTION_FP_COPROCESSOR),
202 void xtensa_translate_init(void)
204 static const char * const regnames[] = {
205 "ar0", "ar1", "ar2", "ar3",
206 "ar4", "ar5", "ar6", "ar7",
207 "ar8", "ar9", "ar10", "ar11",
208 "ar12", "ar13", "ar14", "ar15",
210 static const char * const fregnames[] = {
211 "f0", "f1", "f2", "f3",
212 "f4", "f5", "f6", "f7",
213 "f8", "f9", "f10", "f11",
214 "f12", "f13", "f14", "f15",
216 int i;
218 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
219 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
220 offsetof(CPUXtensaState, pc), "pc");
222 for (i = 0; i < 16; i++) {
223 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
224 offsetof(CPUXtensaState, regs[i]),
225 regnames[i]);
228 for (i = 0; i < 16; i++) {
229 cpu_FR[i] = tcg_global_mem_new_i32(TCG_AREG0,
230 offsetof(CPUXtensaState, fregs[i]),
231 fregnames[i]);
234 for (i = 0; i < 256; ++i) {
235 if (sregnames[i].name) {
236 cpu_SR[i] = tcg_global_mem_new_i32(TCG_AREG0,
237 offsetof(CPUXtensaState, sregs[i]),
238 sregnames[i].name);
242 for (i = 0; i < 256; ++i) {
243 if (uregnames[i].name) {
244 cpu_UR[i] = tcg_global_mem_new_i32(TCG_AREG0,
245 offsetof(CPUXtensaState, uregs[i]),
246 uregnames[i].name);
251 static inline bool option_bits_enabled(DisasContext *dc, uint64_t opt)
253 return xtensa_option_bits_enabled(dc->config, opt);
256 static inline bool option_enabled(DisasContext *dc, int opt)
258 return xtensa_option_enabled(dc->config, opt);
261 static void init_litbase(DisasContext *dc)
263 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
264 dc->litbase = tcg_temp_local_new_i32();
265 tcg_gen_andi_i32(dc->litbase, cpu_SR[LITBASE], 0xfffff000);
269 static void reset_litbase(DisasContext *dc)
271 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
272 tcg_temp_free(dc->litbase);
276 static void init_sar_tracker(DisasContext *dc)
278 dc->sar_5bit = false;
279 dc->sar_m32_5bit = false;
280 dc->sar_m32_allocated = false;
283 static void reset_sar_tracker(DisasContext *dc)
285 if (dc->sar_m32_allocated) {
286 tcg_temp_free(dc->sar_m32);
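/*
 * SAR (shift amount) tracking: right shifts (SSR/SSAI/SSA8L) keep the 5-bit
 * amount directly in SAR, while left shifts (SSL/SSA8B) store 32 - amount in
 * SAR and cache the original 5-bit amount in sar_m32.  This lets SRL/SRA/SLL
 * use plain 32-bit shifts instead of the 64-bit fallback when possible.
 */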
290 static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
292 tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
293 if (dc->sar_m32_5bit) {
294 tcg_gen_discard_i32(dc->sar_m32);
296 dc->sar_5bit = true;
297 dc->sar_m32_5bit = false;
300 static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
302 TCGv_i32 tmp = tcg_const_i32(32);
303 if (!dc->sar_m32_allocated) {
304 dc->sar_m32 = tcg_temp_local_new_i32();
305 dc->sar_m32_allocated = true;
307 tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
308 tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
309 dc->sar_5bit = false;
310 dc->sar_m32_5bit = true;
311 tcg_temp_free(tmp);
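/* Flush dc->ccount_delta into CCOUNT through the advance_ccount helper. */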
314 static void gen_advance_ccount_cond(DisasContext *dc)
316 if (dc->ccount_delta > 0) {
317 TCGv_i32 tmp = tcg_const_i32(dc->ccount_delta);
318 gen_helper_advance_ccount(cpu_env, tmp);
319 tcg_temp_free(tmp);
323 static void gen_advance_ccount(DisasContext *dc)
325 gen_advance_ccount_cond(dc);
326 dc->ccount_delta = 0;
329 static void reset_used_window(DisasContext *dc)
331 dc->used_window = 0;
334 static void gen_exception(DisasContext *dc, int excp)
336 TCGv_i32 tmp = tcg_const_i32(excp);
337 gen_advance_ccount(dc);
338 gen_helper_exception(cpu_env, tmp);
339 tcg_temp_free(tmp);
342 static void gen_exception_cause(DisasContext *dc, uint32_t cause)
344 TCGv_i32 tpc = tcg_const_i32(dc->pc);
345 TCGv_i32 tcause = tcg_const_i32(cause);
346 gen_advance_ccount(dc);
347 gen_helper_exception_cause(cpu_env, tpc, tcause);
348 tcg_temp_free(tpc);
349 tcg_temp_free(tcause);
350 if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
351 cause == SYSCALL_CAUSE) {
352 dc->is_jmp = DISAS_UPDATE;
356 static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
357 TCGv_i32 vaddr)
359 TCGv_i32 tpc = tcg_const_i32(dc->pc);
360 TCGv_i32 tcause = tcg_const_i32(cause);
361 gen_advance_ccount(dc);
362 gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr);
363 tcg_temp_free(tpc);
364 tcg_temp_free(tcause);
367 static void gen_debug_exception(DisasContext *dc, uint32_t cause)
369 TCGv_i32 tpc = tcg_const_i32(dc->pc);
370 TCGv_i32 tcause = tcg_const_i32(cause);
371 gen_advance_ccount(dc);
372 gen_helper_debug_exception(cpu_env, tpc, tcause);
373 tcg_temp_free(tpc);
374 tcg_temp_free(tcause);
375 if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
376 dc->is_jmp = DISAS_UPDATE;
380 static void gen_check_privilege(DisasContext *dc)
382 if (dc->cring) {
383 gen_exception_cause(dc, PRIVILEGED_CAUSE);
384 dc->is_jmp = DISAS_UPDATE;
388 static void gen_check_cpenable(DisasContext *dc, unsigned cp)
390 if (option_enabled(dc, XTENSA_OPTION_COPROCESSOR) &&
391 !(dc->cpenable & (1 << cp))) {
392 gen_exception_cause(dc, COPROCESSOR0_DISABLED + cp);
393 dc->is_jmp = DISAS_UPDATE;
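/*
 * Jump to 'dest'.  A non-negative 'slot' selects a goto_tb chaining slot for
 * targets that stay on the same page; slot == -1 forces a full TB exit.
 */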
397 static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
399 tcg_gen_mov_i32(cpu_pc, dest);
400 gen_advance_ccount(dc);
401 if (dc->icount) {
402 tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
404 if (dc->singlestep_enabled) {
405 gen_exception(dc, EXCP_DEBUG);
406 } else {
407 if (slot >= 0) {
408 tcg_gen_goto_tb(slot);
409 tcg_gen_exit_tb((uintptr_t)dc->tb + slot);
410 } else {
411 tcg_gen_exit_tb(0);
414 dc->is_jmp = DISAS_UPDATE;
417 static void gen_jump(DisasContext *dc, TCGv dest)
419 gen_jump_slot(dc, dest, -1);
422 static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
424 TCGv_i32 tmp = tcg_const_i32(dest);
425 if (((dc->tb->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
426 slot = -1;
428 gen_jump_slot(dc, tmp, slot);
429 tcg_temp_free(tmp);
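/*
 * Windowed call: record the window increment in PS.CALLINC and put the return
 * address, with the increment encoded in its top two bits, into a[callinc * 4]
 * before jumping to 'dest'.
 */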
432 static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
433 int slot)
435 TCGv_i32 tcallinc = tcg_const_i32(callinc);
437 tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
438 tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
439 tcg_temp_free(tcallinc);
440 tcg_gen_movi_i32(cpu_R[callinc << 2],
441 (callinc << 30) | (dc->next_pc & 0x3fffffff));
442 gen_jump_slot(dc, dest, slot);
445 static void gen_callw(DisasContext *dc, int callinc, TCGv_i32 dest)
447 gen_callw_slot(dc, callinc, dest, -1);
450 static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
452 TCGv_i32 tmp = tcg_const_i32(dest);
453 if (((dc->tb->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
454 slot = -1;
456 gen_callw_slot(dc, callinc, tmp, slot);
457 tcg_temp_free(tmp);
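/*
 * Zero-overhead loop handling: if the fall-through PC equals LEND and LCOUNT
 * is non-zero, decrement LCOUNT and branch back to LBEG instead.
 */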
460 static bool gen_check_loop_end(DisasContext *dc, int slot)
462 if (option_enabled(dc, XTENSA_OPTION_LOOP) &&
463 !(dc->tb->flags & XTENSA_TBFLAG_EXCM) &&
464 dc->next_pc == dc->lend) {
465 int label = gen_new_label();
467 gen_advance_ccount(dc);
468 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
469 tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
470 gen_jumpi(dc, dc->lbeg, slot);
471 gen_set_label(label);
472 gen_jumpi(dc, dc->next_pc, -1);
473 return true;
475 return false;
478 static void gen_jumpi_check_loop_end(DisasContext *dc, int slot)
480 if (!gen_check_loop_end(dc, slot)) {
481 gen_jumpi(dc, dc->next_pc, slot);
485 static void gen_brcond(DisasContext *dc, TCGCond cond,
486 TCGv_i32 t0, TCGv_i32 t1, uint32_t offset)
488 int label = gen_new_label();
490 gen_advance_ccount(dc);
491 tcg_gen_brcond_i32(cond, t0, t1, label);
492 gen_jumpi_check_loop_end(dc, 0);
493 gen_set_label(label);
494 gen_jumpi(dc, dc->pc + offset, 1);
497 static void gen_brcondi(DisasContext *dc, TCGCond cond,
498 TCGv_i32 t0, uint32_t t1, uint32_t offset)
500 TCGv_i32 tmp = tcg_const_i32(t1);
501 gen_brcond(dc, cond, t0, tmp, offset);
502 tcg_temp_free(tmp);
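/*
 * Check that special register 'sr' is present in this configuration and
 * permits the requested rsr/wsr/xsr access; raise IllegalInstruction if not.
 */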
505 static bool gen_check_sr(DisasContext *dc, uint32_t sr, unsigned access)
507 if (!xtensa_option_bits_enabled(dc->config, sregnames[sr].opt_bits)) {
508 if (sregnames[sr].name) {
509 qemu_log("SR %s is not configured\n", sregnames[sr].name);
510 } else {
511 qemu_log("SR %d is not implemented\n", sr);
513 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
514 return false;
515 } else if (!(sregnames[sr].access & access)) {
516 static const char * const access_text[] = {
517 [SR_R] = "rsr",
518 [SR_W] = "wsr",
519 [SR_X] = "xsr",
521 assert(access < ARRAY_SIZE(access_text) && access_text[access]);
522 qemu_log("SR %s is not available for %s\n", sregnames[sr].name,
523 access_text[access]);
524 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
525 return false;
527 return true;
530 static void gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
532 gen_advance_ccount(dc);
533 tcg_gen_mov_i32(d, cpu_SR[sr]);
536 static void gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
538 tcg_gen_shri_i32(d, cpu_SR[EXCVADDR], 10);
539 tcg_gen_or_i32(d, d, cpu_SR[sr]);
540 tcg_gen_andi_i32(d, d, 0xfffffffc);
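/* Read SR 'sr' into d, using a per-register handler where side effects apply. */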
543 static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
545 static void (* const rsr_handler[256])(DisasContext *dc,
546 TCGv_i32 d, uint32_t sr) = {
547 [CCOUNT] = gen_rsr_ccount,
548 [PTEVADDR] = gen_rsr_ptevaddr,
551 if (rsr_handler[sr]) {
552 rsr_handler[sr](dc, d, sr);
553 } else {
554 tcg_gen_mov_i32(d, cpu_SR[sr]);
558 static void gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s)
560 gen_helper_wsr_lbeg(cpu_env, s);
561 gen_jumpi_check_loop_end(dc, 0);
564 static void gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s)
566 gen_helper_wsr_lend(cpu_env, s);
567 gen_jumpi_check_loop_end(dc, 0);
570 static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s)
572 tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f);
573 if (dc->sar_m32_5bit) {
574 tcg_gen_discard_i32(dc->sar_m32);
576 dc->sar_5bit = false;
577 dc->sar_m32_5bit = false;
580 static void gen_wsr_br(DisasContext *dc, uint32_t sr, TCGv_i32 s)
582 tcg_gen_andi_i32(cpu_SR[sr], s, 0xffff);
585 static void gen_wsr_litbase(DisasContext *dc, uint32_t sr, TCGv_i32 s)
587 tcg_gen_andi_i32(cpu_SR[sr], s, 0xfffff001);
588 /* This can change tb->flags, so exit tb */
589 gen_jumpi_check_loop_end(dc, -1);
592 static void gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s)
594 tcg_gen_ext8s_i32(cpu_SR[sr], s);
597 static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v)
599 gen_helper_wsr_windowbase(cpu_env, v);
600 reset_used_window(dc);
603 static void gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v)
605 tcg_gen_andi_i32(cpu_SR[sr], v, (1 << dc->config->nareg / 4) - 1);
606 reset_used_window(dc);
609 static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v)
611 tcg_gen_andi_i32(cpu_SR[sr], v, 0xffc00000);
614 static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
616 gen_helper_wsr_rasid(cpu_env, v);
617 /* This can change tb->flags, so exit tb */
618 gen_jumpi_check_loop_end(dc, -1);
621 static void gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v)
623 tcg_gen_andi_i32(cpu_SR[sr], v, 0x01130000);
626 static void gen_wsr_ibreakenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
628 gen_helper_wsr_ibreakenable(cpu_env, v);
629 gen_jumpi_check_loop_end(dc, 0);
632 static void gen_wsr_atomctl(DisasContext *dc, uint32_t sr, TCGv_i32 v)
634 tcg_gen_andi_i32(cpu_SR[sr], v, 0x3f);
637 static void gen_wsr_ibreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
639 unsigned id = sr - IBREAKA;
641 if (id < dc->config->nibreak) {
642 TCGv_i32 tmp = tcg_const_i32(id);
643 gen_helper_wsr_ibreaka(cpu_env, tmp, v);
644 tcg_temp_free(tmp);
645 gen_jumpi_check_loop_end(dc, 0);
649 static void gen_wsr_dbreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
651 unsigned id = sr - DBREAKA;
653 if (id < dc->config->ndbreak) {
654 TCGv_i32 tmp = tcg_const_i32(id);
655 gen_helper_wsr_dbreaka(cpu_env, tmp, v);
656 tcg_temp_free(tmp);
660 static void gen_wsr_dbreakc(DisasContext *dc, uint32_t sr, TCGv_i32 v)
662 unsigned id = sr - DBREAKC;
664 if (id < dc->config->ndbreak) {
665 TCGv_i32 tmp = tcg_const_i32(id);
666 gen_helper_wsr_dbreakc(cpu_env, tmp, v);
667 tcg_temp_free(tmp);
671 static void gen_wsr_cpenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
673 tcg_gen_andi_i32(cpu_SR[sr], v, 0xff);
674 /* This can change tb->flags, so exit tb */
675 gen_jumpi_check_loop_end(dc, -1);
678 static void gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v)
680 tcg_gen_andi_i32(cpu_SR[sr], v,
681 dc->config->inttype_mask[INTTYPE_SOFTWARE]);
682 gen_helper_check_interrupts(cpu_env);
683 gen_jumpi_check_loop_end(dc, 0);
686 static void gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v)
688 TCGv_i32 tmp = tcg_temp_new_i32();
690 tcg_gen_andi_i32(tmp, v,
691 dc->config->inttype_mask[INTTYPE_EDGE] |
692 dc->config->inttype_mask[INTTYPE_NMI] |
693 dc->config->inttype_mask[INTTYPE_SOFTWARE]);
694 tcg_gen_andc_i32(cpu_SR[INTSET], cpu_SR[INTSET], tmp);
695 tcg_temp_free(tmp);
696 gen_helper_check_interrupts(cpu_env);
699 static void gen_wsr_intenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
701 tcg_gen_mov_i32(cpu_SR[sr], v);
702 gen_helper_check_interrupts(cpu_env);
703 gen_jumpi_check_loop_end(dc, 0);
706 static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
708 uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
709 PS_UM | PS_EXCM | PS_INTLEVEL;
711 if (option_enabled(dc, XTENSA_OPTION_MMU)) {
712 mask |= PS_RING;
714 tcg_gen_andi_i32(cpu_SR[sr], v, mask);
715 reset_used_window(dc);
716 gen_helper_check_interrupts(cpu_env);
717 /* This can change mmu index and tb->flags, so exit tb */
718 gen_jumpi_check_loop_end(dc, -1);
721 static void gen_wsr_icount(DisasContext *dc, uint32_t sr, TCGv_i32 v)
723 if (dc->icount) {
724 tcg_gen_mov_i32(dc->next_icount, v);
725 } else {
726 tcg_gen_mov_i32(cpu_SR[sr], v);
730 static void gen_wsr_icountlevel(DisasContext *dc, uint32_t sr, TCGv_i32 v)
732 tcg_gen_andi_i32(cpu_SR[sr], v, 0xf);
733 /* This can change tb->flags, so exit tb */
734 gen_jumpi_check_loop_end(dc, -1);
737 static void gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v)
739 uint32_t id = sr - CCOMPARE;
740 if (id < dc->config->nccompare) {
741 uint32_t int_bit = 1 << dc->config->timerint[id];
742 gen_advance_ccount(dc);
743 tcg_gen_mov_i32(cpu_SR[sr], v);
744 tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit);
745 gen_helper_check_interrupts(cpu_env);
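/* Write SR 'sr' from s, dispatching to per-register handlers that implement
 * the required masking, helper calls and TB exits. */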
749 static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
751 static void (* const wsr_handler[256])(DisasContext *dc,
752 uint32_t sr, TCGv_i32 v) = {
753 [LBEG] = gen_wsr_lbeg,
754 [LEND] = gen_wsr_lend,
755 [SAR] = gen_wsr_sar,
756 [BR] = gen_wsr_br,
757 [LITBASE] = gen_wsr_litbase,
758 [ACCHI] = gen_wsr_acchi,
759 [WINDOW_BASE] = gen_wsr_windowbase,
760 [WINDOW_START] = gen_wsr_windowstart,
761 [PTEVADDR] = gen_wsr_ptevaddr,
762 [RASID] = gen_wsr_rasid,
763 [ITLBCFG] = gen_wsr_tlbcfg,
764 [DTLBCFG] = gen_wsr_tlbcfg,
765 [IBREAKENABLE] = gen_wsr_ibreakenable,
766 [ATOMCTL] = gen_wsr_atomctl,
767 [IBREAKA] = gen_wsr_ibreaka,
768 [IBREAKA + 1] = gen_wsr_ibreaka,
769 [DBREAKA] = gen_wsr_dbreaka,
770 [DBREAKA + 1] = gen_wsr_dbreaka,
771 [DBREAKC] = gen_wsr_dbreakc,
772 [DBREAKC + 1] = gen_wsr_dbreakc,
773 [CPENABLE] = gen_wsr_cpenable,
774 [INTSET] = gen_wsr_intset,
775 [INTCLEAR] = gen_wsr_intclear,
776 [INTENABLE] = gen_wsr_intenable,
777 [PS] = gen_wsr_ps,
778 [ICOUNT] = gen_wsr_icount,
779 [ICOUNTLEVEL] = gen_wsr_icountlevel,
780 [CCOMPARE] = gen_wsr_ccompare,
781 [CCOMPARE + 1] = gen_wsr_ccompare,
782 [CCOMPARE + 2] = gen_wsr_ccompare,
785 if (wsr_handler[sr]) {
786 wsr_handler[sr](dc, sr, s);
787 } else {
788 tcg_gen_mov_i32(cpu_SR[sr], s);
792 static void gen_wur(uint32_t ur, TCGv_i32 s)
794 switch (ur) {
795 case FCR:
796 gen_helper_wur_fcr(cpu_env, s);
797 break;
799 case FSR:
800 tcg_gen_andi_i32(cpu_UR[ur], s, 0xffffff80);
801 break;
803 default:
804 tcg_gen_mov_i32(cpu_UR[ur], s);
805 break;
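/*
 * Handle load/store alignment: without the unaligned-exception option the
 * address is silently masked into alignment; otherwise a misaligned address
 * may raise LoadStoreAlignmentCause, depending on the hardware-alignment
 * option and on whether this particular access can be handled in hardware.
 */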
809 static void gen_load_store_alignment(DisasContext *dc, int shift,
810 TCGv_i32 addr, bool no_hw_alignment)
812 if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
813 tcg_gen_andi_i32(addr, addr, ~0 << shift);
814 } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
815 no_hw_alignment) {
816 int label = gen_new_label();
817 TCGv_i32 tmp = tcg_temp_new_i32();
818 tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
819 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
820 gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
821 gen_set_label(label);
822 tcg_temp_free(tmp);
826 static void gen_waiti(DisasContext *dc, uint32_t imm4)
828 TCGv_i32 pc = tcg_const_i32(dc->next_pc);
829 TCGv_i32 intlevel = tcg_const_i32(imm4);
830 gen_advance_ccount(dc);
831 gen_helper_waiti(cpu_env, pc, intlevel);
832 tcg_temp_free(pc);
833 tcg_temp_free(intlevel);
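/*
 * Windowed-register check: if AR[r1] may lie outside the part of the window
 * already provisioned in WINDOW_START, call the window_check helper, which
 * can raise a window overflow exception.
 */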
836 static void gen_window_check1(DisasContext *dc, unsigned r1)
838 if (dc->tb->flags & XTENSA_TBFLAG_EXCM) {
839 return;
841 if (option_enabled(dc, XTENSA_OPTION_WINDOWED_REGISTER) &&
842 r1 / 4 > dc->used_window) {
843 int label = gen_new_label();
844 TCGv_i32 ws = tcg_temp_new_i32();
846 dc->used_window = r1 / 4;
847 tcg_gen_deposit_i32(ws, cpu_SR[WINDOW_START], cpu_SR[WINDOW_START],
848 dc->config->nareg / 4, dc->config->nareg / 4);
849 tcg_gen_shr_i32(ws, ws, cpu_SR[WINDOW_BASE]);
850 tcg_gen_andi_i32(ws, ws, (2 << (r1 / 4)) - 2);
851 tcg_gen_brcondi_i32(TCG_COND_EQ, ws, 0, label);
853 TCGv_i32 pc = tcg_const_i32(dc->pc);
854 TCGv_i32 w = tcg_const_i32(r1 / 4);
856 gen_advance_ccount_cond(dc);
857 gen_helper_window_check(cpu_env, pc, w);
859 tcg_temp_free(w);
860 tcg_temp_free(pc);
862 gen_set_label(label);
863 tcg_temp_free(ws);
867 static void gen_window_check2(DisasContext *dc, unsigned r1, unsigned r2)
869 gen_window_check1(dc, r1 > r2 ? r1 : r2);
872 static void gen_window_check3(DisasContext *dc, unsigned r1, unsigned r2,
873 unsigned r3)
875 gen_window_check2(dc, r1, r2 > r3 ? r2 : r3);
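/* Return the chosen 16-bit half of v, sign- or zero-extended, as a MAC16 operand. */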
878 static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned)
880 TCGv_i32 m = tcg_temp_new_i32();
882 if (hi) {
883 (is_unsigned ? tcg_gen_shri_i32 : tcg_gen_sari_i32)(m, v, 16);
884 } else {
885 (is_unsigned ? tcg_gen_ext16u_i32 : tcg_gen_ext16s_i32)(m, v);
887 return m;
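/* Decode and translate one Xtensa instruction starting at dc->pc. */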
890 static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
892 #define HAS_OPTION_BITS(opt) do { \
893 if (!option_bits_enabled(dc, opt)) { \
894 qemu_log("Option is not enabled %s:%d\n", \
895 __FILE__, __LINE__); \
896 goto invalid_opcode; \
898 } while (0)
900 #define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt))
902 #define TBD() qemu_log("TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__)
903 #define RESERVED() do { \
904 qemu_log("RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \
905 dc->pc, b0, b1, b2, __FILE__, __LINE__); \
906 goto invalid_opcode; \
907 } while (0)
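/* Instruction field extraction; the byte/nibble layout differs between big-
 * and little-endian cores, hence the two sets of definitions below. */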
910 #ifdef TARGET_WORDS_BIGENDIAN
911 #define OP0 (((b0) & 0xf0) >> 4)
912 #define OP1 (((b2) & 0xf0) >> 4)
913 #define OP2 ((b2) & 0xf)
914 #define RRR_R ((b1) & 0xf)
915 #define RRR_S (((b1) & 0xf0) >> 4)
916 #define RRR_T ((b0) & 0xf)
917 #else
918 #define OP0 (((b0) & 0xf))
919 #define OP1 (((b2) & 0xf))
920 #define OP2 (((b2) & 0xf0) >> 4)
921 #define RRR_R (((b1) & 0xf0) >> 4)
922 #define RRR_S (((b1) & 0xf))
923 #define RRR_T (((b0) & 0xf0) >> 4)
924 #endif
925 #define RRR_X ((RRR_R & 0x4) >> 2)
926 #define RRR_Y ((RRR_T & 0x4) >> 2)
927 #define RRR_W (RRR_R & 0x3)
929 #define RRRN_R RRR_R
930 #define RRRN_S RRR_S
931 #define RRRN_T RRR_T
933 #define RRI4_R RRR_R
934 #define RRI4_S RRR_S
935 #define RRI4_T RRR_T
936 #ifdef TARGET_WORDS_BIGENDIAN
937 #define RRI4_IMM4 ((b2) & 0xf)
938 #else
939 #define RRI4_IMM4 (((b2) & 0xf0) >> 4)
940 #endif
942 #define RRI8_R RRR_R
943 #define RRI8_S RRR_S
944 #define RRI8_T RRR_T
945 #define RRI8_IMM8 (b2)
946 #define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8)
948 #ifdef TARGET_WORDS_BIGENDIAN
949 #define RI16_IMM16 (((b1) << 8) | (b2))
950 #else
951 #define RI16_IMM16 (((b2) << 8) | (b1))
952 #endif
954 #ifdef TARGET_WORDS_BIGENDIAN
955 #define CALL_N (((b0) & 0xc) >> 2)
956 #define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2))
957 #else
958 #define CALL_N (((b0) & 0x30) >> 4)
959 #define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10))
960 #endif
961 #define CALL_OFFSET_SE \
962 (((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET)
964 #define CALLX_N CALL_N
965 #ifdef TARGET_WORDS_BIGENDIAN
966 #define CALLX_M ((b0) & 0x3)
967 #else
968 #define CALLX_M (((b0) & 0xc0) >> 6)
969 #endif
970 #define CALLX_S RRR_S
972 #define BRI12_M CALLX_M
973 #define BRI12_S RRR_S
974 #ifdef TARGET_WORDS_BIGENDIAN
975 #define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2))
976 #else
977 #define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4))
978 #endif
979 #define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 0xfffff000 : 0) | BRI12_IMM12)
981 #define BRI8_M BRI12_M
982 #define BRI8_R RRI8_R
983 #define BRI8_S RRI8_S
984 #define BRI8_IMM8 RRI8_IMM8
985 #define BRI8_IMM8_SE RRI8_IMM8_SE
987 #define RSR_SR (b1)
989 uint8_t b0 = cpu_ldub_code(env, dc->pc);
990 uint8_t b1 = cpu_ldub_code(env, dc->pc + 1);
991 uint8_t b2 = 0;
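/* Immediate encodings used by the branch-with-constant instructions
 * (BEQI/BNEI/BLTI/BGEI use B4CONST, BLTUI/BGEUI use B4CONSTU). */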
993 static const uint32_t B4CONST[] = {
994 0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
997 static const uint32_t B4CONSTU[] = {
998 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
1001 if (OP0 >= 8) {
1002 dc->next_pc = dc->pc + 2;
1003 HAS_OPTION(XTENSA_OPTION_CODE_DENSITY);
1004 } else {
1005 dc->next_pc = dc->pc + 3;
1006 b2 = cpu_ldub_code(env, dc->pc + 2);
1009 switch (OP0) {
1010 case 0: /*QRST*/
1011 switch (OP1) {
1012 case 0: /*RST0*/
1013 switch (OP2) {
1014 case 0: /*ST0*/
1015 if ((RRR_R & 0xc) == 0x8) {
1016 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1019 switch (RRR_R) {
1020 case 0: /*SNM0*/
1021 switch (CALLX_M) {
1022 case 0: /*ILL*/
1023 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
1024 break;
1026 case 1: /*reserved*/
1027 RESERVED();
1028 break;
1030 case 2: /*JR*/
1031 switch (CALLX_N) {
1032 case 0: /*RET*/
1033 case 2: /*JX*/
1034 gen_window_check1(dc, CALLX_S);
1035 gen_jump(dc, cpu_R[CALLX_S]);
1036 break;
1038 case 1: /*RETWw*/
1039 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1041 TCGv_i32 tmp = tcg_const_i32(dc->pc);
1042 gen_advance_ccount(dc);
1043 gen_helper_retw(tmp, cpu_env, tmp);
1044 gen_jump(dc, tmp);
1045 tcg_temp_free(tmp);
1047 break;
1049 case 3: /*reserved*/
1050 RESERVED();
1051 break;
1053 break;
1055 case 3: /*CALLX*/
1056 gen_window_check2(dc, CALLX_S, CALLX_N << 2);
1057 switch (CALLX_N) {
1058 case 0: /*CALLX0*/
1060 TCGv_i32 tmp = tcg_temp_new_i32();
1061 tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
1062 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
1063 gen_jump(dc, tmp);
1064 tcg_temp_free(tmp);
1066 break;
1068 case 1: /*CALLX4w*/
1069 case 2: /*CALLX8w*/
1070 case 3: /*CALLX12w*/
1071 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1073 TCGv_i32 tmp = tcg_temp_new_i32();
1075 tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
1076 gen_callw(dc, CALLX_N, tmp);
1077 tcg_temp_free(tmp);
1079 break;
1081 break;
1083 break;
1085 case 1: /*MOVSPw*/
1086 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1087 gen_window_check2(dc, RRR_T, RRR_S);
1089 TCGv_i32 pc = tcg_const_i32(dc->pc);
1090 gen_advance_ccount(dc);
1091 gen_helper_movsp(cpu_env, pc);
1092 tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]);
1093 tcg_temp_free(pc);
1095 break;
1097 case 2: /*SYNC*/
1098 switch (RRR_T) {
1099 case 0: /*ISYNC*/
1100 break;
1102 case 1: /*RSYNC*/
1103 break;
1105 case 2: /*ESYNC*/
1106 break;
1108 case 3: /*DSYNC*/
1109 break;
1111 case 8: /*EXCW*/
1112 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
1113 break;
1115 case 12: /*MEMW*/
1116 break;
1118 case 13: /*EXTW*/
1119 break;
1121 case 15: /*NOP*/
1122 break;
1124 default: /*reserved*/
1125 RESERVED();
1126 break;
1128 break;
1130 case 3: /*RFEIx*/
1131 switch (RRR_T) {
1132 case 0: /*RFETx*/
1133 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
1134 switch (RRR_S) {
1135 case 0: /*RFEx*/
1136 gen_check_privilege(dc);
1137 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
1138 gen_helper_check_interrupts(cpu_env);
1139 gen_jump(dc, cpu_SR[EPC1]);
1140 break;
1142 case 1: /*RFUEx*/
1143 RESERVED();
1144 break;
1146 case 2: /*RFDEx*/
1147 gen_check_privilege(dc);
1148 gen_jump(dc, cpu_SR[
1149 dc->config->ndepc ? DEPC : EPC1]);
1150 break;
1152 case 4: /*RFWOw*/
1153 case 5: /*RFWUw*/
1154 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1155 gen_check_privilege(dc);
1157 TCGv_i32 tmp = tcg_const_i32(1);
1159 tcg_gen_andi_i32(
1160 cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
1161 tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
1163 if (RRR_S == 4) {
1164 tcg_gen_andc_i32(cpu_SR[WINDOW_START],
1165 cpu_SR[WINDOW_START], tmp);
1166 } else {
1167 tcg_gen_or_i32(cpu_SR[WINDOW_START],
1168 cpu_SR[WINDOW_START], tmp);
1171 gen_helper_restore_owb(cpu_env);
1172 gen_helper_check_interrupts(cpu_env);
1173 gen_jump(dc, cpu_SR[EPC1]);
1175 tcg_temp_free(tmp);
1177 break;
1179 default: /*reserved*/
1180 RESERVED();
1181 break;
1183 break;
1185 case 1: /*RFIx*/
1186 HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT);
1187 if (RRR_S >= 2 && RRR_S <= dc->config->nlevel) {
1188 gen_check_privilege(dc);
1189 tcg_gen_mov_i32(cpu_SR[PS],
1190 cpu_SR[EPS2 + RRR_S - 2]);
1191 gen_helper_check_interrupts(cpu_env);
1192 gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]);
1193 } else {
1194 qemu_log("RFI %d is illegal\n", RRR_S);
1195 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
1197 break;
1199 case 2: /*RFME*/
1200 TBD();
1201 break;
1203 default: /*reserved*/
1204 RESERVED();
1205 break;
1208 break;
1210 case 4: /*BREAKx*/
1211 HAS_OPTION(XTENSA_OPTION_DEBUG);
1212 if (dc->debug) {
1213 gen_debug_exception(dc, DEBUGCAUSE_BI);
1215 break;
1217 case 5: /*SYSCALLx*/
1218 HAS_OPTION(XTENSA_OPTION_EXCEPTION);
1219 switch (RRR_S) {
1220 case 0: /*SYSCALLx*/
1221 gen_exception_cause(dc, SYSCALL_CAUSE);
1222 break;
1224 case 1: /*SIMCALL*/
1225 if (semihosting_enabled) {
1226 gen_check_privilege(dc);
1227 gen_helper_simcall(cpu_env);
1228 } else {
1229 qemu_log("SIMCALL but semihosting is disabled\n");
1230 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
1232 break;
1234 default:
1235 RESERVED();
1236 break;
1238 break;
1240 case 6: /*RSILx*/
1241 HAS_OPTION(XTENSA_OPTION_INTERRUPT);
1242 gen_check_privilege(dc);
1243 gen_window_check1(dc, RRR_T);
1244 tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]);
1245 tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL);
1246 tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S);
1247 gen_helper_check_interrupts(cpu_env);
1248 gen_jumpi_check_loop_end(dc, 0);
1249 break;
1251 case 7: /*WAITIx*/
1252 HAS_OPTION(XTENSA_OPTION_INTERRUPT);
1253 gen_check_privilege(dc);
1254 gen_waiti(dc, RRR_S);
1255 break;
1257 case 8: /*ANY4p*/
1258 case 9: /*ALL4p*/
1259 case 10: /*ANY8p*/
1260 case 11: /*ALL8p*/
1261 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1263 const unsigned shift = (RRR_R & 2) ? 8 : 4;
1264 TCGv_i32 mask = tcg_const_i32(
1265 ((1 << shift) - 1) << RRR_S);
1266 TCGv_i32 tmp = tcg_temp_new_i32();
1268 tcg_gen_and_i32(tmp, cpu_SR[BR], mask);
1269 if (RRR_R & 1) { /*ALL*/
1270 tcg_gen_addi_i32(tmp, tmp, 1 << RRR_S);
1271 } else { /*ANY*/
1272 tcg_gen_add_i32(tmp, tmp, mask);
1274 tcg_gen_shri_i32(tmp, tmp, RRR_S + shift);
1275 tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR],
1276 tmp, RRR_T, 1);
1277 tcg_temp_free(mask);
1278 tcg_temp_free(tmp);
1280 break;
1282 default: /*reserved*/
1283 RESERVED();
1284 break;
1287 break;
1289 case 1: /*AND*/
1290 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1291 tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1292 break;
1294 case 2: /*OR*/
1295 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1296 tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1297 break;
1299 case 3: /*XOR*/
1300 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1301 tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1302 break;
1304 case 4: /*ST1*/
1305 switch (RRR_R) {
1306 case 0: /*SSR*/
1307 gen_window_check1(dc, RRR_S);
1308 gen_right_shift_sar(dc, cpu_R[RRR_S]);
1309 break;
1311 case 1: /*SSL*/
1312 gen_window_check1(dc, RRR_S);
1313 gen_left_shift_sar(dc, cpu_R[RRR_S]);
1314 break;
1316 case 2: /*SSA8L*/
1317 gen_window_check1(dc, RRR_S);
1319 TCGv_i32 tmp = tcg_temp_new_i32();
1320 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
1321 gen_right_shift_sar(dc, tmp);
1322 tcg_temp_free(tmp);
1324 break;
1326 case 3: /*SSA8B*/
1327 gen_window_check1(dc, RRR_S);
1329 TCGv_i32 tmp = tcg_temp_new_i32();
1330 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
1331 gen_left_shift_sar(dc, tmp);
1332 tcg_temp_free(tmp);
1334 break;
1336 case 4: /*SSAI*/
1338 TCGv_i32 tmp = tcg_const_i32(
1339 RRR_S | ((RRR_T & 1) << 4));
1340 gen_right_shift_sar(dc, tmp);
1341 tcg_temp_free(tmp);
1343 break;
1345 case 6: /*RER*/
1346 TBD();
1347 break;
1349 case 7: /*WER*/
1350 TBD();
1351 break;
1353 case 8: /*ROTWw*/
1354 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1355 gen_check_privilege(dc);
1357 TCGv_i32 tmp = tcg_const_i32(
1358 RRR_T | ((RRR_T & 8) ? 0xfffffff0 : 0));
1359 gen_helper_rotw(cpu_env, tmp);
1360 tcg_temp_free(tmp);
1361 reset_used_window(dc);
1363 break;
1365 case 14: /*NSAu*/
1366 HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
1367 gen_window_check2(dc, RRR_S, RRR_T);
1368 gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]);
1369 break;
1371 case 15: /*NSAUu*/
1372 HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
1373 gen_window_check2(dc, RRR_S, RRR_T);
1374 gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]);
1375 break;
1377 default: /*reserved*/
1378 RESERVED();
1379 break;
1381 break;
1383 case 5: /*TLB*/
1384 HAS_OPTION_BITS(
1385 XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) |
1386 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
1387 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION));
1388 gen_check_privilege(dc);
1389 gen_window_check2(dc, RRR_S, RRR_T);
1391 TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0);
1393 switch (RRR_R & 7) {
1394 case 3: /*RITLB0*/ /*RDTLB0*/
1395 gen_helper_rtlb0(cpu_R[RRR_T],
1396 cpu_env, cpu_R[RRR_S], dtlb);
1397 break;
1399 case 4: /*IITLB*/ /*IDTLB*/
1400 gen_helper_itlb(cpu_env, cpu_R[RRR_S], dtlb);
1401 /* This could change memory mapping, so exit tb */
1402 gen_jumpi_check_loop_end(dc, -1);
1403 break;
1405 case 5: /*PITLB*/ /*PDTLB*/
1406 tcg_gen_movi_i32(cpu_pc, dc->pc);
1407 gen_helper_ptlb(cpu_R[RRR_T],
1408 cpu_env, cpu_R[RRR_S], dtlb);
1409 break;
1411 case 6: /*WITLB*/ /*WDTLB*/
1412 gen_helper_wtlb(
1413 cpu_env, cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
1414 /* This could change memory mapping, so exit tb */
1415 gen_jumpi_check_loop_end(dc, -1);
1416 break;
1418 case 7: /*RITLB1*/ /*RDTLB1*/
1419 gen_helper_rtlb1(cpu_R[RRR_T],
1420 cpu_env, cpu_R[RRR_S], dtlb);
1421 break;
1423 default:
1424 tcg_temp_free(dtlb);
1425 RESERVED();
1426 break;
1428 tcg_temp_free(dtlb);
1430 break;
1432 case 6: /*RT0*/
1433 gen_window_check2(dc, RRR_R, RRR_T);
1434 switch (RRR_S) {
1435 case 0: /*NEG*/
1436 tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
1437 break;
1439 case 1: /*ABS*/
1441 TCGv_i32 zero = tcg_const_i32(0);
1442 TCGv_i32 neg = tcg_temp_new_i32();
1444 tcg_gen_neg_i32(neg, cpu_R[RRR_T]);
1445 tcg_gen_movcond_i32(TCG_COND_GE, cpu_R[RRR_R],
1446 cpu_R[RRR_T], zero, cpu_R[RRR_T], neg);
1447 tcg_temp_free(neg);
1448 tcg_temp_free(zero);
1450 break;
1452 default: /*reserved*/
1453 RESERVED();
1454 break;
1456 break;
1458 case 7: /*reserved*/
1459 RESERVED();
1460 break;
1462 case 8: /*ADD*/
1463 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1464 tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1465 break;
1467 case 9: /*ADD**/
1468 case 10:
1469 case 11:
1470 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1472 TCGv_i32 tmp = tcg_temp_new_i32();
1473 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8);
1474 tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
1475 tcg_temp_free(tmp);
1477 break;
1479 case 12: /*SUB*/
1480 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1481 tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1482 break;
1484 case 13: /*SUB**/
1485 case 14:
1486 case 15:
1487 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1489 TCGv_i32 tmp = tcg_temp_new_i32();
1490 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12);
1491 tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
1492 tcg_temp_free(tmp);
1494 break;
1496 break;
1498 case 1: /*RST1*/
1499 switch (OP2) {
1500 case 0: /*SLLI*/
1501 case 1:
1502 gen_window_check2(dc, RRR_R, RRR_S);
1503 tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S],
1504 32 - (RRR_T | ((OP2 & 1) << 4)));
1505 break;
1507 case 2: /*SRAI*/
1508 case 3:
1509 gen_window_check2(dc, RRR_R, RRR_T);
1510 tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T],
1511 RRR_S | ((OP2 & 1) << 4));
1512 break;
1514 case 4: /*SRLI*/
1515 gen_window_check2(dc, RRR_R, RRR_T);
1516 tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S);
1517 break;
1519 case 6: /*XSR*/
1520 if (gen_check_sr(dc, RSR_SR, SR_X)) {
1521 TCGv_i32 tmp = tcg_temp_new_i32();
1523 if (RSR_SR >= 64) {
1524 gen_check_privilege(dc);
1526 gen_window_check1(dc, RRR_T);
1527 tcg_gen_mov_i32(tmp, cpu_R[RRR_T]);
1528 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
1529 gen_wsr(dc, RSR_SR, tmp);
1530 tcg_temp_free(tmp);
1532 break;
1535 * Note: 64 bit ops are used here solely because SAR values
1536 * have range 0..63
1538 #define gen_shift_reg(cmd, reg) do { \
1539 TCGv_i64 tmp = tcg_temp_new_i64(); \
1540 tcg_gen_extu_i32_i64(tmp, reg); \
1541 tcg_gen_##cmd##_i64(v, v, tmp); \
1542 tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \
1543 tcg_temp_free_i64(v); \
1544 tcg_temp_free_i64(tmp); \
1545 } while (0)
1547 #define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
1549 case 8: /*SRC*/
1550 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1552 TCGv_i64 v = tcg_temp_new_i64();
1553 tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]);
1554 gen_shift(shr);
1556 break;
1558 case 9: /*SRL*/
1559 gen_window_check2(dc, RRR_R, RRR_T);
1560 if (dc->sar_5bit) {
1561 tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
1562 } else {
1563 TCGv_i64 v = tcg_temp_new_i64();
1564 tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]);
1565 gen_shift(shr);
1567 break;
1569 case 10: /*SLL*/
1570 gen_window_check2(dc, RRR_R, RRR_S);
1571 if (dc->sar_m32_5bit) {
1572 tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32);
1573 } else {
1574 TCGv_i64 v = tcg_temp_new_i64();
1575 TCGv_i32 s = tcg_const_i32(32);
1576 tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
1577 tcg_gen_andi_i32(s, s, 0x3f);
1578 tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]);
1579 gen_shift_reg(shl, s);
1580 tcg_temp_free(s);
1582 break;
1584 case 11: /*SRA*/
1585 gen_window_check2(dc, RRR_R, RRR_T);
1586 if (dc->sar_5bit) {
1587 tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
1588 } else {
1589 TCGv_i64 v = tcg_temp_new_i64();
1590 tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]);
1591 gen_shift(sar);
1593 break;
1594 #undef gen_shift
1595 #undef gen_shift_reg
1597 case 12: /*MUL16U*/
1598 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
1599 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1601 TCGv_i32 v1 = tcg_temp_new_i32();
1602 TCGv_i32 v2 = tcg_temp_new_i32();
1603 tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]);
1604 tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]);
1605 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
1606 tcg_temp_free(v2);
1607 tcg_temp_free(v1);
1609 break;
1611 case 13: /*MUL16S*/
1612 HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
1613 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1615 TCGv_i32 v1 = tcg_temp_new_i32();
1616 TCGv_i32 v2 = tcg_temp_new_i32();
1617 tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]);
1618 tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]);
1619 tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
1620 tcg_temp_free(v2);
1621 tcg_temp_free(v1);
1623 break;
1625 default: /*reserved*/
1626 RESERVED();
1627 break;
1629 break;
1631 case 2: /*RST2*/
1632 if (OP2 >= 8) {
1633 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1636 if (OP2 >= 12) {
1637 HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV);
1638 int label = gen_new_label();
1639 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label);
1640 gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE);
1641 gen_set_label(label);
1644 switch (OP2) {
1645 #define BOOLEAN_LOGIC(fn, r, s, t) \
1646 do { \
1647 HAS_OPTION(XTENSA_OPTION_BOOLEAN); \
1648 TCGv_i32 tmp1 = tcg_temp_new_i32(); \
1649 TCGv_i32 tmp2 = tcg_temp_new_i32(); \
1651 tcg_gen_shri_i32(tmp1, cpu_SR[BR], s); \
1652 tcg_gen_shri_i32(tmp2, cpu_SR[BR], t); \
1653 tcg_gen_##fn##_i32(tmp1, tmp1, tmp2); \
1654 tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp1, r, 1); \
1655 tcg_temp_free(tmp1); \
1656 tcg_temp_free(tmp2); \
1657 } while (0)
1659 case 0: /*ANDBp*/
1660 BOOLEAN_LOGIC(and, RRR_R, RRR_S, RRR_T);
1661 break;
1663 case 1: /*ANDBCp*/
1664 BOOLEAN_LOGIC(andc, RRR_R, RRR_S, RRR_T);
1665 break;
1667 case 2: /*ORBp*/
1668 BOOLEAN_LOGIC(or, RRR_R, RRR_S, RRR_T);
1669 break;
1671 case 3: /*ORBCp*/
1672 BOOLEAN_LOGIC(orc, RRR_R, RRR_S, RRR_T);
1673 break;
1675 case 4: /*XORBp*/
1676 BOOLEAN_LOGIC(xor, RRR_R, RRR_S, RRR_T);
1677 break;
1679 #undef BOOLEAN_LOGIC
1681 case 8: /*MULLi*/
1682 HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL);
1683 tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1684 break;
1686 case 10: /*MULUHi*/
1687 case 11: /*MULSHi*/
1688 HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL_HIGH);
1690 TCGv lo = tcg_temp_new();
1692 if (OP2 == 10) {
1693 tcg_gen_mulu2_i32(lo, cpu_R[RRR_R],
1694 cpu_R[RRR_S], cpu_R[RRR_T]);
1695 } else {
1696 tcg_gen_muls2_i32(lo, cpu_R[RRR_R],
1697 cpu_R[RRR_S], cpu_R[RRR_T]);
1699 tcg_temp_free(lo);
1701 break;
1703 case 12: /*QUOUi*/
1704 tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1705 break;
1707 case 13: /*QUOSi*/
1708 case 15: /*REMSi*/
1710 int label1 = gen_new_label();
1711 int label2 = gen_new_label();
1713 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000,
1714 label1);
1715 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff,
1716 label1);
1717 tcg_gen_movi_i32(cpu_R[RRR_R],
1718 OP2 == 13 ? 0x80000000 : 0);
1719 tcg_gen_br(label2);
1720 gen_set_label(label1);
1721 if (OP2 == 13) {
1722 tcg_gen_div_i32(cpu_R[RRR_R],
1723 cpu_R[RRR_S], cpu_R[RRR_T]);
1724 } else {
1725 tcg_gen_rem_i32(cpu_R[RRR_R],
1726 cpu_R[RRR_S], cpu_R[RRR_T]);
1728 gen_set_label(label2);
1730 break;
1732 case 14: /*REMUi*/
1733 tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
1734 break;
1736 default: /*reserved*/
1737 RESERVED();
1738 break;
1740 break;
1742 case 3: /*RST3*/
1743 switch (OP2) {
1744 case 0: /*RSR*/
1745 if (gen_check_sr(dc, RSR_SR, SR_R)) {
1746 if (RSR_SR >= 64) {
1747 gen_check_privilege(dc);
1749 gen_window_check1(dc, RRR_T);
1750 gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
1752 break;
1754 case 1: /*WSR*/
1755 if (gen_check_sr(dc, RSR_SR, SR_W)) {
1756 if (RSR_SR >= 64) {
1757 gen_check_privilege(dc);
1759 gen_window_check1(dc, RRR_T);
1760 gen_wsr(dc, RSR_SR, cpu_R[RRR_T]);
1762 break;
1764 case 2: /*SEXTu*/
1765 HAS_OPTION(XTENSA_OPTION_MISC_OP_SEXT);
1766 gen_window_check2(dc, RRR_R, RRR_S);
1768 int shift = 24 - RRR_T;
1770 if (shift == 24) {
1771 tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1772 } else if (shift == 16) {
1773 tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
1774 } else {
1775 TCGv_i32 tmp = tcg_temp_new_i32();
1776 tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift);
1777 tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift);
1778 tcg_temp_free(tmp);
1781 break;
1783 case 3: /*CLAMPSu*/
1784 HAS_OPTION(XTENSA_OPTION_MISC_OP_CLAMPS);
1785 gen_window_check2(dc, RRR_R, RRR_S);
1787 TCGv_i32 tmp1 = tcg_temp_new_i32();
1788 TCGv_i32 tmp2 = tcg_temp_new_i32();
1789 TCGv_i32 zero = tcg_const_i32(0);
1791 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T);
1792 tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]);
1793 tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7));
1795 tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31);
1796 tcg_gen_xori_i32(tmp1, tmp1, 0xffffffff >> (25 - RRR_T));
1798 tcg_gen_movcond_i32(TCG_COND_EQ, cpu_R[RRR_R], tmp2, zero,
1799 cpu_R[RRR_S], tmp1);
1800 tcg_temp_free(tmp1);
1801 tcg_temp_free(tmp2);
1802 tcg_temp_free(zero);
1804 break;
1806 case 4: /*MINu*/
1807 case 5: /*MAXu*/
1808 case 6: /*MINUu*/
1809 case 7: /*MAXUu*/
1810 HAS_OPTION(XTENSA_OPTION_MISC_OP_MINMAX);
1811 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1813 static const TCGCond cond[] = {
1814 TCG_COND_LE,
1815 TCG_COND_GE,
1816 TCG_COND_LEU,
1817 TCG_COND_GEU
1819 tcg_gen_movcond_i32(cond[OP2 - 4], cpu_R[RRR_R],
1820 cpu_R[RRR_S], cpu_R[RRR_T],
1821 cpu_R[RRR_S], cpu_R[RRR_T]);
1823 break;
1825 case 8: /*MOVEQZ*/
1826 case 9: /*MOVNEZ*/
1827 case 10: /*MOVLTZ*/
1828 case 11: /*MOVGEZ*/
1829 gen_window_check3(dc, RRR_R, RRR_S, RRR_T);
1831 static const TCGCond cond[] = {
1832 TCG_COND_EQ,
1833 TCG_COND_NE,
1834 TCG_COND_LT,
1835 TCG_COND_GE,
1837 TCGv_i32 zero = tcg_const_i32(0);
1839 tcg_gen_movcond_i32(cond[OP2 - 8], cpu_R[RRR_R],
1840 cpu_R[RRR_T], zero, cpu_R[RRR_S], cpu_R[RRR_R]);
1841 tcg_temp_free(zero);
1843 break;
1845 case 12: /*MOVFp*/
1846 case 13: /*MOVTp*/
1847 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
1848 gen_window_check2(dc, RRR_R, RRR_S);
1850 TCGv_i32 zero = tcg_const_i32(0);
1851 TCGv_i32 tmp = tcg_temp_new_i32();
1853 tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
1854 tcg_gen_movcond_i32(OP2 & 1 ? TCG_COND_NE : TCG_COND_EQ,
1855 cpu_R[RRR_R], tmp, zero,
1856 cpu_R[RRR_S], cpu_R[RRR_R]);
1858 tcg_temp_free(tmp);
1859 tcg_temp_free(zero);
1861 break;
1863 case 14: /*RUR*/
1864 gen_window_check1(dc, RRR_R);
1866 int st = (RRR_S << 4) + RRR_T;
1867 if (uregnames[st].name) {
1868 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]);
1869 } else {
1870 qemu_log("RUR %d not implemented, ", st);
1871 TBD();
1874 break;
1876 case 15: /*WUR*/
1877 gen_window_check1(dc, RRR_T);
1878 if (uregnames[RSR_SR].name) {
1879 gen_wur(RSR_SR, cpu_R[RRR_T]);
1880 } else {
1881 qemu_log("WUR %d not implemented, ", RSR_SR);
1882 TBD();
1884 break;
1887 break;
1889 case 4: /*EXTUI*/
1890 case 5:
1891 gen_window_check2(dc, RRR_R, RRR_T);
1893 int shiftimm = RRR_S | ((OP1 & 1) << 4);
1894 int maskimm = (1 << (OP2 + 1)) - 1;
1896 TCGv_i32 tmp = tcg_temp_new_i32();
1897 tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm);
1898 tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm);
1899 tcg_temp_free(tmp);
1901 break;
1903 case 6: /*CUST0*/
1904 RESERVED();
1905 break;
1907 case 7: /*CUST1*/
1908 RESERVED();
1909 break;
1911 case 8: /*LSCXp*/
1912 switch (OP2) {
1913 case 0: /*LSXf*/
1914 case 1: /*LSXUf*/
1915 case 4: /*SSXf*/
1916 case 5: /*SSXUf*/
1917 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1918 gen_window_check2(dc, RRR_S, RRR_T);
1919 gen_check_cpenable(dc, 0);
1921 TCGv_i32 addr = tcg_temp_new_i32();
1922 tcg_gen_add_i32(addr, cpu_R[RRR_S], cpu_R[RRR_T]);
1923 gen_load_store_alignment(dc, 2, addr, false);
1924 if (OP2 & 0x4) {
1925 tcg_gen_qemu_st32(cpu_FR[RRR_R], addr, dc->cring);
1926 } else {
1927 tcg_gen_qemu_ld32u(cpu_FR[RRR_R], addr, dc->cring);
1929 if (OP2 & 0x1) {
1930 tcg_gen_mov_i32(cpu_R[RRR_S], addr);
1932 tcg_temp_free(addr);
1934 break;
1936 default: /*reserved*/
1937 RESERVED();
1938 break;
1940 break;
1942 case 9: /*LSC4*/
1943 gen_window_check2(dc, RRR_S, RRR_T);
1944 switch (OP2) {
1945 case 0: /*L32E*/
1946 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1947 gen_check_privilege(dc);
1949 TCGv_i32 addr = tcg_temp_new_i32();
1950 tcg_gen_addi_i32(addr, cpu_R[RRR_S],
1951 (0xffffffc0 | (RRR_R << 2)));
1952 tcg_gen_qemu_ld32u(cpu_R[RRR_T], addr, dc->ring);
1953 tcg_temp_free(addr);
1955 break;
1957 case 4: /*S32E*/
1958 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
1959 gen_check_privilege(dc);
1961 TCGv_i32 addr = tcg_temp_new_i32();
1962 tcg_gen_addi_i32(addr, cpu_R[RRR_S],
1963 (0xffffffc0 | (RRR_R << 2)));
1964 tcg_gen_qemu_st32(cpu_R[RRR_T], addr, dc->ring);
1965 tcg_temp_free(addr);
1967 break;
1969 default:
1970 RESERVED();
1971 break;
1973 break;
1975 case 10: /*FP0*/
1976 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
1977 switch (OP2) {
1978 case 0: /*ADD.Sf*/
1979 gen_check_cpenable(dc, 0);
1980 gen_helper_add_s(cpu_FR[RRR_R], cpu_env,
1981 cpu_FR[RRR_S], cpu_FR[RRR_T]);
1982 break;
1984 case 1: /*SUB.Sf*/
1985 gen_check_cpenable(dc, 0);
1986 gen_helper_sub_s(cpu_FR[RRR_R], cpu_env,
1987 cpu_FR[RRR_S], cpu_FR[RRR_T]);
1988 break;
1990 case 2: /*MUL.Sf*/
1991 gen_check_cpenable(dc, 0);
1992 gen_helper_mul_s(cpu_FR[RRR_R], cpu_env,
1993 cpu_FR[RRR_S], cpu_FR[RRR_T]);
1994 break;
1996 case 4: /*MADD.Sf*/
1997 gen_check_cpenable(dc, 0);
1998 gen_helper_madd_s(cpu_FR[RRR_R], cpu_env,
1999 cpu_FR[RRR_R], cpu_FR[RRR_S], cpu_FR[RRR_T]);
2000 break;
2002 case 5: /*MSUB.Sf*/
2003 gen_check_cpenable(dc, 0);
2004 gen_helper_msub_s(cpu_FR[RRR_R], cpu_env,
2005 cpu_FR[RRR_R], cpu_FR[RRR_S], cpu_FR[RRR_T]);
2006 break;
2008 case 8: /*ROUND.Sf*/
2009 case 9: /*TRUNC.Sf*/
2010 case 10: /*FLOOR.Sf*/
2011 case 11: /*CEIL.Sf*/
2012 case 14: /*UTRUNC.Sf*/
2013 gen_window_check1(dc, RRR_R);
2014 gen_check_cpenable(dc, 0);
2016 static const unsigned rounding_mode_const[] = {
2017 float_round_nearest_even,
2018 float_round_to_zero,
2019 float_round_down,
2020 float_round_up,
2021 [6] = float_round_to_zero,
2023 TCGv_i32 rounding_mode = tcg_const_i32(
2024 rounding_mode_const[OP2 & 7]);
2025 TCGv_i32 scale = tcg_const_i32(RRR_T);
2027 if (OP2 == 14) {
2028 gen_helper_ftoui(cpu_R[RRR_R], cpu_FR[RRR_S],
2029 rounding_mode, scale);
2030 } else {
2031 gen_helper_ftoi(cpu_R[RRR_R], cpu_FR[RRR_S],
2032 rounding_mode, scale);
2035 tcg_temp_free(rounding_mode);
2036 tcg_temp_free(scale);
2038 break;
2040 case 12: /*FLOAT.Sf*/
2041 case 13: /*UFLOAT.Sf*/
2042 gen_window_check1(dc, RRR_S);
2043 gen_check_cpenable(dc, 0);
2045 TCGv_i32 scale = tcg_const_i32(-RRR_T);
2047 if (OP2 == 13) {
2048 gen_helper_uitof(cpu_FR[RRR_R], cpu_env,
2049 cpu_R[RRR_S], scale);
2050 } else {
2051 gen_helper_itof(cpu_FR[RRR_R], cpu_env,
2052 cpu_R[RRR_S], scale);
2054 tcg_temp_free(scale);
2056 break;
2058 case 15: /*FP1OP*/
2059 switch (RRR_T) {
2060 case 0: /*MOV.Sf*/
2061 gen_check_cpenable(dc, 0);
2062 tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_FR[RRR_S]);
2063 break;
2065 case 1: /*ABS.Sf*/
2066 gen_check_cpenable(dc, 0);
2067 gen_helper_abs_s(cpu_FR[RRR_R], cpu_FR[RRR_S]);
2068 break;
2070 case 4: /*RFRf*/
2071 gen_window_check1(dc, RRR_R);
2072 gen_check_cpenable(dc, 0);
2073 tcg_gen_mov_i32(cpu_R[RRR_R], cpu_FR[RRR_S]);
2074 break;
2076 case 5: /*WFRf*/
2077 gen_window_check1(dc, RRR_S);
2078 gen_check_cpenable(dc, 0);
2079 tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_R[RRR_S]);
2080 break;
2082 case 6: /*NEG.Sf*/
2083 gen_check_cpenable(dc, 0);
2084 gen_helper_neg_s(cpu_FR[RRR_R], cpu_FR[RRR_S]);
2085 break;
2087 default: /*reserved*/
2088 RESERVED();
2089 break;
2091 break;
2093 default: /*reserved*/
2094 RESERVED();
2095 break;
2097 break;
2099 case 11: /*FP1*/
2100 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
2102 #define gen_compare(rel, br, a, b) \
2103 do { \
2104 TCGv_i32 bit = tcg_const_i32(1 << br); \
2106 gen_check_cpenable(dc, 0); \
2107 gen_helper_##rel(cpu_env, bit, cpu_FR[a], cpu_FR[b]); \
2108 tcg_temp_free(bit); \
2109 } while (0)
2111 switch (OP2) {
2112 case 1: /*UN.Sf*/
2113 gen_compare(un_s, RRR_R, RRR_S, RRR_T);
2114 break;
2116 case 2: /*OEQ.Sf*/
2117 gen_compare(oeq_s, RRR_R, RRR_S, RRR_T);
2118 break;
2120 case 3: /*UEQ.Sf*/
2121 gen_compare(ueq_s, RRR_R, RRR_S, RRR_T);
2122 break;
2124 case 4: /*OLT.Sf*/
2125 gen_compare(olt_s, RRR_R, RRR_S, RRR_T);
2126 break;
2128 case 5: /*ULT.Sf*/
2129 gen_compare(ult_s, RRR_R, RRR_S, RRR_T);
2130 break;
2132 case 6: /*OLE.Sf*/
2133 gen_compare(ole_s, RRR_R, RRR_S, RRR_T);
2134 break;
2136 case 7: /*ULE.Sf*/
2137 gen_compare(ule_s, RRR_R, RRR_S, RRR_T);
2138 break;
2140 #undef gen_compare
2142 case 8: /*MOVEQZ.Sf*/
2143 case 9: /*MOVNEZ.Sf*/
2144 case 10: /*MOVLTZ.Sf*/
2145 case 11: /*MOVGEZ.Sf*/
2146 gen_window_check1(dc, RRR_T);
2147 gen_check_cpenable(dc, 0);
2149 static const TCGCond cond[] = {
2150 TCG_COND_EQ,
2151 TCG_COND_NE,
2152 TCG_COND_LT,
2153 TCG_COND_GE,
2155 TCGv_i32 zero = tcg_const_i32(0);
2157 tcg_gen_movcond_i32(cond[OP2 - 8], cpu_FR[RRR_R],
2158 cpu_R[RRR_T], zero, cpu_FR[RRR_S], cpu_FR[RRR_R]);
2159 tcg_temp_free(zero);
2161 break;
2163 case 12: /*MOVF.Sf*/
2164 case 13: /*MOVT.Sf*/
2165 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
2166 gen_check_cpenable(dc, 0);
2168 TCGv_i32 zero = tcg_const_i32(0);
2169 TCGv_i32 tmp = tcg_temp_new_i32();
2171 tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
2172 tcg_gen_movcond_i32(OP2 & 1 ? TCG_COND_NE : TCG_COND_EQ,
2173 cpu_FR[RRR_R], tmp, zero,
2174 cpu_FR[RRR_S], cpu_FR[RRR_R]);
2176 tcg_temp_free(tmp);
2177 tcg_temp_free(zero);
2179 break;
2181 default: /*reserved*/
2182 RESERVED();
2183 break;
2185 break;
2187 default: /*reserved*/
2188 RESERVED();
2189 break;
2191 break;
2193 case 1: /*L32R*/
2194 gen_window_check1(dc, RRR_T);
2196 TCGv_i32 tmp = tcg_const_i32(
2197 ((dc->tb->flags & XTENSA_TBFLAG_LITBASE) ?
2198 0 : ((dc->pc + 3) & ~3)) +
2199 (0xfffc0000 | (RI16_IMM16 << 2)));
2201 if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
2202 tcg_gen_add_i32(tmp, tmp, dc->litbase);
2203 }
2204 tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring);
2205 tcg_temp_free(tmp);
2206 }
2207 break;
2209 case 2: /*LSAI*/
2210 #define gen_load_store(type, shift) do { \
2211 TCGv_i32 addr = tcg_temp_new_i32(); \
2212 gen_window_check2(dc, RRI8_S, RRI8_T); \
2213 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \
2214 if (shift) { \
2215 gen_load_store_alignment(dc, shift, addr, false); \
2216 } \
2217 tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
2218 tcg_temp_free(addr); \
2219 } while (0)
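/*
 * gen_load_store() is the common LSAI pattern: compute
 * addr = AR[s] + (imm8 << shift), emit an alignment check for the 16 and
 * 32-bit forms (shift != 0), and perform the access with dc->cring as the
 * MMU index so it is translated in the current ring.
 */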
2221 switch (RRI8_R) {
2222 case 0: /*L8UI*/
2223 gen_load_store(ld8u, 0);
2224 break;
2226 case 1: /*L16UI*/
2227 gen_load_store(ld16u, 1);
2228 break;
2230 case 2: /*L32I*/
2231 gen_load_store(ld32u, 2);
2232 break;
2234 case 4: /*S8I*/
2235 gen_load_store(st8, 0);
2236 break;
2238 case 5: /*S16I*/
2239 gen_load_store(st16, 1);
2240 break;
2242 case 6: /*S32I*/
2243 gen_load_store(st32, 2);
2244 break;
2246 #define gen_dcache_hit_test(w, shift) do { \
2247 TCGv_i32 addr = tcg_temp_new_i32(); \
2248 TCGv_i32 res = tcg_temp_new_i32(); \
2249 gen_window_check1(dc, RRI##w##_S); \
2250 tcg_gen_addi_i32(addr, cpu_R[RRI##w##_S], \
2251 RRI##w##_IMM##w << shift); \
2252 tcg_gen_qemu_ld8u(res, addr, dc->cring); \
2253 tcg_temp_free(addr); \
2254 tcg_temp_free(res); \
2255 } while (0)
2257 #define gen_dcache_hit_test4() gen_dcache_hit_test(4, 4)
2258 #define gen_dcache_hit_test8() gen_dcache_hit_test(8, 2)
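/*
 * The dcache hit-test macros above model the "hit" cache ops as a dummy
 * byte load from the target address whose result is immediately
 * discarded; presumably this is just enough to raise the same TLB and
 * permission exceptions a real hit test would raise, without modelling
 * the cache itself.
 */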
2260 case 7: /*CACHEc*/
2261 if (RRI8_T < 8) {
2262 HAS_OPTION(XTENSA_OPTION_DCACHE);
2263 }
2265 switch (RRI8_T) {
2266 case 0: /*DPFRc*/
2267 gen_window_check1(dc, RRI8_S);
2268 break;
2270 case 1: /*DPFWc*/
2271 gen_window_check1(dc, RRI8_S);
2272 break;
2274 case 2: /*DPFROc*/
2275 gen_window_check1(dc, RRI8_S);
2276 break;
2278 case 3: /*DPFWOc*/
2279 gen_window_check1(dc, RRI8_S);
2280 break;
2282 case 4: /*DHWBc*/
2283 gen_dcache_hit_test8();
2284 break;
2286 case 5: /*DHWBIc*/
2287 gen_dcache_hit_test8();
2288 break;
2290 case 6: /*DHIc*/
2291 gen_check_privilege(dc);
2292 gen_dcache_hit_test8();
2293 break;
2295 case 7: /*DIIc*/
2296 gen_check_privilege(dc);
2297 gen_window_check1(dc, RRI8_S);
2298 break;
2300 case 8: /*DCEc*/
2301 switch (OP1) {
2302 case 0: /*DPFLl*/
2303 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
2304 gen_check_privilege(dc);
2305 gen_dcache_hit_test4();
2306 break;
2308 case 2: /*DHUl*/
2309 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
2310 gen_check_privilege(dc);
2311 gen_dcache_hit_test4();
2312 break;
2314 case 3: /*DIUl*/
2315 HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
2316 gen_check_privilege(dc);
2317 gen_window_check1(dc, RRI4_S);
2318 break;
2320 case 4: /*DIWBc*/
2321 HAS_OPTION(XTENSA_OPTION_DCACHE);
2322 gen_check_privilege(dc);
2323 gen_window_check1(dc, RRI4_S);
2324 break;
2326 case 5: /*DIWBIc*/
2327 HAS_OPTION(XTENSA_OPTION_DCACHE);
2328 gen_check_privilege(dc);
2329 gen_window_check1(dc, RRI4_S);
2330 break;
2332 default: /*reserved*/
2333 RESERVED();
2334 break;
2335 }
2337 break;
2339 #undef gen_dcache_hit_test
2340 #undef gen_dcache_hit_test4
2341 #undef gen_dcache_hit_test8
2343 #define gen_icache_hit_test(w, shift) do { \
2344 TCGv_i32 addr = tcg_temp_new_i32(); \
2345 gen_window_check1(dc, RRI##w##_S); \
2346 tcg_gen_movi_i32(cpu_pc, dc->pc); \
2347 tcg_gen_addi_i32(addr, cpu_R[RRI##w##_S], \
2348 RRI##w##_IMM##w << shift); \
2349 gen_helper_itlb_hit_test(cpu_env, addr); \
2350 tcg_temp_free(addr); \
2351 } while (0)
2353 #define gen_icache_hit_test4() gen_icache_hit_test(4, 4)
2354 #define gen_icache_hit_test8() gen_icache_hit_test(8, 2)
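/*
 * The icache hit-test macros above go through gen_helper_itlb_hit_test()
 * instead of a data access, and update cpu_pc first so that any exception
 * raised by the helper reports the address of the cache instruction.
 */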
2356 case 12: /*IPFc*/
2357 HAS_OPTION(XTENSA_OPTION_ICACHE);
2358 gen_window_check1(dc, RRI8_S);
2359 break;
2361 case 13: /*ICEc*/
2362 switch (OP1) {
2363 case 0: /*IPFLl*/
2364 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
2365 gen_check_privilege(dc);
2366 gen_icache_hit_test4();
2367 break;
2369 case 2: /*IHUl*/
2370 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
2371 gen_check_privilege(dc);
2372 gen_icache_hit_test4();
2373 break;
2375 case 3: /*IIUl*/
2376 HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
2377 gen_check_privilege(dc);
2378 gen_window_check1(dc, RRI4_S);
2379 break;
2381 default: /*reserved*/
2382 RESERVED();
2383 break;
2384 }
2385 break;
2387 case 14: /*IHIc*/
2388 HAS_OPTION(XTENSA_OPTION_ICACHE);
2389 gen_icache_hit_test8();
2390 break;
2392 case 15: /*IIIc*/
2393 HAS_OPTION(XTENSA_OPTION_ICACHE);
2394 gen_check_privilege(dc);
2395 gen_window_check1(dc, RRI8_S);
2396 break;
2398 default: /*reserved*/
2399 RESERVED();
2400 break;
2401 }
2402 break;
2404 #undef gen_icache_hit_test
2405 #undef gen_icache_hit_test4
2406 #undef gen_icache_hit_test8
2408 case 9: /*L16SI*/
2409 gen_load_store(ld16s, 1);
2410 break;
2411 #undef gen_load_store
2413 case 10: /*MOVI*/
2414 gen_window_check1(dc, RRI8_T);
2415 tcg_gen_movi_i32(cpu_R[RRI8_T],
2416 RRI8_IMM8 | (RRI8_S << 8) |
2417 ((RRI8_S & 0x8) ? 0xfffff000 : 0));
2418 break;
2420 #define gen_load_store_no_hw_align(type) do { \
2421 TCGv_i32 addr = tcg_temp_local_new_i32(); \
2422 gen_window_check2(dc, RRI8_S, RRI8_T); \
2423 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); \
2424 gen_load_store_alignment(dc, 2, addr, true); \
2425 tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
2426 tcg_temp_free(addr); \
2427 } while (0)
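/*
 * Unlike gen_load_store(), this variant passes true as the last argument
 * of gen_load_store_alignment(), which appears to force an alignment
 * check even on configurations that otherwise allow unaligned data
 * accesses; L32AI, S32RI and S32C1I require naturally aligned addresses.
 */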
2429 case 11: /*L32AIy*/
2430 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
2431 gen_load_store_no_hw_align(ld32u); /*TODO acquire?*/
2432 break;
2434 case 12: /*ADDI*/
2435 gen_window_check2(dc, RRI8_S, RRI8_T);
2436 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE);
2437 break;
2439 case 13: /*ADDMI*/
2440 gen_window_check2(dc, RRI8_S, RRI8_T);
2441 tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE << 8);
2442 break;
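/*
 * S32C1I below is the compare-and-swap style conditional store: after the
 * ATOMCTL check the word at addr is loaded into AR[t], and the original
 * AR[t] value (saved in tmp) is stored back only if the loaded word
 * equals SCOMPARE1.  AR[t] always ends up holding the previous memory
 * value.
 */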
2444 case 14: /*S32C1Iy*/
2445 HAS_OPTION(XTENSA_OPTION_CONDITIONAL_STORE);
2446 gen_window_check2(dc, RRI8_S, RRI8_T);
2447 {
2448 int label = gen_new_label();
2449 TCGv_i32 tmp = tcg_temp_local_new_i32();
2450 TCGv_i32 addr = tcg_temp_local_new_i32();
2451 TCGv_i32 tpc;
2453 tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]);
2454 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
2455 gen_load_store_alignment(dc, 2, addr, true);
2457 gen_advance_ccount(dc);
2458 tpc = tcg_const_i32(dc->pc);
2459 gen_helper_check_atomctl(cpu_env, tpc, addr);
2460 tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring);
2461 tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T],
2462 cpu_SR[SCOMPARE1], label);
2464 tcg_gen_qemu_st32(tmp, addr, dc->cring);
2466 gen_set_label(label);
2467 tcg_temp_free(tpc);
2468 tcg_temp_free(addr);
2469 tcg_temp_free(tmp);
2470 }
2471 break;
2473 case 15: /*S32RIy*/
2474 HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
2475 gen_load_store_no_hw_align(st32); /*TODO release?*/
2476 break;
2477 #undef gen_load_store_no_hw_align
2479 default: /*reserved*/
2480 RESERVED();
2481 break;
2482 }
2483 break;
2485 case 3: /*LSCIp*/
2486 switch (RRI8_R) {
2487 case 0: /*LSIf*/
2488 case 4: /*SSIf*/
2489 case 8: /*LSIUf*/
2490 case 12: /*SSIUf*/
2491 HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
2492 gen_window_check1(dc, RRI8_S);
2493 gen_check_cpenable(dc, 0);
2494 {
2495 TCGv_i32 addr = tcg_temp_new_i32();
2496 tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
2497 gen_load_store_alignment(dc, 2, addr, false);
2498 if (RRI8_R & 0x4) {
2499 tcg_gen_qemu_st32(cpu_FR[RRI8_T], addr, dc->cring);
2500 } else {
2501 tcg_gen_qemu_ld32u(cpu_FR[RRI8_T], addr, dc->cring);
2502 }
2503 if (RRI8_R & 0x8) {
2504 tcg_gen_mov_i32(cpu_R[RRI8_S], addr);
2505 }
2506 tcg_temp_free(addr);
2507 }
2508 break;
2510 default: /*reserved*/
2511 RESERVED();
2512 break;
2513 }
2514 break;
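/*
 * The MAC16 group below combines up to three pieces: a 16x16 multiply
 * (signed, or unsigned for UMUL) of operand halves selected via OP1 bits,
 * an optional add/subtract into the 40-bit accumulator (ACCLO/ACCHI, with
 * ACCHI kept sign-extended to 8 bits), and for the LDINC/LDDEC forms a
 * pre-incremented 32-bit load whose result lands in MR[w] and whose
 * updated address is written back to AR[s].
 */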
2516 case 4: /*MAC16d*/
2517 HAS_OPTION(XTENSA_OPTION_MAC16);
2518 {
2519 enum {
2520 MAC16_UMUL = 0x0,
2521 MAC16_MUL = 0x4,
2522 MAC16_MULA = 0x8,
2523 MAC16_MULS = 0xc,
2524 MAC16_NONE = 0xf,
2525 } op = OP1 & 0xc;
2526 bool is_m1_sr = (OP2 & 0x3) == 2;
2527 bool is_m2_sr = (OP2 & 0xc) == 0;
2528 uint32_t ld_offset = 0;
2530 if (OP2 > 9) {
2531 RESERVED();
2532 }
2534 switch (OP2 & 2) {
2535 case 0: /*MACI?/MACC?*/
2536 is_m1_sr = true;
2537 ld_offset = (OP2 & 1) ? -4 : 4;
2539 if (OP2 >= 8) { /*MACI/MACC*/
2540 if (OP1 == 0) { /*LDINC/LDDEC*/
2541 op = MAC16_NONE;
2542 } else {
2543 RESERVED();
2544 }
2545 } else if (op != MAC16_MULA) { /*MULA.*.*.LDINC/LDDEC*/
2546 RESERVED();
2547 }
2548 break;
2550 case 2: /*MACD?/MACA?*/
2551 if (op == MAC16_UMUL && OP2 != 7) { /*UMUL only in MACAA*/
2552 RESERVED();
2553 }
2554 break;
2555 }
2557 if (op != MAC16_NONE) {
2558 if (!is_m1_sr) {
2559 gen_window_check1(dc, RRR_S);
2560 }
2561 if (!is_m2_sr) {
2562 gen_window_check1(dc, RRR_T);
2563 }
2564 }
2566 {
2567 TCGv_i32 vaddr = tcg_temp_new_i32();
2568 TCGv_i32 mem32 = tcg_temp_new_i32();
2570 if (ld_offset) {
2571 gen_window_check1(dc, RRR_S);
2572 tcg_gen_addi_i32(vaddr, cpu_R[RRR_S], ld_offset);
2573 gen_load_store_alignment(dc, 2, vaddr, false);
2574 tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring);
2575 }
2576 if (op != MAC16_NONE) {
2577 TCGv_i32 m1 = gen_mac16_m(
2578 is_m1_sr ? cpu_SR[MR + RRR_X] : cpu_R[RRR_S],
2579 OP1 & 1, op == MAC16_UMUL);
2580 TCGv_i32 m2 = gen_mac16_m(
2581 is_m2_sr ? cpu_SR[MR + 2 + RRR_Y] : cpu_R[RRR_T],
2582 OP1 & 2, op == MAC16_UMUL);
2584 if (op == MAC16_MUL || op == MAC16_UMUL) {
2585 tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2);
2586 if (op == MAC16_UMUL) {
2587 tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
2588 } else {
2589 tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
2590 }
2591 } else {
2592 TCGv_i32 lo = tcg_temp_new_i32();
2593 TCGv_i32 hi = tcg_temp_new_i32();
2595 tcg_gen_mul_i32(lo, m1, m2);
2596 tcg_gen_sari_i32(hi, lo, 31);
2597 if (op == MAC16_MULA) {
2598 tcg_gen_add2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
2599 cpu_SR[ACCLO], cpu_SR[ACCHI],
2600 lo, hi);
2601 } else {
2602 tcg_gen_sub2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
2603 cpu_SR[ACCLO], cpu_SR[ACCHI],
2604 lo, hi);
2605 }
2606 tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]);
2608 tcg_temp_free_i32(lo);
2609 tcg_temp_free_i32(hi);
2610 }
2611 tcg_temp_free(m1);
2612 tcg_temp_free(m2);
2613 }
2614 if (ld_offset) {
2615 tcg_gen_mov_i32(cpu_R[RRR_S], vaddr);
2616 tcg_gen_mov_i32(cpu_SR[MR + RRR_W], mem32);
2617 }
2618 tcg_temp_free(vaddr);
2619 tcg_temp_free(mem32);
2620 }
2621 }
2622 break;
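/*
 * CALL0 below writes the return address to a0 and jumps to the 4-byte
 * aligned target (pc & ~3) + (offset << 2) + 4.  The windowed CALL4/8/12
 * forms first validate register a(4*N) in the current window and then go
 * through gen_callwi() with the same aligned target.
 */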
2624 case 5: /*CALLN*/
2625 switch (CALL_N) {
2626 case 0: /*CALL0*/
2627 tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
2628 gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
2629 break;
2631 case 1: /*CALL4w*/
2632 case 2: /*CALL8w*/
2633 case 3: /*CALL12w*/
2634 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2635 gen_window_check1(dc, CALL_N << 2);
2636 gen_callwi(dc, CALL_N,
2637 (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
2638 break;
2639 }
2640 break;
2642 case 6: /*SI*/
2643 switch (CALL_N) {
2644 case 0: /*J*/
2645 gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0);
2646 break;
2648 case 1: /*BZ*/
2649 gen_window_check1(dc, BRI12_S);
2650 {
2651 static const TCGCond cond[] = {
2652 TCG_COND_EQ, /*BEQZ*/
2653 TCG_COND_NE, /*BNEZ*/
2654 TCG_COND_LT, /*BLTZ*/
2655 TCG_COND_GE, /*BGEZ*/
2656 };
2658 gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0,
2659 4 + BRI12_IMM12_SE);
2660 }
2661 break;
2663 case 2: /*BI0*/
2664 gen_window_check1(dc, BRI8_S);
2665 {
2666 static const TCGCond cond[] = {
2667 TCG_COND_EQ, /*BEQI*/
2668 TCG_COND_NE, /*BNEI*/
2669 TCG_COND_LT, /*BLTI*/
2670 TCG_COND_GE, /*BGEI*/
2671 };
2673 gen_brcondi(dc, cond[BRI8_M & 3],
2674 cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE);
2675 }
2676 break;
2678 case 3: /*BI1*/
2679 switch (BRI8_M) {
2680 case 0: /*ENTRYw*/
2681 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2682 {
2683 TCGv_i32 pc = tcg_const_i32(dc->pc);
2684 TCGv_i32 s = tcg_const_i32(BRI12_S);
2685 TCGv_i32 imm = tcg_const_i32(BRI12_IMM12);
2686 gen_advance_ccount(dc);
2687 gen_helper_entry(cpu_env, pc, s, imm);
2688 tcg_temp_free(imm);
2689 tcg_temp_free(s);
2690 tcg_temp_free(pc);
2691 reset_used_window(dc);
2692 }
2693 break;
2695 case 1: /*B1*/
2696 switch (BRI8_R) {
2697 case 0: /*BFp*/
2698 case 1: /*BTp*/
2699 HAS_OPTION(XTENSA_OPTION_BOOLEAN);
2700 {
2701 TCGv_i32 tmp = tcg_temp_new_i32();
2702 tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRI8_S);
2703 gen_brcondi(dc,
2704 BRI8_R == 1 ? TCG_COND_NE : TCG_COND_EQ,
2705 tmp, 0, 4 + RRI8_IMM8_SE);
2706 tcg_temp_free(tmp);
2707 }
2708 break;
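/*
 * LOOP/LOOPNEZ/LOOPGTZ below set LCOUNT to AR[s] - 1, LBEG to the next
 * instruction and LEND via the wsr_lend helper.  For the conditional
 * forms the generated code jumps straight past the loop (to lend) when
 * AR[s] is zero or not positive; in every case the TB then ends with a
 * jump to the next pc, presumably so the new loop registers take effect
 * for the code that follows.
 */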
2710 case 8: /*LOOP*/
2711 case 9: /*LOOPNEZ*/
2712 case 10: /*LOOPGTZ*/
2713 HAS_OPTION(XTENSA_OPTION_LOOP);
2714 gen_window_check1(dc, RRI8_S);
2715 {
2716 uint32_t lend = dc->pc + RRI8_IMM8 + 4;
2717 TCGv_i32 tmp = tcg_const_i32(lend);
2719 tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1);
2720 tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc);
2721 gen_helper_wsr_lend(cpu_env, tmp);
2722 tcg_temp_free(tmp);
2724 if (BRI8_R > 8) {
2725 int label = gen_new_label();
2726 tcg_gen_brcondi_i32(
2727 BRI8_R == 9 ? TCG_COND_NE : TCG_COND_GT,
2728 cpu_R[RRI8_S], 0, label);
2729 gen_jumpi(dc, lend, 1);
2730 gen_set_label(label);
2731 }
2733 gen_jumpi(dc, dc->next_pc, 0);
2734 }
2735 break;
2737 default: /*reserved*/
2738 RESERVED();
2739 break;
2740 }
2742 break;
2744 case 2: /*BLTUI*/
2745 case 3: /*BGEUI*/
2746 gen_window_check1(dc, BRI8_S);
2747 gen_brcondi(dc, BRI8_M == 2 ? TCG_COND_LTU : TCG_COND_GEU,
2748 cpu_R[BRI8_S], B4CONSTU[BRI8_R], 4 + BRI8_IMM8_SE);
2749 break;
2750 }
2751 break;
2753 }
2754 break;
2756 case 7: /*B*/
2757 {
2758 TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ;
2760 switch (RRI8_R & 7) {
2761 case 0: /*BNONE*/ /*BANY*/
2762 gen_window_check2(dc, RRI8_S, RRI8_T);
2763 {
2764 TCGv_i32 tmp = tcg_temp_new_i32();
2765 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
2766 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2767 tcg_temp_free(tmp);
2768 }
2769 break;
2771 case 1: /*BEQ*/ /*BNE*/
2772 case 2: /*BLT*/ /*BGE*/
2773 case 3: /*BLTU*/ /*BGEU*/
2774 gen_window_check2(dc, RRI8_S, RRI8_T);
2775 {
2776 static const TCGCond cond[] = {
2777 [1] = TCG_COND_EQ,
2778 [2] = TCG_COND_LT,
2779 [3] = TCG_COND_LTU,
2780 [9] = TCG_COND_NE,
2781 [10] = TCG_COND_GE,
2782 [11] = TCG_COND_GEU,
2783 };
2784 gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T],
2785 4 + RRI8_IMM8_SE);
2786 }
2787 break;
2789 case 4: /*BALL*/ /*BNALL*/
2790 gen_window_check2(dc, RRI8_S, RRI8_T);
2791 {
2792 TCGv_i32 tmp = tcg_temp_new_i32();
2793 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
2794 gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T],
2795 4 + RRI8_IMM8_SE);
2796 tcg_temp_free(tmp);
2797 }
2798 break;
2800 case 5: /*BBC*/ /*BBS*/
2801 gen_window_check2(dc, RRI8_S, RRI8_T);
2802 {
2803 #ifdef TARGET_WORDS_BIGENDIAN
2804 TCGv_i32 bit = tcg_const_i32(0x80000000);
2805 #else
2806 TCGv_i32 bit = tcg_const_i32(0x00000001);
2807 #endif
2808 TCGv_i32 tmp = tcg_temp_new_i32();
2809 tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f);
2810 #ifdef TARGET_WORDS_BIGENDIAN
2811 tcg_gen_shr_i32(bit, bit, tmp);
2812 #else
2813 tcg_gen_shl_i32(bit, bit, tmp);
2814 #endif
2815 tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit);
2816 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2817 tcg_temp_free(tmp);
2818 tcg_temp_free(bit);
2819 }
2820 break;
2822 case 6: /*BBCI*/ /*BBSI*/
2823 case 7:
2824 gen_window_check1(dc, RRI8_S);
2825 {
2826 TCGv_i32 tmp = tcg_temp_new_i32();
2827 tcg_gen_andi_i32(tmp, cpu_R[RRI8_S],
2828 #ifdef TARGET_WORDS_BIGENDIAN
2829 0x80000000 >> (((RRI8_R & 1) << 4) | RRI8_T));
2830 #else
2831 0x00000001 << (((RRI8_R & 1) << 4) | RRI8_T));
2832 #endif
2833 gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
2834 tcg_temp_free(tmp);
2835 }
2836 break;
2837 }
2838 }
2840 break;
2842 #define gen_narrow_load_store(type) do { \
2843 TCGv_i32 addr = tcg_temp_new_i32(); \
2844 gen_window_check2(dc, RRRN_S, RRRN_T); \
2845 tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \
2846 gen_load_store_alignment(dc, 2, addr, false); \
2847 tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \
2848 tcg_temp_free(addr); \
2849 } while (0)
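/*
 * gen_narrow_load_store() handles the 16-bit encoded L32I.N/S32I.N forms:
 * same pattern as gen_load_store(), but with a 4-bit offset scaled by 4
 * and an alignment check for the 32-bit access.
 */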
2851 case 8: /*L32I.Nn*/
2852 gen_narrow_load_store(ld32u);
2853 break;
2855 case 9: /*S32I.Nn*/
2856 gen_narrow_load_store(st32);
2857 break;
2858 #undef gen_narrow_load_store
2860 case 10: /*ADD.Nn*/
2861 gen_window_check3(dc, RRRN_R, RRRN_S, RRRN_T);
2862 tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]);
2863 break;
2865 case 11: /*ADDI.Nn*/
2866 gen_window_check2(dc, RRRN_R, RRRN_S);
2867 tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], RRRN_T ? RRRN_T : -1);
2868 break;
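/*
 * MOVI.Nn below assembles a 7-bit immediate from the r and t fields; when
 * bits 6:5 of that immediate are both set it is sign-extended with
 * 0xffffff80, giving the -32..95 range of the ISA.  BEQZ.Nn/BNEZ.Nn use a
 * 6-bit unsigned forward offset relative to pc + 4.
 */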
2870 case 12: /*ST2n*/
2871 gen_window_check1(dc, RRRN_S);
2872 if (RRRN_T < 8) { /*MOVI.Nn*/
2873 tcg_gen_movi_i32(cpu_R[RRRN_S],
2874 RRRN_R | (RRRN_T << 4) |
2875 ((RRRN_T & 6) == 6 ? 0xffffff80 : 0));
2876 } else { /*BEQZ.Nn*/ /*BNEZ.Nn*/
2877 TCGCond eq_ne = (RRRN_T & 4) ? TCG_COND_NE : TCG_COND_EQ;
2879 gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0,
2880 4 + (RRRN_R | ((RRRN_T & 3) << 4)));
2881 }
2882 break;
2884 case 13: /*ST3n*/
2885 switch (RRRN_R) {
2886 case 0: /*MOV.Nn*/
2887 gen_window_check2(dc, RRRN_S, RRRN_T);
2888 tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]);
2889 break;
2891 case 15: /*S3*/
2892 switch (RRRN_T) {
2893 case 0: /*RET.Nn*/
2894 gen_jump(dc, cpu_R[0]);
2895 break;
2897 case 1: /*RETW.Nn*/
2898 HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
2899 {
2900 TCGv_i32 tmp = tcg_const_i32(dc->pc);
2901 gen_advance_ccount(dc);
2902 gen_helper_retw(tmp, cpu_env, tmp);
2903 gen_jump(dc, tmp);
2904 tcg_temp_free(tmp);
2905 }
2906 break;
2908 case 2: /*BREAK.Nn*/
2909 HAS_OPTION(XTENSA_OPTION_DEBUG);
2910 if (dc->debug) {
2911 gen_debug_exception(dc, DEBUGCAUSE_BN);
2912 }
2913 break;
2915 case 3: /*NOP.Nn*/
2916 break;
2918 case 6: /*ILL.Nn*/
2919 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
2920 break;
2922 default: /*reserved*/
2923 RESERVED();
2924 break;
2925 }
2926 break;
2928 default: /*reserved*/
2929 RESERVED();
2930 break;
2931 }
2932 break;
2934 default: /*reserved*/
2935 RESERVED();
2936 break;
2937 }
2939 if (dc->is_jmp == DISAS_NEXT) {
2940 gen_check_loop_end(dc, 0);
2941 }
2942 dc->pc = dc->next_pc;
2944 return;
2946 invalid_opcode:
2947 qemu_log("INVALID(pc = %08x)\n", dc->pc);
2948 gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
2949 #undef HAS_OPTION
2950 }
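/*
 * check_breakpoint() below makes translation stop with EXCP_DEBUG when
 * the pc being translated matches a breakpoint registered on the CPU, so
 * control returns to the debugger before the instruction is executed.
 */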
2952 static void check_breakpoint(CPUXtensaState *env, DisasContext *dc)
2953 {
2954 CPUState *cs = CPU(xtensa_env_get_cpu(env));
2955 CPUBreakpoint *bp;
2957 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
2958 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
2959 if (bp->pc == dc->pc) {
2960 tcg_gen_movi_i32(cpu_pc, dc->pc);
2961 gen_exception(dc, EXCP_DEBUG);
2962 dc->is_jmp = DISAS_UPDATE;
2963 }
2964 }
2965 }
2966 }
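/*
 * gen_ibreak_check() below emits a DEBUGCAUSE_IB debug exception when the
 * current pc matches one of the enabled hardware instruction breakpoints
 * (IBREAKENABLE / IBREAKA[i]).
 */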
2968 static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
2969 {
2970 unsigned i;
2972 for (i = 0; i < dc->config->nibreak; ++i) {
2973 if ((env->sregs[IBREAKENABLE] & (1 << i)) &&
2974 env->sregs[IBREAKA + i] == dc->pc) {
2975 gen_debug_exception(dc, DEBUGCAUSE_IB);
2976 break;
2977 }
2978 }
2979 }
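/*
 * gen_intermediate_code_internal() below is the per-TB translation loop:
 * it seeds the DisasContext from the tb flags (ring, literal base, debug,
 * icount and coprocessor state), then translates one instruction at a
 * time until a jump or exception ends the block, max_insns is reached,
 * the next page boundary is crossed or the opcode buffer fills up.  With
 * search_pc it also records pc/icount for every op so a host fault can be
 * mapped back to a guest instruction.
 */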
2981 static inline
2982 void gen_intermediate_code_internal(XtensaCPU *cpu,
2983 TranslationBlock *tb, bool search_pc)
2984 {
2985 CPUState *cs = CPU(cpu);
2986 CPUXtensaState *env = &cpu->env;
2987 DisasContext dc;
2988 int insn_count = 0;
2989 int j, lj = -1;
2990 uint16_t *gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2991 int max_insns = tb->cflags & CF_COUNT_MASK;
2992 uint32_t pc_start = tb->pc;
2993 uint32_t next_page_start =
2994 (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2996 if (max_insns == 0) {
2997 max_insns = CF_COUNT_MASK;
2998 }
3000 dc.config = env->config;
3001 dc.singlestep_enabled = cs->singlestep_enabled;
3002 dc.tb = tb;
3003 dc.pc = pc_start;
3004 dc.ring = tb->flags & XTENSA_TBFLAG_RING_MASK;
3005 dc.cring = (tb->flags & XTENSA_TBFLAG_EXCM) ? 0 : dc.ring;
3006 dc.lbeg = env->sregs[LBEG];
3007 dc.lend = env->sregs[LEND];
3008 dc.is_jmp = DISAS_NEXT;
3009 dc.ccount_delta = 0;
3010 dc.debug = tb->flags & XTENSA_TBFLAG_DEBUG;
3011 dc.icount = tb->flags & XTENSA_TBFLAG_ICOUNT;
3012 dc.cpenable = (tb->flags & XTENSA_TBFLAG_CPENABLE_MASK) >>
3013 XTENSA_TBFLAG_CPENABLE_SHIFT;
3015 init_litbase(&dc);
3016 init_sar_tracker(&dc);
3017 reset_used_window(&dc);
3018 if (dc.icount) {
3019 dc.next_icount = tcg_temp_local_new_i32();
3020 }
3022 gen_tb_start();
3024 if (tb->flags & XTENSA_TBFLAG_EXCEPTION) {
3025 tcg_gen_movi_i32(cpu_pc, dc.pc);
3026 gen_exception(&dc, EXCP_DEBUG);
3027 }
3029 do {
3030 check_breakpoint(env, &dc);
3032 if (search_pc) {
3033 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3034 if (lj < j) {
3035 lj++;
3036 while (lj < j) {
3037 tcg_ctx.gen_opc_instr_start[lj++] = 0;
3038 }
3039 }
3040 tcg_ctx.gen_opc_pc[lj] = dc.pc;
3041 tcg_ctx.gen_opc_instr_start[lj] = 1;
3042 tcg_ctx.gen_opc_icount[lj] = insn_count;
3043 }
3045 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
3046 tcg_gen_debug_insn_start(dc.pc);
3047 }
3049 ++dc.ccount_delta;
3051 if (insn_count + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
3052 gen_io_start();
3053 }
3055 if (dc.icount) {
3056 int label = gen_new_label();
3058 tcg_gen_addi_i32(dc.next_icount, cpu_SR[ICOUNT], 1);
3059 tcg_gen_brcondi_i32(TCG_COND_NE, dc.next_icount, 0, label);
3060 tcg_gen_mov_i32(dc.next_icount, cpu_SR[ICOUNT]);
3061 if (dc.debug) {
3062 gen_debug_exception(&dc, DEBUGCAUSE_IC);
3063 }
3064 gen_set_label(label);
3065 }
3067 if (dc.debug) {
3068 gen_ibreak_check(env, &dc);
3069 }
3071 disas_xtensa_insn(env, &dc);
3072 ++insn_count;
3073 if (dc.icount) {
3074 tcg_gen_mov_i32(cpu_SR[ICOUNT], dc.next_icount);
3075 }
3076 if (cs->singlestep_enabled) {
3077 tcg_gen_movi_i32(cpu_pc, dc.pc);
3078 gen_exception(&dc, EXCP_DEBUG);
3079 break;
3080 }
3081 } while (dc.is_jmp == DISAS_NEXT &&
3082 insn_count < max_insns &&
3083 dc.pc < next_page_start &&
3084 tcg_ctx.gen_opc_ptr < gen_opc_end);
3086 reset_litbase(&dc);
3087 reset_sar_tracker(&dc);
3088 if (dc.icount) {
3089 tcg_temp_free(dc.next_icount);
3090 }
3092 if (tb->cflags & CF_LAST_IO) {
3093 gen_io_end();
3094 }
3096 if (dc.is_jmp == DISAS_NEXT) {
3097 gen_jumpi(&dc, dc.pc, 0);
3098 }
3099 gen_tb_end(tb, insn_count);
3100 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
3102 #ifdef DEBUG_DISAS
3103 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3104 qemu_log("----------------\n");
3105 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3106 log_target_disas(env, pc_start, dc.pc - pc_start, 0);
3107 qemu_log("\n");
3108 }
3109 #endif
3110 if (search_pc) {
3111 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3112 memset(tcg_ctx.gen_opc_instr_start + lj + 1, 0,
3113 (j - lj) * sizeof(tcg_ctx.gen_opc_instr_start[0]));
3114 } else {
3115 tb->size = dc.pc - pc_start;
3116 tb->icount = insn_count;
3117 }
3118 }
3120 void gen_intermediate_code(CPUXtensaState *env, TranslationBlock *tb)
3121 {
3122 gen_intermediate_code_internal(xtensa_env_get_cpu(env), tb, false);
3123 }
3125 void gen_intermediate_code_pc(CPUXtensaState *env, TranslationBlock *tb)
3126 {
3127 gen_intermediate_code_internal(xtensa_env_get_cpu(env), tb, true);
3128 }
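/*
 * xtensa_cpu_dump_state() below prints the pc, every special and user
 * register enabled by the core's option bits, the current A0-A15 window,
 * the whole physical AR file and, when the FP coprocessor option is
 * configured, the 16 FP registers.
 */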
3130 void xtensa_cpu_dump_state(CPUState *cs, FILE *f,
3131 fprintf_function cpu_fprintf, int flags)
3132 {
3133 XtensaCPU *cpu = XTENSA_CPU(cs);
3134 CPUXtensaState *env = &cpu->env;
3135 int i, j;
3137 cpu_fprintf(f, "PC=%08x\n\n", env->pc);
3139 for (i = j = 0; i < 256; ++i) {
3140 if (xtensa_option_bits_enabled(env->config, sregnames[i].opt_bits)) {
3141 cpu_fprintf(f, "%12s=%08x%c", sregnames[i].name, env->sregs[i],
3142 (j++ % 4) == 3 ? '\n' : ' ');
3143 }
3144 }
3146 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
3148 for (i = j = 0; i < 256; ++i) {
3149 if (xtensa_option_bits_enabled(env->config, uregnames[i].opt_bits)) {
3150 cpu_fprintf(f, "%s=%08x%c", uregnames[i].name, env->uregs[i],
3151 (j++ % 4) == 3 ? '\n' : ' ');
3152 }
3153 }
3155 cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
3157 for (i = 0; i < 16; ++i) {
3158 cpu_fprintf(f, " A%02d=%08x%c", i, env->regs[i],
3159 (i % 4) == 3 ? '\n' : ' ');
3160 }
3162 cpu_fprintf(f, "\n");
3164 for (i = 0; i < env->config->nareg; ++i) {
3165 cpu_fprintf(f, "AR%02d=%08x%c", i, env->phys_regs[i],
3166 (i % 4) == 3 ? '\n' : ' ');
3167 }
3169 if (xtensa_option_enabled(env->config, XTENSA_OPTION_FP_COPROCESSOR)) {
3170 cpu_fprintf(f, "\n");
3172 for (i = 0; i < 16; ++i) {
3173 cpu_fprintf(f, "F%02d=%08x (%+10.8e)%c", i,
3174 float32_val(env->fregs[i]),
3175 *(float *)&env->fregs[i], (i % 2) == 1 ? '\n' : ' ');
3176 }
3177 }
3178 }
3180 void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb, int pc_pos)
3181 {
3182 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
3183 }