target/hppa: Implement rfi
[qemu.git] / target / hppa / translate.c
blobdf0bb04907e7a0cde6e6bf082f473e44e96ea14f
1 /*
2 * HPPA emulation cpu translation for qemu.
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "trace-tcg.h"
31 #include "exec/log.h"
/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

/* TCGv_tl: wide enough for a target virtual address.  */
#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#define tcg_temp_free_tl tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_temp_free_tl tcg_temp_free_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif

/* TCGv_reg: wide enough for a target general register.  Every tcg_gen
   operation used by this file gets a _reg alias mapped to the i64 or
   i32 flavor matching TARGET_REGISTER_BITS.  */
#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_reg_new tcg_global_reg_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new tcg_temp_local_new_i64
#define tcg_temp_free tcg_temp_free_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg tcg_const_i64
#define tcg_const_local_reg tcg_const_local_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
/* Truncate a 64-bit register value into a host pointer.  */
#if UINTPTR_MAX == UINT32_MAX
# define tcg_gen_trunc_reg_ptr(p, r) \
    tcg_gen_trunc_i64_i32(TCGV_PTR_TO_NAT(p), r)
#else
# define tcg_gen_trunc_reg_ptr(p, r) \
    tcg_gen_mov_i64(TCGV_PTR_TO_NAT(p), r)
#endif
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_reg_new tcg_global_reg_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new tcg_temp_local_new_i32
#define tcg_temp_free tcg_temp_free_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
/* 32-bit loads of a 32-bit register are plain loads.  */
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
/* Width conversions degenerate to moves where widths already match.  */
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg tcg_const_i32
#define tcg_const_local_reg tcg_const_local_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
/* Extend a 32-bit register value into a host pointer.  */
#if UINTPTR_MAX == UINT32_MAX
# define tcg_gen_trunc_reg_ptr(p, r) \
    tcg_gen_mov_i32(TCGV_PTR_TO_NAT(p), r)
#else
# define tcg_gen_trunc_reg_ptr(p, r) \
    tcg_gen_extu_i32_i64(TCGV_PTR_TO_NAT(p), r)
#endif
#endif /* TARGET_REGISTER_BITS */
/* A comparison to be evaluated at runtime: "a0 <c> a1".  */
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
    /* a0 aliases the cpu_psw_n global; it must not be freed.  */
    bool a0_is_n;
    /* a1 is the implicit constant 0, not yet materialized as a TCG
       value; cond_prep() turns it into a real temporary.  */
    bool a1_is_0;
} DisasCond;
/* Per-translation-block disassembly state.  */
typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    /* Instruction address offset queue: front (current insn), back
       (next insn, i.e. the delay slot), and the computed next-next
       offset.  iaoq_n_var is used when iaoq_n is not a compile-time
       constant (indicated by the value -1 elsewhere in this file).  */
    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    /* Scratch temporaries allocated for the current insn via get_temp();
       presumably released by the translator loop — not visible here.  */
    int ntemps;
    TCGv_reg temps[8];

    /* Condition under which the next insn is nullified, plus the label
       used by nullify_over()/nullify_end() to skip a nullified body.  */
    DisasCond null_cond;
    TCGLabel *null_lab;

    int mmu_idx;
    int privilege;
    /* True if PSW[N] may currently hold a nonzero value.  */
    bool psw_n_nonzero;
} DisasContext;
/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
/* One entry in the instruction decode table.  NOTE(review): matching
   presumably tests (raw & mask) == insn — confirm against the lookup
   code, which is not visible in this chunk.  */
typedef struct DisasInsn {
    uint32_t insn, mask;
    DisasJumpType (*trans)(DisasContext *ctx, uint32_t insn,
                           const struct DisasInsn *f);
    /* Optional typed helper for trans to invoke; the member names
       encode the operand types: t = TCGv_reg, w = TCGv_i32,
       d = TCGv_i64, e = TCGv_env.  */
    union {
        void (*ttt)(TCGv_reg, TCGv_reg, TCGv_reg);
        void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
        void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
        void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
        void (*ded)(TCGv_i64, TCGv_env, TCGv_i64);
        void (*wed)(TCGv_i32, TCGv_env, TCGv_i64);
        void (*dew)(TCGv_i64, TCGv_env, TCGv_i32);
    } f;
} DisasInsn;
/* global register indexes */
static TCGv_reg cpu_gr[32];          /* general registers; [0] stays NULL */
static TCGv_i64 cpu_sr[4];           /* space registers 0-3 (4-7 live in env) */
static TCGv_reg cpu_iaoq_f;          /* IA offset queue, front */
static TCGv_reg cpu_iaoq_b;          /* IA offset queue, back */
static TCGv_reg cpu_sar;             /* shift amount register */
static TCGv_reg cpu_psw_n;           /* PSW nullify bit */
static TCGv_reg cpu_psw_v;           /* PSW overflow */
static TCGv_reg cpu_psw_cb;          /* PSW carry/borrow bits */
static TCGv_reg cpu_psw_cb_msb;      /* carry out of the most significant bit */

#include "exec/gen-icount.h"
/* One-time creation of the TCG globals backing the CPU state above.  */
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        /* SAR is stored in cr[CR_SAR], not in a field of its own name.  */
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[4][4] = {
        "sr0", "sr1", "sr2", "sr3"
    };

    int i;

    /* %r0 is hard-wired to zero; it has no backing global.  */
    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }
}
383 static DisasCond cond_make_f(void)
385 return (DisasCond){
386 .c = TCG_COND_NEVER,
387 .a0 = NULL,
388 .a1 = NULL,
392 static DisasCond cond_make_n(void)
394 return (DisasCond){
395 .c = TCG_COND_NE,
396 .a0 = cpu_psw_n,
397 .a0_is_n = true,
398 .a1 = NULL,
399 .a1_is_0 = true
403 static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
405 DisasCond r = { .c = c, .a1 = NULL, .a1_is_0 = true };
407 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
408 r.a0 = tcg_temp_new();
409 tcg_gen_mov_reg(r.a0, a0);
411 return r;
414 static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
416 DisasCond r = { .c = c };
418 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
419 r.a0 = tcg_temp_new();
420 tcg_gen_mov_reg(r.a0, a0);
421 r.a1 = tcg_temp_new();
422 tcg_gen_mov_reg(r.a1, a1);
424 return r;
427 static void cond_prep(DisasCond *cond)
429 if (cond->a1_is_0) {
430 cond->a1_is_0 = false;
431 cond->a1 = tcg_const_reg(0);
/* Release any temporaries held by *cond and reset it to "never".  */
static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        /* a0 may alias the psw_n global, and a1 may be the implicit
           zero; in those cases there is no temporary to free.  */
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        /* Already free; nothing to do.  */
        break;
    }
}
458 static TCGv_reg get_temp(DisasContext *ctx)
460 unsigned i = ctx->ntemps++;
461 g_assert(i < ARRAY_SIZE(ctx->temps));
462 return ctx->temps[i] = tcg_temp_new();
465 static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
467 TCGv_reg t = get_temp(ctx);
468 tcg_gen_movi_reg(t, v);
469 return t;
472 static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
474 if (reg == 0) {
475 TCGv_reg t = get_temp(ctx);
476 tcg_gen_movi_reg(t, 0);
477 return t;
478 } else {
479 return cpu_gr[reg];
483 static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
485 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
486 return get_temp(ctx);
487 } else {
488 return cpu_gr[reg];
/* Commit T to DEST, honoring nullification: if the current insn may be
   nullified, emit a conditional move that keeps DEST's old value when
   the nullify condition holds.  */
static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        /* dest = nullified ? dest : t */
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}
503 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
505 if (reg != 0) {
506 save_or_nullify(ctx, cpu_gr[reg], t);
/* Byte offsets of the architecturally "high" and "low" 32-bit halves
   of a 64-bit fr[] element, accounting for host endianness.  */
#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

/* Load 32-bit FP register RT into a fresh i32.  Bit 5 of RT selects
   which half of the doubleword register fr[rt & 31] to read.  */
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}
527 static TCGv_i32 load_frw0_i32(unsigned rt)
529 if (rt == 0) {
530 return tcg_const_i32(0);
531 } else {
532 return load_frw_i32(rt);
/* Load 32-bit FP register RT zero-extended into an i64; register 0
   reads as constant zero.  */
static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}
/* Store VAL to 32-bit FP register RT (half selection as load_frw_i32).  */
static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS
559 static TCGv_i64 load_frd(unsigned rt)
561 TCGv_i64 ret = tcg_temp_new_i64();
562 tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
563 return ret;
566 static TCGv_i64 load_frd0(unsigned rt)
568 if (rt == 0) {
569 return tcg_const_i64(0);
570 } else {
571 return load_frd(rt);
/* Store VAL to 64-bit FP register RT.  */
static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}
/* Copy space register REG into DEST.  SR[0-3] are TCG globals and must
   be read through them; SR[4-7] are loaded directly from env (see the
   sr_names comment above).  User-only emulation has no space registers,
   so the result is always zero there.  */
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}
/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.
   Emits a branch to ctx->null_lab taken when the nullify condition
   holds; nullify_end() places the label.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}
/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Next insn unconditional: PSW[N] must read as zero.  */
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    /* If a0 already aliases psw_n, the value is in place.  */
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}
/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    /* Skip the store when PSW[N] is known-zero and X is zero.  */
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}
/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  */
static DisasJumpType nullify_end(DisasContext *ctx, DisasJumpType status)
{
    TCGLabel *null_lab = ctx->null_lab;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return status;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    /* On the nullified path we fall through here, so translation
       must continue even if the real path did not return.  */
    if (status == DISAS_NORETURN) {
        status = DISAS_NEXT;
    }
    return status;
}
689 static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
691 if (unlikely(ival == -1)) {
692 tcg_gen_mov_reg(dest, vval);
693 } else {
694 tcg_gen_movi_reg(dest, ival);
/* Branch target: DISP is relative to the insn two slots past the
   current one (current insn + 8), per the PA branch encoding.  */
static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}
703 static void gen_excp_1(int exception)
705 TCGv_i32 t = tcg_const_i32(exception);
706 gen_helper_excp(cpu_env, t);
707 tcg_temp_free_i32(t);
/* Flush the IA queue and PSW[N] to the CPU state, then raise
   EXCEPTION.  Always ends the TB.  */
static DisasJumpType gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    return DISAS_NORETURN;
}
719 static DisasJumpType gen_illegal(DisasContext *ctx)
721 nullify_over(ctx);
722 return nullify_end(ctx, gen_excp(ctx, EXCP_ILL));
/* Raise EXCP unless executing at the most privileged level (0).
   NOTE: expands to a "return" from the enclosing trans_* function.  */
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            nullify_over(ctx);               \
            return nullify_end(ctx, gen_excp(ctx, EXCP)); \
        }                                    \
    } while (0)
733 static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
735 /* Suppress goto_tb in the case of single-steping and IO. */
736 if ((tb_cflags(ctx->base.tb) & CF_LAST_IO) || ctx->base.singlestep_enabled) {
737 return false;
739 return true;
/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}
/* End the TB with a jump to the insn pair F/B, direct-chained when
   both addresses are translation-time constants and use_goto_tb
   allows it.  F or B equal to -1 means "take the runtime value".  */
static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb((uintptr_t)ctx->base.tb + which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            /* Indirect jump: look up the target TB at runtime.  */
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}
771 /* PA has a habit of taking the LSB of a field and using that as the sign,
772 with the rest of the field becoming the least significant bits. */
773 static target_sreg low_sextract(uint32_t val, int pos, int len)
775 target_ureg x = -(target_ureg)extract32(val, pos, 1);
776 x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
777 return x;
780 static unsigned assemble_rt64(uint32_t insn)
782 unsigned r1 = extract32(insn, 6, 1);
783 unsigned r0 = extract32(insn, 0, 5);
784 return r1 * 32 + r0;
787 static unsigned assemble_ra64(uint32_t insn)
789 unsigned r1 = extract32(insn, 7, 1);
790 unsigned r0 = extract32(insn, 21, 5);
791 return r1 * 32 + r0;
794 static unsigned assemble_rb64(uint32_t insn)
796 unsigned r1 = extract32(insn, 12, 1);
797 unsigned r0 = extract32(insn, 16, 5);
798 return r1 * 32 + r0;
801 static unsigned assemble_rc64(uint32_t insn)
803 unsigned r2 = extract32(insn, 8, 1);
804 unsigned r1 = extract32(insn, 13, 3);
805 unsigned r0 = extract32(insn, 9, 2);
806 return r2 * 32 + r1 * 4 + r0;
809 static unsigned assemble_sr3(uint32_t insn)
811 unsigned s2 = extract32(insn, 13, 1);
812 unsigned s0 = extract32(insn, 14, 2);
813 return s2 * 4 + s0;
816 static target_sreg assemble_12(uint32_t insn)
818 target_ureg x = -(target_ureg)(insn & 1);
819 x = (x << 1) | extract32(insn, 2, 1);
820 x = (x << 10) | extract32(insn, 3, 10);
821 return x;
824 static target_sreg assemble_16(uint32_t insn)
826 /* Take the name from PA2.0, which produces a 16-bit number
827 only with wide mode; otherwise a 14-bit number. Since we don't
828 implement wide mode, this is always the 14-bit number. */
829 return low_sextract(insn, 0, 14);
832 static target_sreg assemble_16a(uint32_t insn)
834 /* Take the name from PA2.0, which produces a 14-bit shifted number
835 only with wide mode; otherwise a 12-bit shifted number. Since we
836 don't implement wide mode, this is always the 12-bit number. */
837 target_ureg x = -(target_ureg)(insn & 1);
838 x = (x << 11) | extract32(insn, 2, 11);
839 return x << 2;
842 static target_sreg assemble_17(uint32_t insn)
844 target_ureg x = -(target_ureg)(insn & 1);
845 x = (x << 5) | extract32(insn, 16, 5);
846 x = (x << 1) | extract32(insn, 2, 1);
847 x = (x << 10) | extract32(insn, 3, 10);
848 return x << 2;
851 static target_sreg assemble_21(uint32_t insn)
853 target_ureg x = -(target_ureg)(insn & 1);
854 x = (x << 11) | extract32(insn, 1, 11);
855 x = (x << 2) | extract32(insn, 14, 2);
856 x = (x << 5) | extract32(insn, 16, 5);
857 x = (x << 2) | extract32(insn, 12, 2);
858 return x << 11;
861 static target_sreg assemble_22(uint32_t insn)
863 target_ureg x = -(target_ureg)(insn & 1);
864 x = (x << 10) | extract32(insn, 16, 10);
865 x = (x << 1) | extract32(insn, 2, 1);
866 x = (x << 10) | extract32(insn, 3, 10);
867 return x << 2;
/* The parisc documentation describes only the general interpretation of
   the conditions, without describing their exact implementation.  The
   interpretations do not stand up well when considering ADD,C and SUB,B.
   However, considering the Addition, Subtraction and Logical conditions
   as a whole it would appear that these relations are similar to what
   a traditional NZCV set of flags would produce.  */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N / !N) */
        cond = cond_make_0(TCG_COND_LT, res);
        break;
    case 3: /* <= / >        (N | Z / !N & !Z) */
        cond = cond_make_0(TCG_COND_LE, res);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        /* tmp = C ? ~0 : 0, then tmp & res == 0 iff !C | Z.  */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0(TCG_COND_EQ, tmp);
        tcg_temp_free(tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;
    default:
        g_assert_not_reached();
    }
    /* The low bit of CF selects the negated sense of the condition.  */
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>=  (unsigned) */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >>  (unsigned) */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        /* Remaining conditions fall back to the generic evaluation
           on the result; negation is handled inside do_cond.  */
        return do_cond(cf, res, sv, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
960 /* Similar, but for logicals, where the carry and overflow bits are not
961 computed, and use of them is undefined. */
963 static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
965 switch (cf >> 1) {
966 case 4: case 5: case 6:
967 cf &= 1;
968 break;
970 return do_cond(cf, res, res, res);
973 /* Similar, but for shift/extract/deposit conditions. */
975 static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
977 unsigned c, f;
979 /* Convert the compressed condition codes to standard.
980 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
981 4-7 are the reverse of 0-3. */
982 c = orig & 3;
983 if (c == 3) {
984 c = 7;
986 f = (orig & 4) / 4;
988 return do_log_cond(c * 2 + f, res);
/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         * cb = (in1 | in2) & ~res  |  (in1 & in2): carry out of each bit.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        /* Same trick, halfword granularity.  */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC -- carry out of any 4-bit digit */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC -- carry out of any byte */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC -- carry out of any halfword */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
/* Compute signed overflow for addition.
   sv = (res ^ in1) & ~(in1 ^ in2): overflow iff the operands had the
   same sign and the result's sign differs; sv < 0 signals overflow.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
1084 /* Compute signed overflow for subtraction. */
1085 static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
1086 TCGv_reg in1, TCGv_reg in2)
1088 TCGv_reg sv = get_temp(ctx);
1089 TCGv_reg tmp = tcg_temp_new();
1091 tcg_gen_xor_reg(sv, res, in1);
1092 tcg_gen_xor_reg(tmp, in1, in2);
1093 tcg_gen_and_reg(sv, sv, tmp);
1094 tcg_temp_free(tmp);
1096 return sv;
1099 static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1100 TCGv_reg in2, unsigned shift, bool is_l,
1101 bool is_tsv, bool is_tc, bool is_c, unsigned cf)
1103 TCGv_reg dest, cb, cb_msb, sv, tmp;
1104 unsigned c = cf >> 1;
1105 DisasCond cond;
1107 dest = tcg_temp_new();
1108 cb = NULL;
1109 cb_msb = NULL;
1111 if (shift) {
1112 tmp = get_temp(ctx);
1113 tcg_gen_shli_reg(tmp, in1, shift);
1114 in1 = tmp;
1117 if (!is_l || c == 4 || c == 5) {
1118 TCGv_reg zero = tcg_const_reg(0);
1119 cb_msb = get_temp(ctx);
1120 tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
1121 if (is_c) {
1122 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
1124 tcg_temp_free(zero);
1125 if (!is_l) {
1126 cb = get_temp(ctx);
1127 tcg_gen_xor_reg(cb, in1, in2);
1128 tcg_gen_xor_reg(cb, cb, dest);
1130 } else {
1131 tcg_gen_add_reg(dest, in1, in2);
1132 if (is_c) {
1133 tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
1137 /* Compute signed overflow if required. */
1138 sv = NULL;
1139 if (is_tsv || c == 6) {
1140 sv = do_add_sv(ctx, dest, in1, in2);
1141 if (is_tsv) {
1142 /* ??? Need to include overflow from shift. */
1143 gen_helper_tsv(cpu_env, sv);
1147 /* Emit any conditional trap before any writeback. */
1148 cond = do_cond(cf, dest, cb_msb, sv);
1149 if (is_tc) {
1150 cond_prep(&cond);
1151 tmp = tcg_temp_new();
1152 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1153 gen_helper_tcond(cpu_env, tmp);
1154 tcg_temp_free(tmp);
1157 /* Write back the result. */
1158 if (!is_l) {
1159 save_or_nullify(ctx, cpu_psw_cb, cb);
1160 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1162 save_gpr(ctx, rt, dest);
1163 tcg_temp_free(dest);
1165 /* Install the new nullification. */
1166 cond_free(&ctx->null_cond);
1167 ctx->null_cond = cond;
1168 return DISAS_NEXT;
1171 static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1172 TCGv_reg in2, bool is_tsv, bool is_b,
1173 bool is_tc, unsigned cf)
1175 TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
1176 unsigned c = cf >> 1;
1177 DisasCond cond;
1179 dest = tcg_temp_new();
1180 cb = tcg_temp_new();
1181 cb_msb = tcg_temp_new();
1183 zero = tcg_const_reg(0);
1184 if (is_b) {
1185 /* DEST,C = IN1 + ~IN2 + C. */
1186 tcg_gen_not_reg(cb, in2);
1187 tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
1188 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
1189 tcg_gen_xor_reg(cb, cb, in1);
1190 tcg_gen_xor_reg(cb, cb, dest);
1191 } else {
1192 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1193 operations by seeding the high word with 1 and subtracting. */
1194 tcg_gen_movi_reg(cb_msb, 1);
1195 tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
1196 tcg_gen_eqv_reg(cb, in1, in2);
1197 tcg_gen_xor_reg(cb, cb, dest);
1199 tcg_temp_free(zero);
1201 /* Compute signed overflow if required. */
1202 sv = NULL;
1203 if (is_tsv || c == 6) {
1204 sv = do_sub_sv(ctx, dest, in1, in2);
1205 if (is_tsv) {
1206 gen_helper_tsv(cpu_env, sv);
1210 /* Compute the condition. We cannot use the special case for borrow. */
1211 if (!is_b) {
1212 cond = do_sub_cond(cf, dest, in1, in2, sv);
1213 } else {
1214 cond = do_cond(cf, dest, cb_msb, sv);
1217 /* Emit any conditional trap before any writeback. */
1218 if (is_tc) {
1219 cond_prep(&cond);
1220 tmp = tcg_temp_new();
1221 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1222 gen_helper_tcond(cpu_env, tmp);
1223 tcg_temp_free(tmp);
1226 /* Write back the result. */
1227 save_or_nullify(ctx, cpu_psw_cb, cb);
1228 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1229 save_gpr(ctx, rt, dest);
1230 tcg_temp_free(dest);
1232 /* Install the new nullification. */
1233 cond_free(&ctx->null_cond);
1234 ctx->null_cond = cond;
1235 return DISAS_NEXT;
1238 static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1239 TCGv_reg in2, unsigned cf)
1241 TCGv_reg dest, sv;
1242 DisasCond cond;
1244 dest = tcg_temp_new();
1245 tcg_gen_sub_reg(dest, in1, in2);
1247 /* Compute signed overflow if required. */
1248 sv = NULL;
1249 if ((cf >> 1) == 6) {
1250 sv = do_sub_sv(ctx, dest, in1, in2);
1253 /* Form the condition for the compare. */
1254 cond = do_sub_cond(cf, dest, in1, in2, sv);
1256 /* Clear. */
1257 tcg_gen_movi_reg(dest, 0);
1258 save_gpr(ctx, rt, dest);
1259 tcg_temp_free(dest);
1261 /* Install the new nullification. */
1262 cond_free(&ctx->null_cond);
1263 ctx->null_cond = cond;
1264 return DISAS_NEXT;
1267 static DisasJumpType do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1268 TCGv_reg in2, unsigned cf,
1269 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1271 TCGv_reg dest = dest_gpr(ctx, rt);
1273 /* Perform the operation, and writeback. */
1274 fn(dest, in1, in2);
1275 save_gpr(ctx, rt, dest);
1277 /* Install the new nullification. */
1278 cond_free(&ctx->null_cond);
1279 if (cf) {
1280 ctx->null_cond = do_log_cond(cf, dest);
1282 return DISAS_NEXT;
1285 static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1286 TCGv_reg in2, unsigned cf, bool is_tc,
1287 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1289 TCGv_reg dest;
1290 DisasCond cond;
1292 if (cf == 0) {
1293 dest = dest_gpr(ctx, rt);
1294 fn(dest, in1, in2);
1295 save_gpr(ctx, rt, dest);
1296 cond_free(&ctx->null_cond);
1297 } else {
1298 dest = tcg_temp_new();
1299 fn(dest, in1, in2);
1301 cond = do_unit_cond(cf, dest, in1, in2);
1303 if (is_tc) {
1304 TCGv_reg tmp = tcg_temp_new();
1305 cond_prep(&cond);
1306 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1307 gen_helper_tcond(cpu_env, tmp);
1308 tcg_temp_free(tmp);
1310 save_gpr(ctx, rt, dest);
1312 cond_free(&ctx->null_cond);
1313 ctx->null_cond = cond;
1315 return DISAS_NEXT;
1318 /* Emit a memory load. The modify parameter should be
1319 * < 0 for pre-modify,
1320 * > 0 for post-modify,
1321 * = 0 for no base register update.
1323 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1324 unsigned rx, int scale, target_sreg disp,
1325 int modify, TCGMemOp mop)
1327 TCGv_reg addr, base;
1329 /* Caller uses nullify_over/nullify_end. */
1330 assert(ctx->null_cond.c == TCG_COND_NEVER);
1332 addr = tcg_temp_new();
1333 base = load_gpr(ctx, rb);
1335 /* Note that RX is mutually exclusive with DISP. */
1336 if (rx) {
1337 tcg_gen_shli_reg(addr, cpu_gr[rx], scale);
1338 tcg_gen_add_reg(addr, addr, base);
1339 } else {
1340 tcg_gen_addi_reg(addr, base, disp);
1343 if (modify == 0) {
1344 tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop);
1345 } else {
1346 tcg_gen_qemu_ld_i32(dest, (modify < 0 ? addr : base),
1347 ctx->mmu_idx, mop);
1348 save_gpr(ctx, rb, addr);
1350 tcg_temp_free(addr);
1353 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1354 unsigned rx, int scale, target_sreg disp,
1355 int modify, TCGMemOp mop)
1357 TCGv_reg addr, base;
1359 /* Caller uses nullify_over/nullify_end. */
1360 assert(ctx->null_cond.c == TCG_COND_NEVER);
1362 addr = tcg_temp_new();
1363 base = load_gpr(ctx, rb);
1365 /* Note that RX is mutually exclusive with DISP. */
1366 if (rx) {
1367 tcg_gen_shli_reg(addr, cpu_gr[rx], scale);
1368 tcg_gen_add_reg(addr, addr, base);
1369 } else {
1370 tcg_gen_addi_reg(addr, base, disp);
1373 if (modify == 0) {
1374 tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
1375 } else {
1376 tcg_gen_qemu_ld_i64(dest, (modify < 0 ? addr : base),
1377 ctx->mmu_idx, mop);
1378 save_gpr(ctx, rb, addr);
1380 tcg_temp_free(addr);
1383 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1384 unsigned rx, int scale, target_sreg disp,
1385 int modify, TCGMemOp mop)
1387 TCGv_reg addr, base;
1389 /* Caller uses nullify_over/nullify_end. */
1390 assert(ctx->null_cond.c == TCG_COND_NEVER);
1392 addr = tcg_temp_new();
1393 base = load_gpr(ctx, rb);
1395 /* Note that RX is mutually exclusive with DISP. */
1396 if (rx) {
1397 tcg_gen_shli_reg(addr, cpu_gr[rx], scale);
1398 tcg_gen_add_reg(addr, addr, base);
1399 } else {
1400 tcg_gen_addi_reg(addr, base, disp);
1403 tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), ctx->mmu_idx, mop);
1405 if (modify != 0) {
1406 save_gpr(ctx, rb, addr);
1408 tcg_temp_free(addr);
1411 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1412 unsigned rx, int scale, target_sreg disp,
1413 int modify, TCGMemOp mop)
1415 TCGv_reg addr, base;
1417 /* Caller uses nullify_over/nullify_end. */
1418 assert(ctx->null_cond.c == TCG_COND_NEVER);
1420 addr = tcg_temp_new();
1421 base = load_gpr(ctx, rb);
1423 /* Note that RX is mutually exclusive with DISP. */
1424 if (rx) {
1425 tcg_gen_shli_reg(addr, cpu_gr[rx], scale);
1426 tcg_gen_add_reg(addr, addr, base);
1427 } else {
1428 tcg_gen_addi_reg(addr, base, disp);
1431 tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), ctx->mmu_idx, mop);
1433 if (modify != 0) {
1434 save_gpr(ctx, rb, addr);
1436 tcg_temp_free(addr);
1439 #if TARGET_REGISTER_BITS == 64
1440 #define do_load_reg do_load_64
1441 #define do_store_reg do_store_64
1442 #else
1443 #define do_load_reg do_load_32
1444 #define do_store_reg do_store_32
1445 #endif
1447 static DisasJumpType do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1448 unsigned rx, int scale, target_sreg disp,
1449 int modify, TCGMemOp mop)
1451 TCGv_reg dest;
1453 nullify_over(ctx);
1455 if (modify == 0) {
1456 /* No base register update. */
1457 dest = dest_gpr(ctx, rt);
1458 } else {
1459 /* Make sure if RT == RB, we see the result of the load. */
1460 dest = get_temp(ctx);
1462 do_load_reg(ctx, dest, rb, rx, scale, disp, modify, mop);
1463 save_gpr(ctx, rt, dest);
1465 return nullify_end(ctx, DISAS_NEXT);
1468 static DisasJumpType do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1469 unsigned rx, int scale, target_sreg disp,
1470 int modify)
1472 TCGv_i32 tmp;
1474 nullify_over(ctx);
1476 tmp = tcg_temp_new_i32();
1477 do_load_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1478 save_frw_i32(rt, tmp);
1479 tcg_temp_free_i32(tmp);
1481 if (rt == 0) {
1482 gen_helper_loaded_fr0(cpu_env);
1485 return nullify_end(ctx, DISAS_NEXT);
1488 static DisasJumpType do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1489 unsigned rx, int scale, target_sreg disp,
1490 int modify)
1492 TCGv_i64 tmp;
1494 nullify_over(ctx);
1496 tmp = tcg_temp_new_i64();
1497 do_load_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1498 save_frd(rt, tmp);
1499 tcg_temp_free_i64(tmp);
1501 if (rt == 0) {
1502 gen_helper_loaded_fr0(cpu_env);
1505 return nullify_end(ctx, DISAS_NEXT);
1508 static DisasJumpType do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1509 target_sreg disp, int modify, TCGMemOp mop)
1511 nullify_over(ctx);
1512 do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
1513 return nullify_end(ctx, DISAS_NEXT);
1516 static DisasJumpType do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1517 unsigned rx, int scale, target_sreg disp,
1518 int modify)
1520 TCGv_i32 tmp;
1522 nullify_over(ctx);
1524 tmp = load_frw_i32(rt);
1525 do_store_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
1526 tcg_temp_free_i32(tmp);
1528 return nullify_end(ctx, DISAS_NEXT);
1531 static DisasJumpType do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1532 unsigned rx, int scale, target_sreg disp,
1533 int modify)
1535 TCGv_i64 tmp;
1537 nullify_over(ctx);
1539 tmp = load_frd(rt);
1540 do_store_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
1541 tcg_temp_free_i64(tmp);
1543 return nullify_end(ctx, DISAS_NEXT);
1546 static DisasJumpType do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1547 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1549 TCGv_i32 tmp;
1551 nullify_over(ctx);
1552 tmp = load_frw0_i32(ra);
1554 func(tmp, cpu_env, tmp);
1556 save_frw_i32(rt, tmp);
1557 tcg_temp_free_i32(tmp);
1558 return nullify_end(ctx, DISAS_NEXT);
1561 static DisasJumpType do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1562 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1564 TCGv_i32 dst;
1565 TCGv_i64 src;
1567 nullify_over(ctx);
1568 src = load_frd(ra);
1569 dst = tcg_temp_new_i32();
1571 func(dst, cpu_env, src);
1573 tcg_temp_free_i64(src);
1574 save_frw_i32(rt, dst);
1575 tcg_temp_free_i32(dst);
1576 return nullify_end(ctx, DISAS_NEXT);
1579 static DisasJumpType do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1580 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1582 TCGv_i64 tmp;
1584 nullify_over(ctx);
1585 tmp = load_frd0(ra);
1587 func(tmp, cpu_env, tmp);
1589 save_frd(rt, tmp);
1590 tcg_temp_free_i64(tmp);
1591 return nullify_end(ctx, DISAS_NEXT);
1594 static DisasJumpType do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1595 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1597 TCGv_i32 src;
1598 TCGv_i64 dst;
1600 nullify_over(ctx);
1601 src = load_frw0_i32(ra);
1602 dst = tcg_temp_new_i64();
1604 func(dst, cpu_env, src);
1606 tcg_temp_free_i32(src);
1607 save_frd(rt, dst);
1608 tcg_temp_free_i64(dst);
1609 return nullify_end(ctx, DISAS_NEXT);
1612 static DisasJumpType do_fop_weww(DisasContext *ctx, unsigned rt,
1613 unsigned ra, unsigned rb,
1614 void (*func)(TCGv_i32, TCGv_env,
1615 TCGv_i32, TCGv_i32))
1617 TCGv_i32 a, b;
1619 nullify_over(ctx);
1620 a = load_frw0_i32(ra);
1621 b = load_frw0_i32(rb);
1623 func(a, cpu_env, a, b);
1625 tcg_temp_free_i32(b);
1626 save_frw_i32(rt, a);
1627 tcg_temp_free_i32(a);
1628 return nullify_end(ctx, DISAS_NEXT);
1631 static DisasJumpType do_fop_dedd(DisasContext *ctx, unsigned rt,
1632 unsigned ra, unsigned rb,
1633 void (*func)(TCGv_i64, TCGv_env,
1634 TCGv_i64, TCGv_i64))
1636 TCGv_i64 a, b;
1638 nullify_over(ctx);
1639 a = load_frd0(ra);
1640 b = load_frd0(rb);
1642 func(a, cpu_env, a, b);
1644 tcg_temp_free_i64(b);
1645 save_frd(rt, a);
1646 tcg_temp_free_i64(a);
1647 return nullify_end(ctx, DISAS_NEXT);
1650 /* Emit an unconditional branch to a direct target, which may or may not
1651 have already had nullification handled. */
1652 static DisasJumpType do_dbranch(DisasContext *ctx, target_ureg dest,
1653 unsigned link, bool is_n)
1655 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1656 if (link != 0) {
1657 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1659 ctx->iaoq_n = dest;
1660 if (is_n) {
1661 ctx->null_cond.c = TCG_COND_ALWAYS;
1663 return DISAS_NEXT;
1664 } else {
1665 nullify_over(ctx);
1667 if (link != 0) {
1668 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1671 if (is_n && use_nullify_skip(ctx)) {
1672 nullify_set(ctx, 0);
1673 gen_goto_tb(ctx, 0, dest, dest + 4);
1674 } else {
1675 nullify_set(ctx, is_n);
1676 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1679 nullify_end(ctx, DISAS_NEXT);
1681 nullify_set(ctx, 0);
1682 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1683 return DISAS_NORETURN;
1687 /* Emit a conditional branch to a direct target. If the branch itself
1688 is nullified, we should have already used nullify_over. */
1689 static DisasJumpType do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
1690 DisasCond *cond)
1692 target_ureg dest = iaoq_dest(ctx, disp);
1693 TCGLabel *taken = NULL;
1694 TCGCond c = cond->c;
1695 bool n;
1697 assert(ctx->null_cond.c == TCG_COND_NEVER);
1699 /* Handle TRUE and NEVER as direct branches. */
1700 if (c == TCG_COND_ALWAYS) {
1701 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1703 if (c == TCG_COND_NEVER) {
1704 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1707 taken = gen_new_label();
1708 cond_prep(cond);
1709 tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1710 cond_free(cond);
1712 /* Not taken: Condition not satisfied; nullify on backward branches. */
1713 n = is_n && disp < 0;
1714 if (n && use_nullify_skip(ctx)) {
1715 nullify_set(ctx, 0);
1716 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1717 } else {
1718 if (!n && ctx->null_lab) {
1719 gen_set_label(ctx->null_lab);
1720 ctx->null_lab = NULL;
1722 nullify_set(ctx, n);
1723 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1726 gen_set_label(taken);
1728 /* Taken: Condition satisfied; nullify on forward branches. */
1729 n = is_n && disp >= 0;
1730 if (n && use_nullify_skip(ctx)) {
1731 nullify_set(ctx, 0);
1732 gen_goto_tb(ctx, 1, dest, dest + 4);
1733 } else {
1734 nullify_set(ctx, n);
1735 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1738 /* Not taken: the branch itself was nullified. */
1739 if (ctx->null_lab) {
1740 gen_set_label(ctx->null_lab);
1741 ctx->null_lab = NULL;
1742 return DISAS_IAQ_N_STALE;
1743 } else {
1744 return DISAS_NORETURN;
1748 /* Emit an unconditional branch to an indirect target. This handles
1749 nullification of the branch itself. */
1750 static DisasJumpType do_ibranch(DisasContext *ctx, TCGv_reg dest,
1751 unsigned link, bool is_n)
1753 TCGv_reg a0, a1, next, tmp;
1754 TCGCond c;
1756 assert(ctx->null_lab == NULL);
1758 if (ctx->null_cond.c == TCG_COND_NEVER) {
1759 if (link != 0) {
1760 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1762 next = get_temp(ctx);
1763 tcg_gen_mov_reg(next, dest);
1764 ctx->iaoq_n = -1;
1765 ctx->iaoq_n_var = next;
1766 if (is_n) {
1767 ctx->null_cond.c = TCG_COND_ALWAYS;
1769 } else if (is_n && use_nullify_skip(ctx)) {
1770 /* The (conditional) branch, B, nullifies the next insn, N,
1771 and we're allowed to skip execution N (no single-step or
1772 tracepoint in effect). Since the goto_ptr that we must use
1773 for the indirect branch consumes no special resources, we
1774 can (conditionally) skip B and continue execution. */
1775 /* The use_nullify_skip test implies we have a known control path. */
1776 tcg_debug_assert(ctx->iaoq_b != -1);
1777 tcg_debug_assert(ctx->iaoq_n != -1);
1779 /* We do have to handle the non-local temporary, DEST, before
1780 branching. Since IOAQ_F is not really live at this point, we
1781 can simply store DEST optimistically. Similarly with IAOQ_B. */
1782 tcg_gen_mov_reg(cpu_iaoq_f, dest);
1783 tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1785 nullify_over(ctx);
1786 if (link != 0) {
1787 tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1789 tcg_gen_lookup_and_goto_ptr();
1790 return nullify_end(ctx, DISAS_NEXT);
1791 } else {
1792 cond_prep(&ctx->null_cond);
1793 c = ctx->null_cond.c;
1794 a0 = ctx->null_cond.a0;
1795 a1 = ctx->null_cond.a1;
1797 tmp = tcg_temp_new();
1798 next = get_temp(ctx);
1800 copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1801 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1802 ctx->iaoq_n = -1;
1803 ctx->iaoq_n_var = next;
1805 if (link != 0) {
1806 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1809 if (is_n) {
1810 /* The branch nullifies the next insn, which means the state of N
1811 after the branch is the inverse of the state of N that applied
1812 to the branch. */
1813 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1814 cond_free(&ctx->null_cond);
1815 ctx->null_cond = cond_make_n();
1816 ctx->psw_n_nonzero = true;
1817 } else {
1818 cond_free(&ctx->null_cond);
1822 return DISAS_NEXT;
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   in than the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static DisasJumpType do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        return DISAS_NORETURN;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        return DISAS_NORETURN;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_mov_reg(cpu_iaoq_f, cpu_gr[31]);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        return DISAS_IAQ_N_UPDATED;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        return DISAS_NORETURN;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        return DISAS_NORETURN;
    }
}
#endif
1885 static DisasJumpType trans_nop(DisasContext *ctx, uint32_t insn,
1886 const DisasInsn *di)
1888 cond_free(&ctx->null_cond);
1889 return DISAS_NEXT;
1892 static DisasJumpType trans_break(DisasContext *ctx, uint32_t insn,
1893 const DisasInsn *di)
1895 nullify_over(ctx);
1896 return nullify_end(ctx, gen_excp(ctx, EXCP_BREAK));
1899 static DisasJumpType trans_sync(DisasContext *ctx, uint32_t insn,
1900 const DisasInsn *di)
1902 /* No point in nullifying the memory barrier. */
1903 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1905 cond_free(&ctx->null_cond);
1906 return DISAS_NEXT;
1909 static DisasJumpType trans_mfia(DisasContext *ctx, uint32_t insn,
1910 const DisasInsn *di)
1912 unsigned rt = extract32(insn, 0, 5);
1913 TCGv_reg tmp = dest_gpr(ctx, rt);
1914 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
1915 save_gpr(ctx, rt, tmp);
1917 cond_free(&ctx->null_cond);
1918 return DISAS_NEXT;
1921 static DisasJumpType trans_mfsp(DisasContext *ctx, uint32_t insn,
1922 const DisasInsn *di)
1924 unsigned rt = extract32(insn, 0, 5);
1925 unsigned rs = assemble_sr3(insn);
1926 TCGv_i64 t0 = tcg_temp_new_i64();
1927 TCGv_reg t1 = tcg_temp_new();
1929 load_spr(ctx, t0, rs);
1930 tcg_gen_shri_i64(t0, t0, 32);
1931 tcg_gen_trunc_i64_reg(t1, t0);
1933 save_gpr(ctx, rt, t1);
1934 tcg_temp_free(t1);
1935 tcg_temp_free_i64(t0);
1937 cond_free(&ctx->null_cond);
1938 return DISAS_NEXT;
1941 static DisasJumpType trans_mfctl(DisasContext *ctx, uint32_t insn,
1942 const DisasInsn *di)
1944 unsigned rt = extract32(insn, 0, 5);
1945 unsigned ctl = extract32(insn, 21, 5);
1946 TCGv_reg tmp;
1948 switch (ctl) {
1949 case CR_SAR:
1950 #ifdef TARGET_HPPA64
1951 if (extract32(insn, 14, 1) == 0) {
1952 /* MFSAR without ,W masks low 5 bits. */
1953 tmp = dest_gpr(ctx, rt);
1954 tcg_gen_andi_reg(tmp, cpu_sar, 31);
1955 save_gpr(ctx, rt, tmp);
1956 goto done;
1958 #endif
1959 save_gpr(ctx, rt, cpu_sar);
1960 goto done;
1961 case CR_IT: /* Interval Timer */
1962 /* FIXME: Respect PSW_S bit. */
1963 nullify_over(ctx);
1964 tmp = dest_gpr(ctx, rt);
1965 tcg_gen_movi_reg(tmp, 0); /* FIXME */
1966 save_gpr(ctx, rt, tmp);
1967 break;
1968 case 26:
1969 case 27:
1970 break;
1971 default:
1972 /* All other control registers are privileged. */
1973 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
1974 break;
1977 tmp = get_temp(ctx);
1978 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
1979 save_gpr(ctx, rt, tmp);
1981 done:
1982 cond_free(&ctx->null_cond);
1983 return DISAS_NEXT;
1986 static DisasJumpType trans_mtsp(DisasContext *ctx, uint32_t insn,
1987 const DisasInsn *di)
1989 unsigned rr = extract32(insn, 16, 5);
1990 unsigned rs = assemble_sr3(insn);
1991 TCGv_i64 t64;
1993 if (rs >= 5) {
1994 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
1996 nullify_over(ctx);
1998 t64 = tcg_temp_new_i64();
1999 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2000 tcg_gen_shli_i64(t64, t64, 32);
2002 if (rs >= 4) {
2003 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
2004 } else {
2005 tcg_gen_mov_i64(cpu_sr[rs], t64);
2007 tcg_temp_free_i64(t64);
2009 return nullify_end(ctx, DISAS_NEXT);
2012 static DisasJumpType trans_mtctl(DisasContext *ctx, uint32_t insn,
2013 const DisasInsn *di)
2015 unsigned rin = extract32(insn, 16, 5);
2016 unsigned ctl = extract32(insn, 21, 5);
2017 TCGv_reg reg = load_gpr(ctx, rin);
2018 TCGv_reg tmp;
2020 if (ctl == CR_SAR) {
2021 tmp = tcg_temp_new();
2022 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2023 save_or_nullify(ctx, cpu_sar, tmp);
2024 tcg_temp_free(tmp);
2026 cond_free(&ctx->null_cond);
2027 return DISAS_NEXT;
2030 /* All other control registers are privileged or read-only. */
2031 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2033 nullify_over(ctx);
2034 switch (ctl) {
2035 case CR_IT:
2036 /* ??? modify interval timer offset */
2037 break;
2039 case CR_IIASQ:
2040 case CR_IIAOQ:
2041 /* FIXME: Respect PSW_Q bit */
2042 /* The write advances the queue and stores to the back element. */
2043 tmp = get_temp(ctx);
2044 tcg_gen_ld_reg(tmp, cpu_env,
2045 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2046 tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2047 tcg_gen_st_reg(reg, cpu_env,
2048 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2049 break;
2051 default:
2052 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2053 break;
2055 return nullify_end(ctx, DISAS_NEXT);
2058 static DisasJumpType trans_mtsarcm(DisasContext *ctx, uint32_t insn,
2059 const DisasInsn *di)
2061 unsigned rin = extract32(insn, 16, 5);
2062 TCGv_reg tmp = tcg_temp_new();
2064 tcg_gen_not_reg(tmp, load_gpr(ctx, rin));
2065 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2066 save_or_nullify(ctx, cpu_sar, tmp);
2067 tcg_temp_free(tmp);
2069 cond_free(&ctx->null_cond);
2070 return DISAS_NEXT;
2073 static DisasJumpType trans_ldsid(DisasContext *ctx, uint32_t insn,
2074 const DisasInsn *di)
2076 unsigned rt = extract32(insn, 0, 5);
2077 TCGv_reg dest = dest_gpr(ctx, rt);
2079 /* Since we don't implement space registers, this returns zero. */
2080 tcg_gen_movi_reg(dest, 0);
2081 save_gpr(ctx, rt, dest);
2083 cond_free(&ctx->null_cond);
2084 return DISAS_NEXT;
2087 #ifndef CONFIG_USER_ONLY
2088 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
2089 static target_ureg extract_sm_imm(uint32_t insn)
2091 target_ureg val = extract32(insn, 16, 10);
2093 if (val & PSW_SM_E) {
2094 val = (val & ~PSW_SM_E) | PSW_E;
2096 if (val & PSW_SM_W) {
2097 val = (val & ~PSW_SM_W) | PSW_W;
2099 return val;
2102 static DisasJumpType trans_rsm(DisasContext *ctx, uint32_t insn,
2103 const DisasInsn *di)
2105 unsigned rt = extract32(insn, 0, 5);
2106 target_ureg sm = extract_sm_imm(insn);
2107 TCGv_reg tmp;
2109 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2110 nullify_over(ctx);
2112 tmp = get_temp(ctx);
2113 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2114 tcg_gen_andi_reg(tmp, tmp, ~sm);
2115 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2116 save_gpr(ctx, rt, tmp);
2118 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2119 return nullify_end(ctx, DISAS_IAQ_N_STALE_EXIT);
2122 static DisasJumpType trans_ssm(DisasContext *ctx, uint32_t insn,
2123 const DisasInsn *di)
2125 unsigned rt = extract32(insn, 0, 5);
2126 target_ureg sm = extract_sm_imm(insn);
2127 TCGv_reg tmp;
2129 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2130 nullify_over(ctx);
2132 tmp = get_temp(ctx);
2133 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2134 tcg_gen_ori_reg(tmp, tmp, sm);
2135 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2136 save_gpr(ctx, rt, tmp);
2138 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2139 return nullify_end(ctx, DISAS_IAQ_N_STALE_EXIT);
2142 static DisasJumpType trans_mtsm(DisasContext *ctx, uint32_t insn,
2143 const DisasInsn *di)
2145 unsigned rr = extract32(insn, 16, 5);
2146 TCGv_reg tmp, reg;
2148 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2149 nullify_over(ctx);
2151 reg = load_gpr(ctx, rr);
2152 tmp = get_temp(ctx);
2153 gen_helper_swap_system_mask(tmp, cpu_env, reg);
2155 /* Exit the TB to recognize new interrupts. */
2156 return nullify_end(ctx, DISAS_IAQ_N_STALE_EXIT);
2159 static DisasJumpType trans_rfi(DisasContext *ctx, uint32_t insn,
2160 const DisasInsn *di)
2162 unsigned comp = extract32(insn, 5, 4);
2164 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2165 nullify_over(ctx);
2167 if (comp == 5) {
2168 gen_helper_rfi_r(cpu_env);
2169 } else {
2170 gen_helper_rfi(cpu_env);
2172 if (ctx->base.singlestep_enabled) {
2173 gen_excp_1(EXCP_DEBUG);
2174 } else {
2175 tcg_gen_exit_tb(0);
2178 /* Exit the TB to recognize new interrupts. */
2179 return nullify_end(ctx, DISAS_NORETURN);
2181 #endif /* !CONFIG_USER_ONLY */
2183 static const DisasInsn table_system[] = {
2184 { 0x00000000u, 0xfc001fe0u, trans_break },
2185 { 0x00001820u, 0xffe01fffu, trans_mtsp },
2186 { 0x00001840u, 0xfc00ffffu, trans_mtctl },
2187 { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
2188 { 0x000014a0u, 0xffffffe0u, trans_mfia },
2189 { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
2190 { 0x000008a0u, 0xfc1fbfe0u, trans_mfctl },
2191 { 0x00000400u, 0xffffffffu, trans_sync },
2192 { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
2193 #ifndef CONFIG_USER_ONLY
2194 { 0x00000e60u, 0xfc00ffe0u, trans_rsm },
2195 { 0x00000d60u, 0xfc00ffe0u, trans_ssm },
2196 { 0x00001860u, 0xffe0ffffu, trans_mtsm },
2197 { 0x00000c00u, 0xfffffe1fu, trans_rfi },
2198 #endif
2201 static DisasJumpType trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
2202 const DisasInsn *di)
2204 unsigned rb = extract32(insn, 21, 5);
2205 unsigned rx = extract32(insn, 16, 5);
2206 TCGv_reg dest = dest_gpr(ctx, rb);
2207 TCGv_reg src1 = load_gpr(ctx, rb);
2208 TCGv_reg src2 = load_gpr(ctx, rx);
2210 /* The only thing we need to do is the base register modification. */
2211 tcg_gen_add_reg(dest, src1, src2);
2212 save_gpr(ctx, rb, dest);
2214 cond_free(&ctx->null_cond);
2215 return DISAS_NEXT;
2218 static DisasJumpType trans_probe(DisasContext *ctx, uint32_t insn,
2219 const DisasInsn *di)
2221 unsigned rt = extract32(insn, 0, 5);
2222 unsigned rb = extract32(insn, 21, 5);
2223 unsigned is_write = extract32(insn, 6, 1);
2224 TCGv_reg dest;
2226 nullify_over(ctx);
2228 /* ??? Do something with priv level operand. */
2229 dest = dest_gpr(ctx, rt);
2230 if (is_write) {
2231 gen_helper_probe_w(dest, load_gpr(ctx, rb));
2232 } else {
2233 gen_helper_probe_r(dest, load_gpr(ctx, rb));
2235 save_gpr(ctx, rt, dest);
2236 return nullify_end(ctx, DISAS_NEXT);
2239 static const DisasInsn table_mem_mgmt[] = {
2240 { 0x04003280u, 0xfc003fffu, trans_nop }, /* fdc, disp */
2241 { 0x04001280u, 0xfc003fffu, trans_nop }, /* fdc, index */
2242 { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
2243 { 0x040012c0u, 0xfc003fffu, trans_nop }, /* fdce */
2244 { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
2245 { 0x04000280u, 0xfc001fffu, trans_nop }, /* fic 0a */
2246 { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
2247 { 0x040013c0u, 0xfc003fffu, trans_nop }, /* fic 4f */
2248 { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
2249 { 0x040002c0u, 0xfc001fffu, trans_nop }, /* fice */
2250 { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
2251 { 0x04002700u, 0xfc003fffu, trans_nop }, /* pdc */
2252 { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
2253 { 0x04001180u, 0xfc003fa0u, trans_probe }, /* probe */
2254 { 0x04003180u, 0xfc003fa0u, trans_probe }, /* probei */
/* ADD and SHLADD family.  EXT selects the variant (logical, trap on
   signed overflow, with carry); SHIFT is the pre-shift applied to R1
   (non-zero only for SHLADD).  */
static DisasJumpType trans_add(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned ext = extract32(insn, 8, 4);
    unsigned shift = extract32(insn, 6, 2);
    unsigned rt = extract32(insn, 0, 5);
    TCGv_reg tcg_r1, tcg_r2;
    bool is_c = false;
    bool is_l = false;
    /* Never set below: register-form ADD has no ,TC variant; kept so
       the do_add call site matches the immediate form.  */
    bool is_tc = false;
    bool is_tsv = false;
    DisasJumpType ret;

    switch (ext) {
    case 0x6: /* ADD, SHLADD */
        break;
    case 0xa: /* ADD,L, SHLADD,L */
        is_l = true;
        break;
    case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
        is_tsv = true;
        break;
    case 0x7: /* ADD,C */
        is_c = true;
        break;
    case 0xf: /* ADD,C,TSV */
        is_c = is_tsv = true;
        break;
    default:
        return gen_illegal(ctx);
    }

    /* A non-zero condition field may nullify; begin the nullify frame.  */
    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
    return nullify_end(ctx, ret);
}
/* SUB family.  EXT selects plain subtract, borrow (,B), trap on
   signed overflow (,TSV) and trap on condition (,TC) variants.  */
static DisasJumpType trans_sub(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned ext = extract32(insn, 6, 6);
    unsigned rt = extract32(insn, 0, 5);
    TCGv_reg tcg_r1, tcg_r2;
    bool is_b = false;
    bool is_tc = false;
    bool is_tsv = false;
    DisasJumpType ret;

    switch (ext) {
    case 0x10: /* SUB */
        break;
    case 0x30: /* SUB,TSV */
        is_tsv = true;
        break;
    case 0x14: /* SUB,B */
        is_b = true;
        break;
    case 0x34: /* SUB,B,TSV */
        is_b = is_tsv = true;
        break;
    case 0x13: /* SUB,TC */
        is_tc = true;
        break;
    case 0x33: /* SUB,TSV,TC */
        is_tc = is_tsv = true;
        break;
    default:
        return gen_illegal(ctx);
    }

    /* A non-zero condition field may nullify; begin the nullify frame.  */
    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
    return nullify_end(ctx, ret);
}
2346 static DisasJumpType trans_log(DisasContext *ctx, uint32_t insn,
2347 const DisasInsn *di)
2349 unsigned r2 = extract32(insn, 21, 5);
2350 unsigned r1 = extract32(insn, 16, 5);
2351 unsigned cf = extract32(insn, 12, 4);
2352 unsigned rt = extract32(insn, 0, 5);
2353 TCGv_reg tcg_r1, tcg_r2;
2354 DisasJumpType ret;
2356 if (cf) {
2357 nullify_over(ctx);
2359 tcg_r1 = load_gpr(ctx, r1);
2360 tcg_r2 = load_gpr(ctx, r2);
2361 ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f.ttt);
2362 return nullify_end(ctx, ret);
2365 /* OR r,0,t -> COPY (according to gas) */
2366 static DisasJumpType trans_copy(DisasContext *ctx, uint32_t insn,
2367 const DisasInsn *di)
2369 unsigned r1 = extract32(insn, 16, 5);
2370 unsigned rt = extract32(insn, 0, 5);
2372 if (r1 == 0) {
2373 TCGv_reg dest = dest_gpr(ctx, rt);
2374 tcg_gen_movi_reg(dest, 0);
2375 save_gpr(ctx, rt, dest);
2376 } else {
2377 save_gpr(ctx, rt, cpu_gr[r1]);
2379 cond_free(&ctx->null_cond);
2380 return DISAS_NEXT;
2383 static DisasJumpType trans_cmpclr(DisasContext *ctx, uint32_t insn,
2384 const DisasInsn *di)
2386 unsigned r2 = extract32(insn, 21, 5);
2387 unsigned r1 = extract32(insn, 16, 5);
2388 unsigned cf = extract32(insn, 12, 4);
2389 unsigned rt = extract32(insn, 0, 5);
2390 TCGv_reg tcg_r1, tcg_r2;
2391 DisasJumpType ret;
2393 if (cf) {
2394 nullify_over(ctx);
2396 tcg_r1 = load_gpr(ctx, r1);
2397 tcg_r2 = load_gpr(ctx, r2);
2398 ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
2399 return nullify_end(ctx, ret);
2402 static DisasJumpType trans_uxor(DisasContext *ctx, uint32_t insn,
2403 const DisasInsn *di)
2405 unsigned r2 = extract32(insn, 21, 5);
2406 unsigned r1 = extract32(insn, 16, 5);
2407 unsigned cf = extract32(insn, 12, 4);
2408 unsigned rt = extract32(insn, 0, 5);
2409 TCGv_reg tcg_r1, tcg_r2;
2410 DisasJumpType ret;
2412 if (cf) {
2413 nullify_over(ctx);
2415 tcg_r1 = load_gpr(ctx, r1);
2416 tcg_r2 = load_gpr(ctx, r2);
2417 ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_reg);
2418 return nullify_end(ctx, ret);
/* UADDCM / UADDCMT: unit add complement, RT = R1 + ~R2.  Bit 6
   selects the trap-on-condition (,TC) form.  */
static DisasJumpType trans_uaddcm(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned is_tc = extract32(insn, 6, 1);
    unsigned rt = extract32(insn, 0, 5);
    TCGv_reg tcg_r1, tcg_r2, tmp;
    DisasJumpType ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    tmp = get_temp(ctx);
    /* Complement R2 and feed the sum through the unit-condition path.  */
    tcg_gen_not_reg(tmp, tcg_r2);
    ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_reg);
    return nullify_end(ctx, ret);
}
/* DCOR / IDCOR: decimal correct.  Builds a per-BCD-digit correction
   value (6 per digit lacking/having a carry, per bit 6 I-form) and
   adds or subtracts it from R2.  */
static DisasJumpType trans_dcor(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned is_i = extract32(insn, 6, 1);
    unsigned rt = extract32(insn, 0, 5);
    TCGv_reg tmp;
    DisasJumpType ret;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    /* Move the per-nibble carry bits down to bit 0 of each nibble.  */
    tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
    if (!is_i) {
        tcg_gen_not_reg(tmp, tmp);
    }
    /* One flag bit per 4-bit digit, scaled to the BCD correction 6.  */
    tcg_gen_andi_reg(tmp, tmp, 0x11111111);
    tcg_gen_muli_reg(tmp, tmp, 6);
    ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
                  is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);

    return nullify_end(ctx, ret);
}
/* DS: divide step.  Computes (R1 << 1 | PSW[CB]) +/- R2 depending on
   PSW[V], updating PSW[CB] and PSW[V] for the next step.  The exact
   ordering of the carry computations below is load-bearing.  */
static DisasJumpType trans_ds(DisasContext *ctx, uint32_t insn,
                              const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 0, 5);
    TCGv_reg dest, add1, add2, addc, zero, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, r1);
    in2 = load_gpr(ctx, r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_const_reg(0);

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_reg(add1, in1, in1);
    tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);

    /* Add or subtract R2, depending on PSW[V].  Proper computation of
       carry{8} requires that we subtract via + ~R2 + 1, as described in
       the manual.  By extracting and masking V, we can produce the
       proper inputs to the addition without movcond.  */
    tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
    tcg_gen_xor_reg(add2, in2, addc);
    tcg_gen_andi_reg(addc, addc, 1);
    /* ??? This is only correct for 32-bit.  */
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    tcg_temp_free(addc);
    tcg_temp_free(zero);

    /* Write back the result register.  */
    save_gpr(ctx, rt, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
    tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step.  */
    tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
    tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (cf) {
        TCGv_reg sv = NULL;
        if (cf >> 1 == 6) {
            /* ??? The lshift is supposed to contribute to overflow.  */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
    }

    tcg_temp_free(add1);
    tcg_temp_free(add2);
    tcg_temp_free(dest);

    return nullify_end(ctx, DISAS_NEXT);
}
/* Decode table for the opcode 0x02 arithmetic/logical group.  More
   specific (larger-mask) entries must precede the generic ones.  */
static const DisasInsn table_arith_log[] = {
    { 0x08000240u, 0xfc00ffffu, trans_nop },  /* or x,y,0 */
    { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
    { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_reg },
    { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_reg },
    { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_reg },
    { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_reg },
    { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
    { 0x08000380u, 0xfc000fe0u, trans_uxor },
    { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
    { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
    { 0x08000440u, 0xfc000fe0u, trans_ds },
    { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
    { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
    { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
    { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
};
/* ADDI / ADDIT (and ,TSV forms): add an 11-bit signed immediate to R2.
   E1 selects trap-on-overflow; bit 26 clear selects the trapping
   ADDIT opcode, hence is_tc = !o1.  */
static DisasJumpType trans_addi(DisasContext *ctx, uint32_t insn)
{
    target_sreg im = low_sextract(insn, 0, 11);
    unsigned e1 = extract32(insn, 11, 1);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    unsigned o1 = extract32(insn, 26, 1);
    TCGv_reg tcg_im, tcg_r2;
    DisasJumpType ret;

    if (cf) {
        nullify_over(ctx);
    }

    tcg_im = load_const(ctx, im);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);

    return nullify_end(ctx, ret);
}
/* SUBI / SUBI,TSV: subtract R2 from an 11-bit signed immediate.
   E1 selects trap on signed overflow.  */
static DisasJumpType trans_subi(DisasContext *ctx, uint32_t insn)
{
    target_sreg im = low_sextract(insn, 0, 11);
    unsigned e1 = extract32(insn, 11, 1);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    TCGv_reg tcg_im, tcg_r2;
    DisasJumpType ret;

    if (cf) {
        nullify_over(ctx);
    }

    tcg_im = load_const(ctx, im);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);

    return nullify_end(ctx, ret);
}
2595 static DisasJumpType trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2597 target_sreg im = low_sextract(insn, 0, 11);
2598 unsigned cf = extract32(insn, 12, 4);
2599 unsigned rt = extract32(insn, 16, 5);
2600 unsigned r2 = extract32(insn, 21, 5);
2601 TCGv_reg tcg_im, tcg_r2;
2602 DisasJumpType ret;
2604 if (cf) {
2605 nullify_over(ctx);
2608 tcg_im = load_const(ctx, im);
2609 tcg_r2 = load_gpr(ctx, r2);
2610 ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2612 return nullify_end(ctx, ret);
2615 static DisasJumpType trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
2616 const DisasInsn *di)
2618 unsigned rt = extract32(insn, 0, 5);
2619 unsigned m = extract32(insn, 5, 1);
2620 unsigned sz = extract32(insn, 6, 2);
2621 unsigned a = extract32(insn, 13, 1);
2622 int disp = low_sextract(insn, 16, 5);
2623 unsigned rb = extract32(insn, 21, 5);
2624 int modify = (m ? (a ? -1 : 1) : 0);
2625 TCGMemOp mop = MO_TE | sz;
2627 return do_load(ctx, rt, rb, 0, 0, disp, modify, mop);
2630 static DisasJumpType trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
2631 const DisasInsn *di)
2633 unsigned rt = extract32(insn, 0, 5);
2634 unsigned m = extract32(insn, 5, 1);
2635 unsigned sz = extract32(insn, 6, 2);
2636 unsigned u = extract32(insn, 13, 1);
2637 unsigned rx = extract32(insn, 16, 5);
2638 unsigned rb = extract32(insn, 21, 5);
2639 TCGMemOp mop = MO_TE | sz;
2641 return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, m, mop);
2644 static DisasJumpType trans_st_idx_i(DisasContext *ctx, uint32_t insn,
2645 const DisasInsn *di)
2647 int disp = low_sextract(insn, 0, 5);
2648 unsigned m = extract32(insn, 5, 1);
2649 unsigned sz = extract32(insn, 6, 2);
2650 unsigned a = extract32(insn, 13, 1);
2651 unsigned rr = extract32(insn, 16, 5);
2652 unsigned rb = extract32(insn, 21, 5);
2653 int modify = (m ? (a ? -1 : 1) : 0);
2654 TCGMemOp mop = MO_TE | sz;
2656 return do_store(ctx, rr, rb, disp, modify, mop);
/* LDCW: load and clear word, implemented as an atomic exchange with
   zero.  Handles both the indexed and short-displacement forms, with
   optional base register modification.  */
static DisasJumpType trans_ldcw(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned i = extract32(insn, 12, 1);
    unsigned au = extract32(insn, 13, 1);
    unsigned rx = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
    TCGv_reg zero, addr, base, dest;
    int modify, disp = 0, scale = 0;

    nullify_over(ctx);

    /* ??? Share more code with do_load and do_load_{32,64}.  */

    if (i) {
        /* Short-displacement form: RX field is the displacement.  */
        modify = (m ? (au ? -1 : 1) : 0);
        disp = low_sextract(rx, 0, 5);
        rx = 0;
    } else {
        /* Indexed form: AU enables index scaling by the access size.  */
        modify = m;
        if (au) {
            scale = mop & MO_SIZE;
        }
    }
    if (modify) {
        /* Base register modification.  Make sure if RT == RB, we see
           the result of the load.  */
        dest = get_temp(ctx);
    } else {
        dest = dest_gpr(ctx, rt);
    }

    addr = tcg_temp_new();
    base = load_gpr(ctx, rb);
    if (rx) {
        tcg_gen_shli_reg(addr, cpu_gr[rx], scale);
        tcg_gen_add_reg(addr, addr, base);
    } else {
        tcg_gen_addi_reg(addr, base, disp);
    }

    /* Atomically read the word and replace it with zero.  With
       post-modification (modify > 0), the access uses the unmodified
       base rather than the computed address.  */
    zero = tcg_const_reg(0);
    tcg_gen_atomic_xchg_reg(dest, (modify <= 0 ? addr : base),
                            zero, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, addr);
    }
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx, DISAS_NEXT);
}
/* STBY: store bytes, beginning (A clear) or ending (A set) part of a
   word; dispatched to helpers, with separate parallel-safe variants.  */
static DisasJumpType trans_stby(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    target_sreg disp = low_sextract(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned a = extract32(insn, 13, 1);
    unsigned rt = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    TCGv_reg addr, val;

    nullify_over(ctx);

    addr = tcg_temp_new();
    /* With modification the displacement applies after the access.  */
    if (m || disp == 0) {
        tcg_gen_mov_reg(addr, load_gpr(ctx, rb));
    } else {
        tcg_gen_addi_reg(addr, load_gpr(ctx, rb), disp);
    }
    val = load_gpr(ctx, rt);

    if (a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_e_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_e(cpu_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_b_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_b(cpu_env, addr, val);
        }
    }

    if (m) {
        /* Base modification: advance and word-align the base.  */
        tcg_gen_addi_reg(addr, addr, disp);
        tcg_gen_andi_reg(addr, addr, ~3);
        save_gpr(ctx, rb, addr);
    }
    tcg_temp_free(addr);

    return nullify_end(ctx, DISAS_NEXT);
}
/* Decode table for the opcode 0x03 indexed memory group.  */
static const DisasInsn table_index_mem[] = {
    { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
    { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
    { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
    { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
    { 0x0c001300u, 0xfc0013c0, trans_stby },
};
2766 static DisasJumpType trans_ldil(DisasContext *ctx, uint32_t insn)
2768 unsigned rt = extract32(insn, 21, 5);
2769 target_sreg i = assemble_21(insn);
2770 TCGv_reg tcg_rt = dest_gpr(ctx, rt);
2772 tcg_gen_movi_reg(tcg_rt, i);
2773 save_gpr(ctx, rt, tcg_rt);
2774 cond_free(&ctx->null_cond);
2776 return DISAS_NEXT;
2779 static DisasJumpType trans_addil(DisasContext *ctx, uint32_t insn)
2781 unsigned rt = extract32(insn, 21, 5);
2782 target_sreg i = assemble_21(insn);
2783 TCGv_reg tcg_rt = load_gpr(ctx, rt);
2784 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2786 tcg_gen_addi_reg(tcg_r1, tcg_rt, i);
2787 save_gpr(ctx, 1, tcg_r1);
2788 cond_free(&ctx->null_cond);
2790 return DISAS_NEXT;
2793 static DisasJumpType trans_ldo(DisasContext *ctx, uint32_t insn)
2795 unsigned rb = extract32(insn, 21, 5);
2796 unsigned rt = extract32(insn, 16, 5);
2797 target_sreg i = assemble_16(insn);
2798 TCGv_reg tcg_rt = dest_gpr(ctx, rt);
2800 /* Special case rb == 0, for the LDI pseudo-op.
2801 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
2802 if (rb == 0) {
2803 tcg_gen_movi_reg(tcg_rt, i);
2804 } else {
2805 tcg_gen_addi_reg(tcg_rt, cpu_gr[rb], i);
2807 save_gpr(ctx, rt, tcg_rt);
2808 cond_free(&ctx->null_cond);
2810 return DISAS_NEXT;
2813 static DisasJumpType trans_load(DisasContext *ctx, uint32_t insn,
2814 bool is_mod, TCGMemOp mop)
2816 unsigned rb = extract32(insn, 21, 5);
2817 unsigned rt = extract32(insn, 16, 5);
2818 target_sreg i = assemble_16(insn);
2820 return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
/* Word-sized load opcode shared between FLDW (ext2 0/1) and LDW with
   base modification (ext2 2).  */
static DisasJumpType trans_load_w(DisasContext *ctx, uint32_t insn)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rt = extract32(insn, 16, 5);
    target_sreg i = assemble_16a(insn);
    unsigned ext2 = extract32(insn, 1, 2);

    switch (ext2) {
    case 0:
    case 1:
        /* FLDW without modification.  */
        return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
    case 2:
        /* LDW with modification.  Note that the sign of I selects
           post-dec vs pre-inc.  */
        return do_load(ctx, rt, rb, 0, 0, i, (i < 0 ? 1 : -1), MO_TEUL);
    default:
        return gen_illegal(ctx);
    }
}
2844 static DisasJumpType trans_fload_mod(DisasContext *ctx, uint32_t insn)
2846 target_sreg i = assemble_16a(insn);
2847 unsigned t1 = extract32(insn, 1, 1);
2848 unsigned a = extract32(insn, 2, 1);
2849 unsigned t0 = extract32(insn, 16, 5);
2850 unsigned rb = extract32(insn, 21, 5);
2852 /* FLDW with modification. */
2853 return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
2856 static DisasJumpType trans_store(DisasContext *ctx, uint32_t insn,
2857 bool is_mod, TCGMemOp mop)
2859 unsigned rb = extract32(insn, 21, 5);
2860 unsigned rt = extract32(insn, 16, 5);
2861 target_sreg i = assemble_16(insn);
2863 return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
/* Word-sized store opcode shared between FSTW (ext2 0/1) and STW with
   base modification (ext2 2).  */
static DisasJumpType trans_store_w(DisasContext *ctx, uint32_t insn)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rt = extract32(insn, 16, 5);
    target_sreg i = assemble_16a(insn);
    unsigned ext2 = extract32(insn, 1, 2);

    switch (ext2) {
    case 0:
    case 1:
        /* FSTW without modification.  */
        return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
    case 2:
        /* STW with modification.  The sign of I selects post-dec vs
           pre-inc, mirroring trans_load_w.  */
        return do_store(ctx, rt, rb, i, (i < 0 ? 1 : -1), MO_TEUL);
    default:
        return gen_illegal(ctx);
    }
}
2886 static DisasJumpType trans_fstore_mod(DisasContext *ctx, uint32_t insn)
2888 target_sreg i = assemble_16a(insn);
2889 unsigned t1 = extract32(insn, 1, 1);
2890 unsigned a = extract32(insn, 2, 1);
2891 unsigned t0 = extract32(insn, 16, 5);
2892 unsigned rb = extract32(insn, 21, 5);
2894 /* FSTW with modification. */
2895 return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
/* Coprocessor (FPU) single-word memory ops: FLDW/FSTW in indexed and
   short-displacement forms, with optional base modification.  */
static DisasJumpType trans_copr_w(DisasContext *ctx, uint32_t insn)
{
    unsigned t0 = extract32(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned t1 = extract32(insn, 6, 1);
    unsigned ext3 = extract32(insn, 7, 3);
    /* unsigned cc = extract32(insn, 10, 2); */
    unsigned i = extract32(insn, 12, 1);
    unsigned ua = extract32(insn, 13, 1);
    unsigned rx = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    unsigned rt = t1 * 32 + t0;
    int modify = (m ? (ua ? -1 : 1) : 0);
    int disp, scale;

    if (i == 0) {
        /* Indexed form: UA enables scaling the index by 4 (word).  */
        scale = (ua ? 2 : 0);
        disp = 0;
        modify = m;
    } else {
        /* Short-displacement form: the RX field holds the displacement;
           with modification, UA selects post-dec vs pre-inc.  */
        disp = low_sextract(rx, 0, 5);
        scale = 0;
        rx = 0;
        modify = (m ? (ua ? -1 : 1) : 0);
    }

    switch (ext3) {
    case 0: /* FLDW */
        return do_floadw(ctx, rt, rb, rx, scale, disp, modify);
    case 4: /* FSTW */
        return do_fstorew(ctx, rt, rb, rx, scale, disp, modify);
    }
    return gen_illegal(ctx);
}
/* Coprocessor (FPU) double-word memory ops: FLDD/FSTD in indexed and
   short-displacement forms, with optional base modification.  */
static DisasJumpType trans_copr_dw(DisasContext *ctx, uint32_t insn)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned ext4 = extract32(insn, 6, 4);
    /* unsigned cc = extract32(insn, 10, 2); */
    unsigned i = extract32(insn, 12, 1);
    unsigned ua = extract32(insn, 13, 1);
    unsigned rx = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    int modify = (m ? (ua ? -1 : 1) : 0);
    int disp, scale;

    if (i == 0) {
        /* Indexed form: UA enables scaling the index by 8 (dword).  */
        scale = (ua ? 3 : 0);
        disp = 0;
        modify = m;
    } else {
        /* Short-displacement form: the RX field holds the displacement;
           with modification, UA selects post-dec vs pre-inc.  */
        disp = low_sextract(rx, 0, 5);
        scale = 0;
        rx = 0;
        modify = (m ? (ua ? -1 : 1) : 0);
    }

    switch (ext4) {
    case 0: /* FLDD */
        return do_floadd(ctx, rt, rb, rx, scale, disp, modify);
    case 8: /* FSTD */
        return do_fstored(ctx, rt, rb, rx, scale, disp, modify);
    default:
        return gen_illegal(ctx);
    }
}
/* CMPB / CMPIB: compare and branch.  IS_TRUE selects the branch sense,
   IS_IMM the immediate comparand form; IS_DW is accepted for the
   64-bit forms but not otherwise used here.  */
static DisasJumpType trans_cmpb(DisasContext *ctx, uint32_t insn,
                                bool is_true, bool is_imm, bool is_dw)
{
    target_sreg disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned r = extract32(insn, 21, 5);
    unsigned cf = c * 2 + !is_true;
    TCGv_reg dest, in1, in2, sv;
    DisasCond cond;

    nullify_over(ctx);

    if (is_imm) {
        in1 = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        in1 = load_gpr(ctx, extract32(insn, 16, 5));
    }
    in2 = load_gpr(ctx, r);
    dest = get_temp(ctx);

    tcg_gen_sub_reg(dest, in1, in2);

    /* Condition 6 needs the signed-overflow value of the subtract.  */
    sv = NULL;
    if (c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    cond = do_sub_cond(cf, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}
/* ADDB / ADDIB: add and branch.  The result is written back to R;
   conditions 4/5 need the carry-out and condition 6 the signed
   overflow of the addition.  */
static DisasJumpType trans_addb(DisasContext *ctx, uint32_t insn,
                                bool is_true, bool is_imm)
{
    target_sreg disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned r = extract32(insn, 21, 5);
    unsigned cf = c * 2 + !is_true;
    TCGv_reg dest, in1, in2, sv, cb_msb;
    DisasCond cond;

    nullify_over(ctx);

    if (is_imm) {
        in1 = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        in1 = load_gpr(ctx, extract32(insn, 16, 5));
    }
    in2 = load_gpr(ctx, r);
    dest = dest_gpr(ctx, r);
    sv = NULL;
    cb_msb = NULL;

    switch (c) {
    default:
        tcg_gen_add_reg(dest, in1, in2);
        break;
    case 4: case 5:
        /* Carry conditions: compute the add with explicit carry-out.  */
        cb_msb = get_temp(ctx);
        tcg_gen_movi_reg(cb_msb, 0);
        tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
        break;
    case 6:
        /* Overflow condition: compute the signed-overflow value.  */
        tcg_gen_add_reg(dest, in1, in2);
        sv = do_add_sv(ctx, dest, in1, in2);
        break;
    }

    cond = do_cond(cf, dest, cb_msb, sv);
    return do_cbranch(ctx, disp, n, &cond);
}
/* BB: branch on bit.  The selected bit is shifted into the sign
   position (by immediate P or by SAR) and tested with a signed
   compare; C inverts the sense.  */
static DisasJumpType trans_bb(DisasContext *ctx, uint32_t insn)
{
    target_sreg disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);
    unsigned c = extract32(insn, 15, 1);
    unsigned r = extract32(insn, 16, 5);
    unsigned p = extract32(insn, 21, 5);
    unsigned i = extract32(insn, 26, 1);
    TCGv_reg tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_r = load_gpr(ctx, r);
    if (i) {
        /* Fixed bit position from the insn.  */
        tcg_gen_shli_reg(tmp, tcg_r, p);
    } else {
        /* Variable bit position from SAR.  */
        tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
    }

    cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
    tcg_temp_free(tmp);
    return do_cbranch(ctx, disp, n, &cond);
}
/* MOVB / MOVIB: move (register or 5-bit immediate) into R and branch
   on the shift/extract/deposit condition of the moved value.  */
static DisasJumpType trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
{
    target_sreg disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned t = extract32(insn, 16, 5);
    unsigned r = extract32(insn, 21, 5);
    TCGv_reg dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, r);
    if (is_imm) {
        tcg_gen_movi_reg(dest, low_sextract(t, 0, 5));
    } else if (t == 0) {
        tcg_gen_movi_reg(dest, 0);
    } else {
        tcg_gen_mov_reg(dest, cpu_gr[t]);
    }

    cond = do_sed_cond(c, dest);
    return do_cbranch(ctx, disp, n, &cond);
}
/* SHRPW, variable shift amount from SAR.  Three cases: a plain shift
   when R1 is zero, a 32-bit rotate when R1 == R2, and a true 64-bit
   double-word shift otherwise.  */
static DisasJumpType trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
                                     const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned c = extract32(insn, 13, 3);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    TCGv_reg dest;

    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    if (r1 == 0) {
        /* High word is zero: a simple right shift of R2.  */
        tcg_gen_ext32u_reg(dest, load_gpr(ctx, r2));
        tcg_gen_shr_reg(dest, dest, cpu_sar);
    } else if (r1 == r2) {
        /* Both halves equal: the shift is a 32-bit rotate.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, r2));
        tcg_gen_rotr_i32(t32, t32, cpu_sar);
        tcg_gen_extu_i32_reg(dest, t32);
        tcg_temp_free_i32(t32);
    } else {
        /* General case: shift the R1:R2 pair as a 64-bit quantity.  */
        TCGv_i64 t = tcg_temp_new_i64();
        TCGv_i64 s = tcg_temp_new_i64();

        tcg_gen_concat_reg_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
        tcg_gen_extu_reg_i64(s, cpu_sar);
        tcg_gen_shr_i64(t, t, s);
        tcg_gen_trunc_i64_reg(dest, t);

        tcg_temp_free_i64(t);
        tcg_temp_free_i64(s);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
/* SHRPW, immediate shift amount.  CPOS is a big-endian bit position;
   the effective right-shift amount is 31 - CPOS.  */
static DisasJumpType trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
                                     const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned cpos = extract32(insn, 5, 5);
    unsigned c = extract32(insn, 13, 3);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    unsigned sa = 31 - cpos;
    TCGv_reg dest, t2;

    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    t2 = load_gpr(ctx, r2);
    if (r1 == r2) {
        /* Equal halves: the double shift is a 32-bit rotate.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, t2);
        tcg_gen_rotri_i32(t32, t32, sa);
        tcg_gen_extu_i32_reg(dest, t32);
        tcg_temp_free_i32(t32);
    } else if (r1 == 0) {
        /* High word is zero: a plain extract of R2.  */
        tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
    } else {
        /* General case: low bits from R2, high bits deposited from R1.  */
        TCGv_reg t0 = tcg_temp_new();
        tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
        tcg_gen_deposit_reg(dest, t0, cpu_gr[r1], 32 - sa, sa);
        tcg_temp_free(t0);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
/* EXTRW, variable position from SAR.  IS_SE selects sign extension
   of the extracted field; LEN is 32 - CLEN bits.  */
static DisasJumpType trans_extrw_sar(DisasContext *ctx, uint32_t insn,
                                     const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned is_se = extract32(insn, 10, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned rt = extract32(insn, 16, 5);
    unsigned rr = extract32(insn, 21, 5);
    unsigned len = 32 - clen;
    TCGv_reg dest, src, tmp;

    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    src = load_gpr(ctx, rr);
    tmp = tcg_temp_new();

    /* Recall that SAR is using big-endian bit numbering.  */
    tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
    if (is_se) {
        tcg_gen_sar_reg(dest, src, tmp);
        tcg_gen_sextract_reg(dest, dest, 0, len);
    } else {
        tcg_gen_shr_reg(dest, src, tmp);
        tcg_gen_extract_reg(dest, dest, 0, len);
    }
    tcg_temp_free(tmp);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
/* EXTRW, immediate position.  POS is big-endian, so the little-endian
   field position is 31 - POS; IS_SE selects sign extension.  */
static DisasJumpType trans_extrw_imm(DisasContext *ctx, uint32_t insn,
                                     const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned pos = extract32(insn, 5, 5);
    unsigned is_se = extract32(insn, 10, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned rt = extract32(insn, 16, 5);
    unsigned rr = extract32(insn, 21, 5);
    unsigned len = 32 - clen;
    unsigned cpos = 31 - pos;
    TCGv_reg dest, src;

    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    src = load_gpr(ctx, rr);
    if (is_se) {
        tcg_gen_sextract_reg(dest, src, cpos, len);
    } else {
        tcg_gen_extract_reg(dest, src, cpos, len);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
/* Decode table for the shift-pair and extract group (opcode 0x34).  */
static const DisasInsn table_sh_ex[] = {
    { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
    { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
    { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
    { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
};
/* DEPWI, immediate position: deposit a 5-bit signed immediate into
   RT.  Both the deposit and the surrounding bits can be folded into
   constant AND/OR masks.  */
static DisasJumpType trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned cpos = extract32(insn, 5, 5);
    unsigned nz = extract32(insn, 10, 1);
    unsigned c = extract32(insn, 13, 3);
    target_sreg val = low_sextract(insn, 16, 5);
    unsigned rt = extract32(insn, 21, 5);
    unsigned len = 32 - clen;
    target_sreg mask0, mask1;
    TCGv_reg dest;

    if (c) {
        nullify_over(ctx);
    }
    /* Clamp the field so it does not run past bit 31.  */
    if (cpos + len > 32) {
        len = 32 - cpos;
    }

    dest = dest_gpr(ctx, rt);
    /* mask0: field bits set to VAL, rest zero; mask1: rest all-ones.  */
    mask0 = deposit64(0, cpos, len, val);
    mask1 = deposit64(-1, cpos, len, val);

    if (nz) {
        /* Non-zero form: merge VAL into the existing RT contents.  */
        TCGv_reg src = load_gpr(ctx, rt);
        if (mask1 != -1) {
            tcg_gen_andi_reg(dest, src, mask1);
            src = dest;
        }
        tcg_gen_ori_reg(dest, src, mask0);
    } else {
        /* Zero form: the result is just the deposited constant.  */
        tcg_gen_movi_reg(dest, mask0);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
/* DEPW, immediate position: deposit bits of RR into RT.  The NZ bit
   selects merging into the old RT value vs depositing into zero.  */
static DisasJumpType trans_depw_imm(DisasContext *ctx, uint32_t insn,
                                    const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned cpos = extract32(insn, 5, 5);
    unsigned nz = extract32(insn, 10, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned rr = extract32(insn, 16, 5);
    unsigned rt = extract32(insn, 21, 5);
    unsigned rs = nz ? rt : 0;
    unsigned len = 32 - clen;
    TCGv_reg dest, val;

    if (c) {
        nullify_over(ctx);
    }
    /* Clamp the field so it does not run past bit 31.  */
    if (cpos + len > 32) {
        len = 32 - cpos;
    }

    dest = dest_gpr(ctx, rt);
    val = load_gpr(ctx, rr);
    if (rs == 0) {
        tcg_gen_deposit_z_reg(dest, val, cpos, len);
    } else {
        tcg_gen_deposit_reg(dest, cpu_gr[rs], val, cpos, len);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
/* DEPW / DEPWI with variable position from SAR: build a mask of LEN
   low bits, shift mask and value into place, and merge.  */
static DisasJumpType trans_depw_sar(DisasContext *ctx, uint32_t insn,
                                    const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned nz = extract32(insn, 10, 1);
    unsigned i = extract32(insn, 12, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned rt = extract32(insn, 21, 5);
    unsigned rs = nz ? rt : 0;
    unsigned len = 32 - clen;
    TCGv_reg val, mask, tmp, shift, dest;
    unsigned msb = 1U << (len - 1);

    if (c) {
        nullify_over(ctx);
    }

    if (i) {
        /* Immediate form: 5-bit signed value.  */
        val = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        val = load_gpr(ctx, extract32(insn, 16, 5));
    }
    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new();
    tmp = tcg_temp_new();

    /* Convert big-endian bit numbering in SAR to left-shift.  */
    tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);

    /* Mask of LEN low bits.  */
    mask = tcg_const_reg(msb + (msb - 1));
    tcg_gen_and_reg(tmp, val, mask);
    if (rs) {
        /* Merge into the previous RT contents.  */
        tcg_gen_shl_reg(mask, mask, shift);
        tcg_gen_shl_reg(tmp, tmp, shift);
        tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
        tcg_gen_or_reg(dest, dest, tmp);
    } else {
        /* Deposit into zero.  */
        tcg_gen_shl_reg(dest, tmp, shift);
    }
    tcg_temp_free(shift);
    tcg_temp_free(mask);
    tcg_temp_free(tmp);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
/* Decode table for the deposit group (opcode 0x35).  */
static const DisasInsn table_depw[] = {
    { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
    { 0xd4000800u, 0xfc001800u, trans_depw_imm },
    { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
};
/* BE / BLE: branch external.  Spaces are not implemented, so this
   reduces to a (possibly linking, via GR31) branch to RB + disp.  */
static DisasJumpType trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
{
    unsigned n = extract32(insn, 1, 1);
    unsigned b = extract32(insn, 21, 5);
    target_sreg disp = assemble_17(insn);

    /* unsigned s = low_uextract(insn, 13, 3); */
    /* ??? It seems like there should be a good way of using
       "be disp(sr2, r0)", the canonical gateway entry mechanism
       to our advantage.  But that appears to be inconvenient to
       manage along side branch delay slots.  Therefore we handle
       entry into the gateway page via absolute address.  */

    /* Since we don't implement spaces, just branch.  Do notice the special
       case of "be disp(*,r0)" using a direct branch to disp, so that we can
       goto_tb to the TB containing the syscall.  */
    if (b == 0) {
        return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
    } else {
        TCGv_reg tmp = get_temp(ctx);
        tcg_gen_addi_reg(tmp, load_gpr(ctx, b), disp);
        return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
    }
}
3422 static DisasJumpType trans_bl(DisasContext *ctx, uint32_t insn,
3423 const DisasInsn *di)
3425 unsigned n = extract32(insn, 1, 1);
3426 unsigned link = extract32(insn, 21, 5);
3427 target_sreg disp = assemble_17(insn);
3429 return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
3432 static DisasJumpType trans_bl_long(DisasContext *ctx, uint32_t insn,
3433 const DisasInsn *di)
3435 unsigned n = extract32(insn, 1, 1);
3436 target_sreg disp = assemble_22(insn);
3438 return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
3441 static DisasJumpType trans_blr(DisasContext *ctx, uint32_t insn,
3442 const DisasInsn *di)
3444 unsigned n = extract32(insn, 1, 1);
3445 unsigned rx = extract32(insn, 16, 5);
3446 unsigned link = extract32(insn, 21, 5);
3447 TCGv_reg tmp = get_temp(ctx);
3449 tcg_gen_shli_reg(tmp, load_gpr(ctx, rx), 3);
3450 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3451 return do_ibranch(ctx, tmp, link, n);
3454 static DisasJumpType trans_bv(DisasContext *ctx, uint32_t insn,
3455 const DisasInsn *di)
3457 unsigned n = extract32(insn, 1, 1);
3458 unsigned rx = extract32(insn, 16, 5);
3459 unsigned rb = extract32(insn, 21, 5);
3460 TCGv_reg dest;
3462 if (rx == 0) {
3463 dest = load_gpr(ctx, rb);
3464 } else {
3465 dest = get_temp(ctx);
3466 tcg_gen_shli_reg(dest, load_gpr(ctx, rx), 3);
3467 tcg_gen_add_reg(dest, dest, load_gpr(ctx, rb));
3469 return do_ibranch(ctx, dest, 0, n);
3472 static DisasJumpType trans_bve(DisasContext *ctx, uint32_t insn,
3473 const DisasInsn *di)
3475 unsigned n = extract32(insn, 1, 1);
3476 unsigned rb = extract32(insn, 21, 5);
3477 unsigned link = extract32(insn, 13, 1) ? 2 : 0;
3479 return do_ibranch(ctx, load_gpr(ctx, rb), link, n);
/* Decode table for major opcode 0x3A (branch instructions).  */
static const DisasInsn table_branch[] = {
    { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
    { 0xe800a000u, 0xfc00e000u, trans_bl_long },
    { 0xe8004000u, 0xfc00fffdu, trans_blr },
    { 0xe800c000u, 0xfc00fffdu, trans_bv },
    { 0xe800d000u, 0xfc00dffcu, trans_bve },
};
3490 static DisasJumpType trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3491 const DisasInsn *di)
3493 unsigned rt = extract32(insn, 0, 5);
3494 unsigned ra = extract32(insn, 21, 5);
3495 return do_fop_wew(ctx, rt, ra, di->f.wew);
3498 static DisasJumpType trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3499 const DisasInsn *di)
3501 unsigned rt = assemble_rt64(insn);
3502 unsigned ra = assemble_ra64(insn);
3503 return do_fop_wew(ctx, rt, ra, di->f.wew);
3506 static DisasJumpType trans_fop_ded(DisasContext *ctx, uint32_t insn,
3507 const DisasInsn *di)
3509 unsigned rt = extract32(insn, 0, 5);
3510 unsigned ra = extract32(insn, 21, 5);
3511 return do_fop_ded(ctx, rt, ra, di->f.ded);
3514 static DisasJumpType trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
3515 const DisasInsn *di)
3517 unsigned rt = extract32(insn, 0, 5);
3518 unsigned ra = extract32(insn, 21, 5);
3519 return do_fop_wed(ctx, rt, ra, di->f.wed);
3522 static DisasJumpType trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
3523 const DisasInsn *di)
3525 unsigned rt = assemble_rt64(insn);
3526 unsigned ra = extract32(insn, 21, 5);
3527 return do_fop_wed(ctx, rt, ra, di->f.wed);
3530 static DisasJumpType trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
3531 const DisasInsn *di)
3533 unsigned rt = extract32(insn, 0, 5);
3534 unsigned ra = extract32(insn, 21, 5);
3535 return do_fop_dew(ctx, rt, ra, di->f.dew);
3538 static DisasJumpType trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
3539 const DisasInsn *di)
3541 unsigned rt = extract32(insn, 0, 5);
3542 unsigned ra = assemble_ra64(insn);
3543 return do_fop_dew(ctx, rt, ra, di->f.dew);
3546 static DisasJumpType trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
3547 const DisasInsn *di)
3549 unsigned rt = extract32(insn, 0, 5);
3550 unsigned rb = extract32(insn, 16, 5);
3551 unsigned ra = extract32(insn, 21, 5);
3552 return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3555 static DisasJumpType trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
3556 const DisasInsn *di)
3558 unsigned rt = assemble_rt64(insn);
3559 unsigned rb = assemble_rb64(insn);
3560 unsigned ra = assemble_ra64(insn);
3561 return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3564 static DisasJumpType trans_fop_dedd(DisasContext *ctx, uint32_t insn,
3565 const DisasInsn *di)
3567 unsigned rt = extract32(insn, 0, 5);
3568 unsigned rb = extract32(insn, 16, 5);
3569 unsigned ra = extract32(insn, 21, 5);
3570 return do_fop_dedd(ctx, rt, ra, rb, di->f.dedd);
/* FCPY,SGL: plain register copy; no arithmetic, no exception flags.  */
static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}
/* FCPY,DBL: plain register copy; no arithmetic, no exception flags.  */
static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}
/* FABS,SGL: clear the sign bit of a single-precision value.  */
static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}
/* FABS,DBL: clear the sign bit of a double-precision value.  */
static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}
/* FNEG,SGL: flip the sign bit of a single-precision value.  */
static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}
/* FNEG,DBL: flip the sign bit of a double-precision value.  */
static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}
/* FNEGABS,SGL: force the sign bit set (negative absolute value).  */
static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}
/* FNEGABS,DBL: force the sign bit set (negative absolute value).  */
static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}
/* Shared implementation of FCMP,SGL for both major-opcode encodings.
   Loads FRa and FRb and hands the comparison, together with the Y
   (queue selector) and C (condition) fields, to the fcmp_s helper,
   which updates the FP status shadow in env.  */
static DisasJumpType do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
                               unsigned y, unsigned c)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(ra);
    tb = load_frw0_i32(rb);
    ty = tcg_const_i32(y);
    tc = tcg_const_i32(c);

    gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i32(ta);
    tcg_temp_free_i32(tb);
    tcg_temp_free_i32(ty);
    tcg_temp_free_i32(tc);

    return nullify_end(ctx, DISAS_NEXT);
}
3635 static DisasJumpType trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
3636 const DisasInsn *di)
3638 unsigned c = extract32(insn, 0, 5);
3639 unsigned y = extract32(insn, 13, 3);
3640 unsigned rb = extract32(insn, 16, 5);
3641 unsigned ra = extract32(insn, 21, 5);
3642 return do_fcmp_s(ctx, ra, rb, y, c);
3645 static DisasJumpType trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
3646 const DisasInsn *di)
3648 unsigned c = extract32(insn, 0, 5);
3649 unsigned y = extract32(insn, 13, 3);
3650 unsigned rb = assemble_rb64(insn);
3651 unsigned ra = assemble_ra64(insn);
3652 return do_fcmp_s(ctx, ra, rb, y, c);
/* FCMP,DBL (shared by both major opcodes): load FRa and FRb and hand
   the comparison, with the Y and C fields, to the fcmp_d helper.  */
static DisasJumpType trans_fcmp_d(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    unsigned c = extract32(insn, 0, 5);
    unsigned y = extract32(insn, 13, 3);
    unsigned rb = extract32(insn, 16, 5);
    unsigned ra = extract32(insn, 21, 5);
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(ra);
    tb = load_frd0(rb);
    ty = tcg_const_i32(y);
    tc = tcg_const_i32(c);

    gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i64(ta);
    tcg_temp_free_i64(tb);
    tcg_temp_free_i32(ty);
    tcg_temp_free_i32(tc);

    return nullify_end(ctx, DISAS_NEXT);
}
/* FTEST (single-bit form): test one comparison bit, selected by Y, in
   the software shadow of FR0, and make it the nullification condition.  */
static DisasJumpType trans_ftest_t(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    unsigned y = extract32(insn, 13, 3);
    unsigned cbit = (y ^ 1) - 1;
    TCGv_reg t;

    nullify_over(ctx);

    t = tcg_temp_new();
    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
    /* Extract the selected C-bit from the fr0 shadow word.  */
    tcg_gen_extract_reg(t, t, 21 - cbit, 1);
    ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    tcg_temp_free(t);

    return nullify_end(ctx, DISAS_NEXT);
}
/* FTEST (queue form): test combinations of accumulated comparison bits
   in the FR0 shadow, per the 5-bit condition C, and install the result
   as the nullification condition.  The "rej" variants invert the sense
   of the corresponding "acc" variant.  */
static DisasJumpType trans_ftest_q(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    unsigned c = extract32(insn, 0, 5);
    int mask;
    bool inv = false;
    TCGv_reg t;

    nullify_over(ctx);

    t = tcg_temp_new();
    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));

    switch (c) {
    case 0: /* simple */
        tcg_gen_andi_reg(t, t, 0x4000000);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
        goto done;
    case 2: /* rej */
        inv = true;
        /* fallthru */
    case 1: /* acc */
        mask = 0x43ff800;
        break;
    case 6: /* rej8 */
        inv = true;
        /* fallthru */
    case 5: /* acc8 */
        mask = 0x43f8000;
        break;
    case 9: /* acc6 */
        mask = 0x43e0000;
        break;
    case 13: /* acc4 */
        mask = 0x4380000;
        break;
    case 17: /* acc2 */
        mask = 0x4200000;
        break;
    default:
        /* Unrecognized condition encodings are illegal.  */
        return gen_illegal(ctx);
    }
    if (inv) {
        /* Inverted sense: compare the OR of shadow and mask vs the mask.  */
        TCGv_reg c = load_const(ctx, mask);
        tcg_gen_or_reg(t, t, c);
        ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
    } else {
        /* Normal sense: true when none of the masked bits are set.  */
        tcg_gen_andi_reg(t, t, mask);
        ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
    }
 done:
    return nullify_end(ctx, DISAS_NEXT);
}
/* XMPYU: unsigned 32x32 -> 64-bit multiply in the FP register file.
   The sources are single-precision halves zero-extended to 64 bits;
   the 64-bit product is stored to double-precision FRt.  */
static DisasJumpType trans_xmpyu(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned rb = assemble_rb64(insn);
    unsigned ra = assemble_ra64(insn);
    TCGv_i64 a, b;

    nullify_over(ctx);

    a = load_frw0_i64(ra);
    b = load_frw0_i64(rb);
    tcg_gen_mul_i64(a, a, b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    tcg_temp_free_i64(b);

    return nullify_end(ctx, DISAS_NEXT);
}
/* Shorthands for DisasInsn initializers: each pairs a trans_* entry
   point with the matching member of the callback union.  The 32-bit
   forms are bound here to the major-opcode-0x0C variants and rebound
   to the 0x0E variants below.  */
#define FOP_DED trans_fop_ded, .f.ded
#define FOP_DEDD trans_fop_dedd, .f.dedd

#define FOP_WEW trans_fop_wew_0c, .f.wew
#define FOP_DEW trans_fop_dew_0c, .f.dew
#define FOP_WED trans_fop_wed_0c, .f.wed
#define FOP_WEWW trans_fop_weww_0c, .f.weww
/* Decode table for major opcode 0x0C floating point operations.  */
static const DisasInsn table_float_0c[] = {
    /* floating point class zero */
    { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
    { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
    { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
    { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
    { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
    { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },

    { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
    { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
    { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
    { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
    { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
    { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },

    /* floating point class three */
    { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
    { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
    { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
    { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },

    { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
    { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
    { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
    { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },

    /* floating point class one */
    /* float/float */
    { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
    { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
    /* int/float */
    { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
    { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
    { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
    { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
    /* float/int */
    { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
    { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
    { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
    { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
    /* float/int truncate */
    { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
    { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
    { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
    { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
    /* uint/float */
    { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
    { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
    { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
    { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
    /* float/uint */
    { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
    { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
    { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
    { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
    /* float/uint truncate */
    { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
    { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
    { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
    { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },

    /* floating point class two */
    { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
    { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
    { 0x30002420, 0xffffffe0, trans_ftest_q },
    { 0x30000420, 0xffff1fff, trans_ftest_t },

    /* FID.  Note that ra == rt == 0, which via fcpy puts 0 into fr0.
       This is machine/revision == 0, which is reserved for simulator.  */
    { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
};
/* Rebind the 32-bit shorthands to the major-opcode-0x0E entry points,
   which decode the 6-bit half-register numbers.  */
#undef FOP_WEW
#undef FOP_DEW
#undef FOP_WED
#undef FOP_WEWW
#define FOP_WEW trans_fop_wew_0e, .f.wew
#define FOP_DEW trans_fop_dew_0e, .f.dew
#define FOP_WED trans_fop_wed_0e, .f.wed
#define FOP_WEWW trans_fop_weww_0e, .f.weww
/* Decode table for major opcode 0x0E floating point operations.  */
static const DisasInsn table_float_0e[] = {
    /* floating point class zero */
    { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
    { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
    { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
    { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
    { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
    { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },

    { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
    { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
    { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
    { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
    { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
    { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },

    /* floating point class three */
    { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
    { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
    { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
    { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },

    { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
    { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
    { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
    { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },

    { 0x38004700, 0xfc00ef60, trans_xmpyu },

    /* floating point class one */
    /* float/float */
    { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
    { 0x38002200, 0xfc1fffc0, FOP_DEW = gen_helper_fcnv_s_d },
    /* int/float */
    { 0x38008200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_w_s },
    { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
    { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
    { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
    /* float/int */
    { 0x38010200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_w },
    { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
    { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
    { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
    /* float/int truncate */
    { 0x38018200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_w },
    { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
    { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
    { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
    /* uint/float */
    { 0x38028200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_uw_s },
    { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
    { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
    { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
    /* float/uint */
    { 0x38030200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_uw },
    { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
    { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
    { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
    /* float/uint truncate */
    { 0x38038200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_uw },
    { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
    { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
    { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },

    /* floating point class two */
    { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
    { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
};
/* Done with the FOP decode-table shorthands.  */
#undef FOP_WEW
#undef FOP_DEW
#undef FOP_WED
#undef FOP_WEWW
#undef FOP_DED
#undef FOP_DEDD
/* Convert the fmpyadd single-precision register encodings to standard.
   Bit 4 of the 5-bit encoding selects the upper bank; the low four
   bits select the register within the bank.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    unsigned bank = r & 16;
    unsigned index = r & 15;

    return 16 + bank * 2 + index;
}
/* FMPYADD / FMPYSUB: a multiply and an add (or subtract, per IS_SUB)
   issued as one instruction.  F selects single (0) vs double (1)
   precision; the single-precision register fields use the packed
   encoding decoded by fmpyadd_s_reg.  */
static DisasJumpType trans_fmpyadd(DisasContext *ctx,
                                   uint32_t insn, bool is_sub)
{
    unsigned tm = extract32(insn, 0, 5);
    unsigned f = extract32(insn, 5, 1);
    unsigned ra = extract32(insn, 6, 5);
    unsigned ta = extract32(insn, 11, 5);
    unsigned rm2 = extract32(insn, 16, 5);
    unsigned rm1 = extract32(insn, 21, 5);

    nullify_over(ctx);

    /* Independent multiply & add/sub, with undefined behaviour
       if outputs overlap inputs.  */
    if (f == 0) {
        tm = fmpyadd_s_reg(tm);
        ra = fmpyadd_s_reg(ra);
        ta = fmpyadd_s_reg(ta);
        rm2 = fmpyadd_s_reg(rm2);
        rm1 = fmpyadd_s_reg(rm1);
        do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
        do_fop_weww(ctx, ta, ta, ra,
                    is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
    } else {
        do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
        do_fop_dedd(ctx, ta, ta, ra,
                    is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
    }

    return nullify_end(ctx, DISAS_NEXT);
}
/* FMPYFADD,SGL / FMPYNFADD,SGL: fused multiply-add, single precision.
   NEG selects the negated-multiply variant; the actual rounding
   behaviour lives in the fmpy[n]fadd_s helpers.  */
static DisasJumpType trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned rt = assemble_rt64(insn);
    unsigned neg = extract32(insn, 5, 1);
    unsigned rm1 = assemble_ra64(insn);
    unsigned rm2 = assemble_rb64(insn);
    unsigned ra3 = assemble_rc64(insn);
    TCGv_i32 a, b, c;

    nullify_over(ctx);
    a = load_frw0_i32(rm1);
    b = load_frw0_i32(rm2);
    c = load_frw0_i32(ra3);

    if (neg) {
        gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
    } else {
        gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
    }

    tcg_temp_free_i32(b);
    tcg_temp_free_i32(c);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx, DISAS_NEXT);
}
/* FMPYFADD,DBL / FMPYNFADD,DBL: fused multiply-add, double precision.
   NEG selects the negated-multiply variant.  */
static DisasJumpType trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned neg = extract32(insn, 5, 1);
    unsigned rm1 = extract32(insn, 21, 5);
    unsigned rm2 = extract32(insn, 16, 5);
    unsigned ra3 = assemble_rc64(insn);
    TCGv_i64 a, b, c;

    nullify_over(ctx);
    a = load_frd0(rm1);
    b = load_frd0(rm2);
    c = load_frd0(ra3);

    if (neg) {
        gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
    } else {
        gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
    }

    tcg_temp_free_i64(b);
    tcg_temp_free_i64(c);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx, DISAS_NEXT);
}
/* Decode table for major opcode 0x2E (fused FP multiply-add).  */
static const DisasInsn table_fp_fused[] = {
    { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
    { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
};
4039 static DisasJumpType translate_table_int(DisasContext *ctx, uint32_t insn,
4040 const DisasInsn table[], size_t n)
4042 size_t i;
4043 for (i = 0; i < n; ++i) {
4044 if ((insn & table[i].mask) == table[i].insn) {
4045 return table[i].trans(ctx, insn, &table[i]);
4048 return gen_illegal(ctx);
/* Dispatch INSN through TABLE, computing the element count at the
   call site so TABLE must be a true array.  */
#define translate_table(ctx, insn, table) \
    translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
/* Top-level decode: dispatch one instruction on its 6-bit major
   opcode, either to a sub-table or directly to a translator.
   Anything not matched falls through to the illegal instruction
   exception.  */
static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t opc = extract32(insn, 26, 6);

    switch (opc) {
    case 0x00: /* system op */
        return translate_table(ctx, insn, table_system);
    case 0x01:
        return translate_table(ctx, insn, table_mem_mgmt);
    case 0x02:
        return translate_table(ctx, insn, table_arith_log);
    case 0x03:
        return translate_table(ctx, insn, table_index_mem);
    case 0x06:
        return trans_fmpyadd(ctx, insn, false);
    case 0x08:
        return trans_ldil(ctx, insn);
    case 0x09:
        return trans_copr_w(ctx, insn);
    case 0x0A:
        return trans_addil(ctx, insn);
    case 0x0B:
        return trans_copr_dw(ctx, insn);
    case 0x0C:
        return translate_table(ctx, insn, table_float_0c);
    case 0x0D:
        return trans_ldo(ctx, insn);
    case 0x0E:
        return translate_table(ctx, insn, table_float_0e);

    case 0x10:
        return trans_load(ctx, insn, false, MO_UB);
    case 0x11:
        return trans_load(ctx, insn, false, MO_TEUW);
    case 0x12:
        return trans_load(ctx, insn, false, MO_TEUL);
    case 0x13:
        /* LDWM: load word and modify base.  */
        return trans_load(ctx, insn, true, MO_TEUL);
    case 0x16:
        return trans_fload_mod(ctx, insn);
    case 0x17:
        return trans_load_w(ctx, insn);
    case 0x18:
        return trans_store(ctx, insn, false, MO_UB);
    case 0x19:
        return trans_store(ctx, insn, false, MO_TEUW);
    case 0x1A:
        return trans_store(ctx, insn, false, MO_TEUL);
    case 0x1B:
        /* STWM: store word and modify base.  */
        return trans_store(ctx, insn, true, MO_TEUL);
    case 0x1E:
        return trans_fstore_mod(ctx, insn);
    case 0x1F:
        return trans_store_w(ctx, insn);

    case 0x20:
        return trans_cmpb(ctx, insn, true, false, false);
    case 0x21:
        return trans_cmpb(ctx, insn, true, true, false);
    case 0x22:
        return trans_cmpb(ctx, insn, false, false, false);
    case 0x23:
        return trans_cmpb(ctx, insn, false, true, false);
    case 0x24:
        return trans_cmpiclr(ctx, insn);
    case 0x25:
        return trans_subi(ctx, insn);
    case 0x26:
        return trans_fmpyadd(ctx, insn, true);
    case 0x27:
        return trans_cmpb(ctx, insn, true, false, true);
    case 0x28:
        return trans_addb(ctx, insn, true, false);
    case 0x29:
        return trans_addb(ctx, insn, true, true);
    case 0x2A:
        return trans_addb(ctx, insn, false, false);
    case 0x2B:
        return trans_addb(ctx, insn, false, true);
    case 0x2C:
    case 0x2D:
        return trans_addi(ctx, insn);
    case 0x2E:
        return translate_table(ctx, insn, table_fp_fused);
    case 0x2F:
        return trans_cmpb(ctx, insn, false, false, true);

    case 0x30:
    case 0x31:
        return trans_bb(ctx, insn);
    case 0x32:
        return trans_movb(ctx, insn, false);
    case 0x33:
        return trans_movb(ctx, insn, true);
    case 0x34:
        return translate_table(ctx, insn, table_sh_ex);
    case 0x35:
        return translate_table(ctx, insn, table_depw);
    case 0x38:
        return trans_be(ctx, insn, false);
    case 0x39:
        return trans_be(ctx, insn, true);
    case 0x3A:
        return translate_table(ctx, insn, table_branch);

    case 0x04: /* spopn */
    case 0x05: /* diag */
    case 0x0F: /* product specific */
        break;

    case 0x07: /* unassigned */
    case 0x15: /* unassigned */
    case 0x1D: /* unassigned */
    case 0x37: /* unassigned */
    case 0x3F: /* unassigned */
    default:
        break;
    }
    return gen_illegal(ctx);
}
/* TranslatorOps hook: initialize the per-TB DisasContext and return
   the maximum number of insns to translate for this TB.  */
static int hppa_tr_init_disas_context(DisasContextBase *dcbase,
                                      CPUState *cs, int max_insns)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_USER_IDX;
    ctx->mmu_idx = MMU_USER_IDX;
#else
    /* The privilege level is carried in the low bits of the PC.  */
    ctx->privilege = ctx->base.pc_first & 3;
    ctx->mmu_idx = (ctx->base.tb->flags & PSW_D
                    ? ctx->privilege : MMU_PHYS_IDX);
#endif
    ctx->iaoq_f = ctx->base.pc_first;
    ctx->iaoq_b = ctx->base.tb->cs_base;
    /* Strip the privilege bits from the generic pc.  */
    ctx->base.pc_first &= -4;

    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    bound = MIN(max_insns, bound);

    ctx->ntemps = 0;
    memset(ctx->temps, 0, sizeof(ctx->temps));

    return bound;
}
/* TranslatorOps hook: per-TB setup of nullification tracking.  */
static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->base.tb->flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}
/* TranslatorOps hook: record both IA queue entries with the insn,
   so restore_state_to_opc can recover them on exception.  */
static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}
/* TranslatorOps hook: raise EXCP_DEBUG at a breakpoint and tell the
   generic loop how much code this "insn" occupied.  */
static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                     const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG);
    ctx->base.pc_next = (ctx->iaoq_f & -4) + 4;
    return true;
}
/* TranslatorOps hook: translate a single instruction, then advance
   the two-entry instruction-address queue (iaoq_f = front/current,
   iaoq_b = back/next, iaoq_n = the insn after that).  */
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->iaoq_f < TARGET_PAGE_SIZE) {
        /* Page zero holds the emulated gateway entry points.  */
        ret = do_page_zero(ctx);
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = cpu_ldl_code(env, ctx->iaoq_f & -4);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            /* Back of the queue is only known at runtime.  */
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* Insn is unconditionally nullified: skip translation.  */
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ret = translate_one(ctx, insn);
            assert(ctx->null_lab == NULL);
        }
    }

    /* Free any temporaries allocated.  */
    for (i = 0, n = ctx->ntemps; i < n; ++i) {
        tcg_temp_free(ctx->temps[i]);
        ctx->temps[i] = NULL;
    }
    ctx->ntemps = 0;

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->null_cond.c == TCG_COND_NEVER
            || ctx->null_cond.c == TCG_COND_ALWAYS) {
            /* Known nullification state: chain directly to the target.  */
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ret = DISAS_NORETURN;
        } else {
            ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.is_jmp = ret;

    if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
        return;
    }
    if (ctx->iaoq_f == -1) {
        /* Front of the queue is only known at runtime: commit it.  */
        tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
        nullify_save(ctx);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
    } else if (ctx->iaoq_b == -1) {
        tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
    }
}
/* TranslatorOps hook: emit the TB epilogue according to how the
   translation loop ended.  */
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        /* The TB already ended with an exit; nothing more to emit.  */
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        /* Commit the IA queue and nullification state to env.  */
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_exit_tb(0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }

    /* We don't actually use this during normal translation,
       but we should interact with the generic main loop.  */
    ctx->base.pc_next = ctx->base.pc_first + 4 * ctx->base.num_insns;
}
/* TranslatorOps hook: log a disassembly of the TB, special-casing the
   user-mode page-zero entry points that have no real code to show.  */
static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    target_ureg pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        qemu_log("IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        qemu_log("IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    qemu_log("IN: %s\n", lookup_symbol(pc));
    log_target_disas(cs, pc, dcbase->tb->size);
}
/* Callback bundle handed to the generic translator_loop.  */
static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .breakpoint_check   = hppa_tr_breakpoint_check,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};
/* Entry point from the common code: translate one TB via the
   generic translator loop using the hppa callbacks above.  */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    DisasContext ctx;
    translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
}
/* Restore CPU state from the values recorded at insn_start:
   data[0] is iaoq_f, data[1] is iaoq_b (-1 meaning "leave as-is",
   since the back of the queue was only known at runtime).  */
void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->iaoq_f = data[0];
    if (data[1] != -1) {
        env->iaoq_b = data[1];
    }
    /* Since we were executing the instruction at IAOQ_F, and took some
       sort of action that provoked the cpu_restore_state, we can infer
       that the instruction was not nullified.  */
    env->psw_n = 0;
}