target/hppa/translate.c
/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "trace-tcg.h"
#include "exec/log.h"

/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

#if TARGET_LONG_BITS == 64
#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#define tcg_temp_free_tl     tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl              TCGv_i32
#define tcg_temp_new_tl      tcg_temp_new_i32
#define tcg_temp_free_tl     tcg_temp_free_i32
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_reg_new   tcg_global_reg_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64
#define tcg_temp_local_new   tcg_temp_local_new_i64
#define tcg_temp_free        tcg_temp_free_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg        tcg_const_i64
#define tcg_const_local_reg  tcg_const_local_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#if UINTPTR_MAX == UINT32_MAX
# define tcg_gen_trunc_reg_ptr(p, r) \
    tcg_gen_trunc_i64_i32(TCGV_PTR_TO_NAT(p), r)
#else
# define tcg_gen_trunc_reg_ptr(p, r) \
    tcg_gen_mov_i64(TCGV_PTR_TO_NAT(p), r)
#endif
#else
#define TCGv_reg             TCGv_i32
#define tcg_temp_new         tcg_temp_new_i32
#define tcg_global_reg_new   tcg_global_reg_new_i32
#define tcg_global_mem_new   tcg_global_mem_new_i32
#define tcg_temp_local_new   tcg_temp_local_new_i32
#define tcg_temp_free        tcg_temp_free_i32

#define tcg_gen_movi_reg     tcg_gen_movi_i32
#define tcg_gen_mov_reg      tcg_gen_mov_i32
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg    tcg_gen_ld_i32
#define tcg_gen_ld32s_reg    tcg_gen_ld_i32
#define tcg_gen_ld_reg       tcg_gen_ld_i32
#define tcg_gen_st8_reg      tcg_gen_st8_i32
#define tcg_gen_st16_reg     tcg_gen_st16_i32
#define tcg_gen_st32_reg     tcg_gen_st32_i32
#define tcg_gen_st_reg       tcg_gen_st_i32
#define tcg_gen_add_reg      tcg_gen_add_i32
#define tcg_gen_addi_reg     tcg_gen_addi_i32
#define tcg_gen_sub_reg      tcg_gen_sub_i32
#define tcg_gen_neg_reg      tcg_gen_neg_i32
#define tcg_gen_subfi_reg    tcg_gen_subfi_i32
#define tcg_gen_subi_reg     tcg_gen_subi_i32
#define tcg_gen_and_reg      tcg_gen_and_i32
#define tcg_gen_andi_reg     tcg_gen_andi_i32
#define tcg_gen_or_reg       tcg_gen_or_i32
#define tcg_gen_ori_reg      tcg_gen_ori_i32
#define tcg_gen_xor_reg      tcg_gen_xor_i32
#define tcg_gen_xori_reg     tcg_gen_xori_i32
#define tcg_gen_not_reg      tcg_gen_not_i32
#define tcg_gen_shl_reg      tcg_gen_shl_i32
#define tcg_gen_shli_reg     tcg_gen_shli_i32
#define tcg_gen_shr_reg      tcg_gen_shr_i32
#define tcg_gen_shri_reg     tcg_gen_shri_i32
#define tcg_gen_sar_reg      tcg_gen_sar_i32
#define tcg_gen_sari_reg     tcg_gen_sari_i32
#define tcg_gen_brcond_reg   tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg  tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg      tcg_gen_mul_i32
#define tcg_gen_muli_reg     tcg_gen_muli_i32
#define tcg_gen_div_reg      tcg_gen_div_i32
#define tcg_gen_rem_reg      tcg_gen_rem_i32
#define tcg_gen_divu_reg     tcg_gen_divu_i32
#define tcg_gen_remu_reg     tcg_gen_remu_i32
#define tcg_gen_discard_reg  tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg   tcg_gen_mov_i32
#define tcg_gen_ext32s_reg   tcg_gen_mov_i32
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i32
#define tcg_gen_eqv_reg      tcg_gen_eqv_i32
#define tcg_gen_nand_reg     tcg_gen_nand_i32
#define tcg_gen_nor_reg      tcg_gen_nor_i32
#define tcg_gen_orc_reg      tcg_gen_orc_i32
#define tcg_gen_clz_reg      tcg_gen_clz_i32
#define tcg_gen_ctz_reg      tcg_gen_ctz_i32
#define tcg_gen_clzi_reg     tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg     tcg_gen_rotl_i32
#define tcg_gen_rotli_reg    tcg_gen_rotli_i32
#define tcg_gen_rotr_reg     tcg_gen_rotr_i32
#define tcg_gen_rotri_reg    tcg_gen_rotri_i32
#define tcg_gen_deposit_reg  tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg  tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg        tcg_const_i32
#define tcg_const_local_reg  tcg_const_local_i32
#define tcg_gen_movcond_reg  tcg_gen_movcond_i32
#define tcg_gen_add2_reg     tcg_gen_add2_i32
#define tcg_gen_sub2_reg     tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#if UINTPTR_MAX == UINT32_MAX
# define tcg_gen_trunc_reg_ptr(p, r) \
    tcg_gen_mov_i32(TCGV_PTR_TO_NAT(p), r)
#else
# define tcg_gen_trunc_reg_ptr(p, r) \
    tcg_gen_extu_i32_i64(TCGV_PTR_TO_NAT(p), r)
#endif
#endif /* TARGET_REGISTER_BITS */
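
/* A DisasCond bundles the TCG comparison that decides whether the current
   insn is nullified.  a0_is_n means a0 aliases the global psw_n (and so
   must not be freed); a1_is_0 means a1 stands for constant zero, which
   cond_prep() materializes on demand.  */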
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
    bool a0_is_n;
    bool a1_is_0;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
} DisasContext;

/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2

typedef struct DisasInsn {
    uint32_t insn, mask;
    DisasJumpType (*trans)(DisasContext *ctx, uint32_t insn,
                           const struct DisasInsn *f);
    union {
        void (*ttt)(TCGv_reg, TCGv_reg, TCGv_reg);
        void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
        void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
        void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
        void (*ded)(TCGv_i64, TCGv_env, TCGv_i64);
        void (*wed)(TCGv_i32, TCGv_env, TCGv_i64);
        void (*dew)(TCGv_i64, TCGv_env, TCGv_i32);
    } f;
} DisasInsn;

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;
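
/* psw_cb caches the word of per-bit carry bits (in1 ^ in2 ^ result; see
   do_add below), while psw_cb_msb holds the carry out of the most
   significant bit.  */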

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a0_is_n = true,
        .a1 = NULL,
        .a1_is_0 = true
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    DisasCond r = { .c = c, .a1 = NULL, .a1_is_0 = true };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);

    return r;
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_prep(DisasCond *cond)
{
    if (cond->a1_is_0) {
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_reg(0);
    }
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif
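
/* The 32-bit FP registers are the halves of the 64-bit fr[] slots:
   FR t (t < 32) is the most significant half of fr[t] and FR t+32 the
   least significant half, so bit 5 of the register number selects
   HI_OFS vs LO_OFS below.  */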
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  */
static DisasJumpType nullify_end(DisasContext *ctx, DisasJumpType status)
{
    TCGLabel *null_lab = ctx->null_lab;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return status;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }

    if (status == DISAS_NORETURN) {
        status = DISAS_NEXT;
    }
    return status;
}

static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}
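
/* PA-RISC branch displacements are relative to the address of the branch
   plus 8, i.e. to the instruction beyond the delay slot; hence the +8.  */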
static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    return DISAS_NORETURN;
}

static DisasJumpType gen_excp_iir(DisasContext *ctx, int exc)
{
    TCGv_reg tmp = tcg_const_reg(ctx->insn);
    tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    tcg_temp_free(tmp);
    return gen_excp(ctx, exc);
}

static DisasJumpType gen_illegal(DisasContext *ctx)
{
    nullify_over(ctx);
    return nullify_end(ctx, gen_excp_iir(ctx, EXCP_ILL));
}

#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                            \
        if (ctx->privilege != 0) {  \
            nullify_over(ctx);      \
            return nullify_end(ctx, gen_excp_iir(ctx, EXCP)); \
        }                           \
    } while (0)

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((tb_cflags(ctx->base.tb) & CF_LAST_IO) || ctx->base.singlestep_enabled) {
        return false;
    }
    return true;
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb((uintptr_t)ctx->base.tb + which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}

/* PA has a habit of taking the LSB of a field and using that as the sign,
   with the rest of the field becoming the least significant bits.  */
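/* For example, low_sextract(0x11, 0, 5) takes the sign from bit 0 and the
   magnitude 0b1000 from bits 1..4: (-1 << 4) | 8 = -8.  */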
static target_sreg low_sextract(uint32_t val, int pos, int len)
{
    target_ureg x = -(target_ureg)extract32(val, pos, 1);
    x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
    return x;
}

static unsigned assemble_rt64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 6, 1);
    unsigned r0 = extract32(insn, 0, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_ra64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 7, 1);
    unsigned r0 = extract32(insn, 21, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_rb64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 12, 1);
    unsigned r0 = extract32(insn, 16, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_rc64(uint32_t insn)
{
    unsigned r2 = extract32(insn, 8, 1);
    unsigned r1 = extract32(insn, 13, 3);
    unsigned r0 = extract32(insn, 9, 2);
    return r2 * 32 + r1 * 4 + r0;
}

static unsigned assemble_sr3(uint32_t insn)
{
    unsigned s2 = extract32(insn, 13, 1);
    unsigned s0 = extract32(insn, 14, 2);
    return s2 * 4 + s0;
}

static target_sreg assemble_12(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x;
}

static target_sreg assemble_16(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 16-bit number
       only with wide mode; otherwise a 14-bit number.  Since we don't
       implement wide mode, this is always the 14-bit number.  */
    return low_sextract(insn, 0, 14);
}

static target_sreg assemble_16a(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 14-bit shifted number
       only with wide mode; otherwise a 12-bit shifted number.  Since we
       don't implement wide mode, this is always the 12-bit number.  */
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 11) | extract32(insn, 2, 11);
    return x << 2;
}

static target_sreg assemble_17(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 5) | extract32(insn, 16, 5);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x << 2;
}
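
/* The scattered 21-bit immediate field (as used by LDIL/ADDIL-style
   instructions); the reassembled value lands in the upper bits of the
   word via the final << 11.  */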
static target_sreg assemble_21(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 11) | extract32(insn, 1, 11);
    x = (x << 2) | extract32(insn, 14, 2);
    x = (x << 5) | extract32(insn, 16, 5);
    x = (x << 2) | extract32(insn, 12, 2);
    return x << 11;
}

static target_sreg assemble_22(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 10) | extract32(insn, 16, 10);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x << 2;
}

/* The parisc documentation describes only the general interpretation of
   the conditions, without describing their exact implementation.  The
   interpretations do not stand up well when considering ADD,C and SUB,B.
   However, considering the Addition, Subtraction and Logical conditions
   as a whole it would appear that these relations are similar to what
   a traditional NZCV set of flags would produce.  */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N / !N) */
        cond = cond_make_0(TCG_COND_LT, res);
        break;
    case 3: /* <= / >        (N | Z / !N & !Z) */
        cond = cond_make_0(TCG_COND_LE, res);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0(TCG_COND_EQ, tmp);
        tcg_temp_free(tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */
static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, sv, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for logicals, where the carry and overflow bits are not
   computed, and use of them is undefined.  */
static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf >> 1) {
    case 4: case 5: case 6:
        cf &= 1;
        break;
    }
    return do_cond(cf, res, res, res);
}

/* Similar, but for shift/extract/deposit conditions.  */
static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */
static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
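        /* What is computed here instead is the per-bit carry-out vector,
         * cout = (in1 & in2) | ((in1 | in2) & ~res).
         */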
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                            TCGv_reg in2, unsigned shift, bool is_l,
                            bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || c == 4 || c == 5) {
        TCGv_reg zero = tcg_const_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return DISAS_NEXT;
}

static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                            TCGv_reg in2, bool is_tsv, bool is_b,
                            bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
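        /* {1:in1} - {0:in2} leaves 1 - borrow in cb_msb, which is exactly
           the carry out of in1 + ~in2 + 1.  */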
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return DISAS_NEXT;
}

static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                               TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if ((cf >> 1) == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return DISAS_NEXT;
}

static DisasJumpType do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                            TCGv_reg in2, unsigned cf,
                            void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
    return DISAS_NEXT;
}

static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                             TCGv_reg in2, unsigned cf, bool is_tc,
                             void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
    return DISAS_NEXT;
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif

static DisasJumpType do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                             unsigned rx, int scale, target_sreg disp,
                             unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx, DISAS_NEXT);
}

static DisasJumpType do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                               unsigned rx, int scale, target_sreg disp,
                               unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx, DISAS_NEXT);
}

static DisasJumpType do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                               unsigned rx, int scale, target_sreg disp,
                               unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx, DISAS_NEXT);
}

static DisasJumpType do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                              target_sreg disp, unsigned sp,
                              int modify, TCGMemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx, DISAS_NEXT);
}

static DisasJumpType do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                                unsigned rx, int scale, target_sreg disp,
                                unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    return nullify_end(ctx, DISAS_NEXT);
}

static DisasJumpType do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                                unsigned rx, int scale, target_sreg disp,
                                unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    tcg_temp_free_i64(tmp);

    return nullify_end(ctx, DISAS_NEXT);
}

static DisasJumpType do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                                void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, cpu_env, tmp);

    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
    return nullify_end(ctx, DISAS_NEXT);
}

static DisasJumpType do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                                void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, cpu_env, src);

    tcg_temp_free_i64(src);
    save_frw_i32(rt, dst);
    tcg_temp_free_i32(dst);
    return nullify_end(ctx, DISAS_NEXT);
}

static DisasJumpType do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                                void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, cpu_env, tmp);

    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);
    return nullify_end(ctx, DISAS_NEXT);
}

static DisasJumpType do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                                void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, cpu_env, src);

    tcg_temp_free_i32(src);
    save_frd(rt, dst);
    tcg_temp_free_i64(dst);
    return nullify_end(ctx, DISAS_NEXT);
}

static DisasJumpType do_fop_weww(DisasContext *ctx, unsigned rt,
                                 unsigned ra, unsigned rb,
                                 void (*func)(TCGv_i32, TCGv_env,
                                              TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i32(b);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx, DISAS_NEXT);
}

static DisasJumpType do_fop_dedd(DisasContext *ctx, unsigned rt,
                                 unsigned ra, unsigned rb,
                                 void (*func)(TCGv_i64, TCGv_env,
                                              TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i64(b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx, DISAS_NEXT);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static DisasJumpType do_dbranch(DisasContext *ctx, target_ureg dest,
                                unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        return DISAS_NEXT;
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx, DISAS_NEXT);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        return DISAS_NORETURN;
    }
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static DisasJumpType do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                                DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    cond_prep(cond);
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        return DISAS_IAQ_N_STALE;
    } else {
        return DISAS_NORETURN;
    }
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static DisasJumpType do_ibranch(DisasContext *ctx, TCGv_reg dest,
                                unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                return DISAS_IAQ_N_UPDATED;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path.  */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx, DISAS_NEXT);
    } else {
        cond_prep(&ctx->null_cond);
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }

    return DISAS_NEXT;
}

/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *        IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *        IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
{
#ifdef CONFIG_USER_ONLY
    return offset;
#else
    TCGv_reg dest;
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        dest = get_temp(ctx);
        tcg_gen_ori_reg(dest, offset, 3);
        break;
    default:
        /* Use a translator-lifetime temp; a bare tcg_temp_new freed here
           would be dead before the caller could use the return value.  */
        dest = get_temp(ctx);
        tcg_gen_andi_reg(dest, offset, -4);
        tcg_gen_ori_reg(dest, dest, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
#endif
}
1937 #ifdef CONFIG_USER_ONLY
1938 /* On Linux, page zero is normally marked execute only + gateway.
1939 Therefore normal read or write is supposed to fail, but specific
1940 offsets have kernel code mapped to raise permissions to implement
1941 system calls. Handling this via an explicit check here, rather
1942 in than the "be disp(sr2,r0)" instruction that probably sent us
1943 here, is the easiest way to handle the branch delay slot on the
1944 aforementioned BE. */
1945 static DisasJumpType do_page_zero(DisasContext *ctx)
1947 /* If by some means we get here with PSW[N]=1, that implies that
1948 the B,GATE instruction would be skipped, and we'd fault on the
1949 next insn within the privilaged page. */
1950 switch (ctx->null_cond.c) {
1951 case TCG_COND_NEVER:
1952 break;
1953 case TCG_COND_ALWAYS:
1954 tcg_gen_movi_reg(cpu_psw_n, 0);
1955 goto do_sigill;
1956 default:
1957 /* Since this is always the first (and only) insn within the
1958 TB, we should know the state of PSW[N] from TB->FLAGS. */
1959 g_assert_not_reached();
1962 /* Check that we didn't arrive here via some means that allowed
1963 non-sequential instruction execution. Normally the PSW[B] bit
1964 detects this by disallowing the B,GATE instruction to execute
1965 under such conditions. */
1966 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1967 goto do_sigill;
1970 switch (ctx->iaoq_f) {
1971 case 0x00: /* Null pointer call */
1972 gen_excp_1(EXCP_IMP);
1973 return DISAS_NORETURN;
1975 case 0xb0: /* LWS */
1976 gen_excp_1(EXCP_SYSCALL_LWS);
1977 return DISAS_NORETURN;
1979 case 0xe0: /* SET_THREAD_POINTER */
1980 tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
1981 tcg_gen_mov_reg(cpu_iaoq_f, cpu_gr[31]);
1982 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
1983 return DISAS_IAQ_N_UPDATED;
1985 case 0x100: /* SYSCALL */
1986 gen_excp_1(EXCP_SYSCALL);
1987 return DISAS_NORETURN;
1989 default:
1990 do_sigill:
1991 gen_excp_1(EXCP_ILL);
1992 return DISAS_NORETURN;
1995 #endif
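/*
 * For reference, a Linux/parisc system call enters the 0x100 case above
 * through the gateway page, roughly as follows (a sketch of the usual
 * ABI, not code from this file):
 *     ble   0x100(%sr2, %r0)    ; branch external to the gateway page
 *     ldi   __NR_write, %r20    ; syscall number, in the delay slot
 * with arguments in %r26..%r23 and the result returned in %r28; the
 * 0xb0 offset selects the light-weight syscall (LWS) entries instead.
 */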
1997 static DisasJumpType trans_nop(DisasContext *ctx, uint32_t insn,
1998 const DisasInsn *di)
2000 cond_free(&ctx->null_cond);
2001 return DISAS_NEXT;
2004 static DisasJumpType trans_break(DisasContext *ctx, uint32_t insn,
2005 const DisasInsn *di)
2007 nullify_over(ctx);
2008 return nullify_end(ctx, gen_excp_iir(ctx, EXCP_BREAK));
2011 static DisasJumpType trans_sync(DisasContext *ctx, uint32_t insn,
2012 const DisasInsn *di)
2014 /* No point in nullifying the memory barrier. */
2015 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2017 cond_free(&ctx->null_cond);
2018 return DISAS_NEXT;
2021 static DisasJumpType trans_mfia(DisasContext *ctx, uint32_t insn,
2022 const DisasInsn *di)
2024 unsigned rt = extract32(insn, 0, 5);
2025 TCGv_reg tmp = dest_gpr(ctx, rt);
2026 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2027 save_gpr(ctx, rt, tmp);
2029 cond_free(&ctx->null_cond);
2030 return DISAS_NEXT;
2033 static DisasJumpType trans_mfsp(DisasContext *ctx, uint32_t insn,
2034 const DisasInsn *di)
2036 unsigned rt = extract32(insn, 0, 5);
2037 unsigned rs = assemble_sr3(insn);
2038 TCGv_i64 t0 = tcg_temp_new_i64();
2039 TCGv_reg t1 = tcg_temp_new();
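/* The space id lives in the high 32 bits of the 64-bit space register
   (note the matching shli in trans_mtsp below), so shift it back down. */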
2041 load_spr(ctx, t0, rs);
2042 tcg_gen_shri_i64(t0, t0, 32);
2043 tcg_gen_trunc_i64_reg(t1, t0);
2045 save_gpr(ctx, rt, t1);
2046 tcg_temp_free(t1);
2047 tcg_temp_free_i64(t0);
2049 cond_free(&ctx->null_cond);
2050 return DISAS_NEXT;
2053 static DisasJumpType trans_mfctl(DisasContext *ctx, uint32_t insn,
2054 const DisasInsn *di)
2056 unsigned rt = extract32(insn, 0, 5);
2057 unsigned ctl = extract32(insn, 21, 5);
2058 TCGv_reg tmp;
2059 DisasJumpType ret;
2061 switch (ctl) {
2062 case CR_SAR:
2063 #ifdef TARGET_HPPA64
2064 if (extract32(insn, 14, 1) == 0) {
2065 /* MFSAR without ,W masks low 5 bits. */
2066 tmp = dest_gpr(ctx, rt);
2067 tcg_gen_andi_reg(tmp, cpu_sar, 31);
2068 save_gpr(ctx, rt, tmp);
2069 goto done;
2071 #endif
2072 save_gpr(ctx, rt, cpu_sar);
2073 goto done;
2074 case CR_IT: /* Interval Timer */
2075 /* FIXME: Respect PSW_S bit. */
2076 nullify_over(ctx);
2077 tmp = dest_gpr(ctx, rt);
2078 if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
2079 gen_io_start();
2080 gen_helper_read_interval_timer(tmp);
2081 gen_io_end();
2082 ret = DISAS_IAQ_N_STALE;
2083 } else {
2084 gen_helper_read_interval_timer(tmp);
2085 ret = DISAS_NEXT;
2087 save_gpr(ctx, rt, tmp);
2088 return nullify_end(ctx, ret);
2089 case 26:
2090 case 27:
2091 break;
2092 default:
2093 /* All other control registers are privileged. */
2094 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2095 break;
2098 tmp = get_temp(ctx);
2099 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2100 save_gpr(ctx, rt, tmp);
2102 done:
2103 cond_free(&ctx->null_cond);
2104 return DISAS_NEXT;
2107 static DisasJumpType trans_mtsp(DisasContext *ctx, uint32_t insn,
2108 const DisasInsn *di)
2110 unsigned rr = extract32(insn, 16, 5);
2111 unsigned rs = assemble_sr3(insn);
2112 TCGv_i64 t64;
2114 if (rs >= 5) {
2115 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2117 nullify_over(ctx);
2119 t64 = tcg_temp_new_i64();
2120 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2121 tcg_gen_shli_i64(t64, t64, 32);
2123 if (rs >= 4) {
2124 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
2125 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2126 } else {
2127 tcg_gen_mov_i64(cpu_sr[rs], t64);
2129 tcg_temp_free_i64(t64);
2131 return nullify_end(ctx, DISAS_NEXT);
2134 static DisasJumpType trans_mtctl(DisasContext *ctx, uint32_t insn,
2135 const DisasInsn *di)
2137 unsigned rin = extract32(insn, 16, 5);
2138 unsigned ctl = extract32(insn, 21, 5);
2139 TCGv_reg reg = load_gpr(ctx, rin);
2140 TCGv_reg tmp;
2142 if (ctl == CR_SAR) {
2143 tmp = tcg_temp_new();
2144 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2145 save_or_nullify(ctx, cpu_sar, tmp);
2146 tcg_temp_free(tmp);
2148 cond_free(&ctx->null_cond);
2149 return DISAS_NEXT;
2152 /* All other control registers are privileged or read-only. */
2153 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2155 #ifdef CONFIG_USER_ONLY
2156 g_assert_not_reached();
2157 #else
2158 DisasJumpType ret = DISAS_NEXT;
2160 nullify_over(ctx);
2161 switch (ctl) {
2162 case CR_IT:
2163 gen_helper_write_interval_timer(cpu_env, reg);
2164 break;
2165 case CR_EIRR:
2166 gen_helper_write_eirr(cpu_env, reg);
2167 break;
2168 case CR_EIEM:
2169 gen_helper_write_eiem(cpu_env, reg);
2170 ret = DISAS_IAQ_N_STALE_EXIT;
2171 break;
2173 case CR_IIASQ:
2174 case CR_IIAOQ:
2175 /* FIXME: Respect PSW_Q bit */
2176 /* The write advances the queue and stores to the back element. */
2177 tmp = get_temp(ctx);
2178 tcg_gen_ld_reg(tmp, cpu_env,
2179 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2180 tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2181 tcg_gen_st_reg(reg, cpu_env,
2182 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2183 break;
2185 default:
2186 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2187 break;
2189 return nullify_end(ctx, ret);
2190 #endif
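/*
 * Since a write to CR_IIASQ/CR_IIAOQ stores to the back of the queue
 * and pushes the old back element to the front, an OS loads both queue
 * elements before RFI by writing each register twice, front value
 * first.  A sketch, using the conventional control register numbers:
 *     mtctl %r20, %cr17    ; IIASQ front (after the second write)
 *     mtctl %r21, %cr17    ; IIASQ back
 *     mtctl %r22, %cr18    ; IIAOQ front
 *     mtctl %r23, %cr18    ; IIAOQ back
 *     rfi
 */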
2193 static DisasJumpType trans_mtsarcm(DisasContext *ctx, uint32_t insn,
2194 const DisasInsn *di)
2196 unsigned rin = extract32(insn, 16, 5);
2197 TCGv_reg tmp = tcg_temp_new();
2199 tcg_gen_not_reg(tmp, load_gpr(ctx, rin));
2200 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2201 save_or_nullify(ctx, cpu_sar, tmp);
2202 tcg_temp_free(tmp);
2204 cond_free(&ctx->null_cond);
2205 return DISAS_NEXT;
2208 static DisasJumpType trans_ldsid(DisasContext *ctx, uint32_t insn,
2209 const DisasInsn *di)
2211 unsigned rt = extract32(insn, 0, 5);
2212 TCGv_reg dest = dest_gpr(ctx, rt);
2214 #ifdef CONFIG_USER_ONLY
2215 /* We don't implement space registers in user mode. */
2216 tcg_gen_movi_reg(dest, 0);
2217 #else
2218 unsigned rb = extract32(insn, 21, 5);
2219 unsigned sp = extract32(insn, 14, 2);
2220 TCGv_i64 t0 = tcg_temp_new_i64();
2222 tcg_gen_mov_i64(t0, space_select(ctx, sp, load_gpr(ctx, rb)));
2223 tcg_gen_shri_i64(t0, t0, 32);
2224 tcg_gen_trunc_i64_reg(dest, t0);
2226 tcg_temp_free_i64(t0);
2227 #endif
2228 save_gpr(ctx, rt, dest);
2230 cond_free(&ctx->null_cond);
2231 return DISAS_NEXT;
2234 #ifndef CONFIG_USER_ONLY
2235 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
2236 static target_ureg extract_sm_imm(uint32_t insn)
2238 target_ureg val = extract32(insn, 16, 10);
2240 if (val & PSW_SM_E) {
2241 val = (val & ~PSW_SM_E) | PSW_E;
2243 if (val & PSW_SM_W) {
2244 val = (val & ~PSW_SM_W) | PSW_W;
2246 return val;
2249 static DisasJumpType trans_rsm(DisasContext *ctx, uint32_t insn,
2250 const DisasInsn *di)
2252 unsigned rt = extract32(insn, 0, 5);
2253 target_ureg sm = extract_sm_imm(insn);
2254 TCGv_reg tmp;
2256 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2257 nullify_over(ctx);
2259 tmp = get_temp(ctx);
2260 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2261 tcg_gen_andi_reg(tmp, tmp, ~sm);
2262 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2263 save_gpr(ctx, rt, tmp);
2265 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2266 return nullify_end(ctx, DISAS_IAQ_N_STALE_EXIT);
2269 static DisasJumpType trans_ssm(DisasContext *ctx, uint32_t insn,
2270 const DisasInsn *di)
2272 unsigned rt = extract32(insn, 0, 5);
2273 target_ureg sm = extract_sm_imm(insn);
2274 TCGv_reg tmp;
2276 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2277 nullify_over(ctx);
2279 tmp = get_temp(ctx);
2280 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2281 tcg_gen_ori_reg(tmp, tmp, sm);
2282 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2283 save_gpr(ctx, rt, tmp);
2285 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2286 return nullify_end(ctx, DISAS_IAQ_N_STALE_EXIT);
2289 static DisasJumpType trans_mtsm(DisasContext *ctx, uint32_t insn,
2290 const DisasInsn *di)
2292 unsigned rr = extract32(insn, 16, 5);
2293 TCGv_reg tmp, reg;
2295 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2296 nullify_over(ctx);
2298 reg = load_gpr(ctx, rr);
2299 tmp = get_temp(ctx);
2300 gen_helper_swap_system_mask(tmp, cpu_env, reg);
2302 /* Exit the TB to recognize new interrupts. */
2303 return nullify_end(ctx, DISAS_IAQ_N_STALE_EXIT);
2306 static DisasJumpType trans_rfi(DisasContext *ctx, uint32_t insn,
2307 const DisasInsn *di)
2309 unsigned comp = extract32(insn, 5, 4);
2311 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2312 nullify_over(ctx);
2314 if (comp == 5) {
2315 gen_helper_rfi_r(cpu_env);
2316 } else {
2317 gen_helper_rfi(cpu_env);
2319 if (ctx->base.singlestep_enabled) {
2320 gen_excp_1(EXCP_DEBUG);
2321 } else {
2322 tcg_gen_exit_tb(0);
2325 /* Exit the TB to recognize new interrupts. */
2326 return nullify_end(ctx, DISAS_NORETURN);
2329 static DisasJumpType gen_hlt(DisasContext *ctx, int reset)
2331 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2332 nullify_over(ctx);
2333 if (reset) {
2334 gen_helper_reset(cpu_env);
2335 } else {
2336 gen_helper_halt(cpu_env);
2338 return nullify_end(ctx, DISAS_NORETURN);
2340 #endif /* !CONFIG_USER_ONLY */
2342 static const DisasInsn table_system[] = {
2343 { 0x00000000u, 0xfc001fe0u, trans_break },
2344 { 0x00001820u, 0xffe01fffu, trans_mtsp },
2345 { 0x00001840u, 0xfc00ffffu, trans_mtctl },
2346 { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
2347 { 0x000014a0u, 0xffffffe0u, trans_mfia },
2348 { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
2349 { 0x000008a0u, 0xfc1fbfe0u, trans_mfctl },
2350 { 0x00000400u, 0xffffffffu, trans_sync }, /* sync */
2351 { 0x00100400u, 0xffffffffu, trans_sync }, /* syncdma */
2352 { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
2353 #ifndef CONFIG_USER_ONLY
2354 { 0x00000e60u, 0xfc00ffe0u, trans_rsm },
2355 { 0x00000d60u, 0xfc00ffe0u, trans_ssm },
2356 { 0x00001860u, 0xffe0ffffu, trans_mtsm },
2357 { 0x00000c00u, 0xfffffe1fu, trans_rfi },
2358 #endif
2361 static DisasJumpType trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
2362 const DisasInsn *di)
2364 unsigned rb = extract32(insn, 21, 5);
2365 unsigned rx = extract32(insn, 16, 5);
2366 TCGv_reg dest = dest_gpr(ctx, rb);
2367 TCGv_reg src1 = load_gpr(ctx, rb);
2368 TCGv_reg src2 = load_gpr(ctx, rx);
2370 /* The only thing we need to do is the base register modification. */
2371 tcg_gen_add_reg(dest, src1, src2);
2372 save_gpr(ctx, rb, dest);
2374 cond_free(&ctx->null_cond);
2375 return DISAS_NEXT;
2378 static DisasJumpType trans_probe(DisasContext *ctx, uint32_t insn,
2379 const DisasInsn *di)
2381 unsigned rt = extract32(insn, 0, 5);
2382 unsigned sp = extract32(insn, 14, 2);
2383 unsigned rr = extract32(insn, 16, 5);
2384 unsigned rb = extract32(insn, 21, 5);
2385 unsigned is_write = extract32(insn, 6, 1);
2386 unsigned is_imm = extract32(insn, 13, 1);
2387 TCGv_reg dest, ofs;
2388 TCGv_i32 level, want;
2389 TCGv_tl addr;
2391 nullify_over(ctx);
2393 dest = dest_gpr(ctx, rt);
2394 form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);
2396 if (is_imm) {
2397 level = tcg_const_i32(extract32(insn, 16, 2));
2398 } else {
2399 level = tcg_temp_new_i32();
2400 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, rr));
2401 tcg_gen_andi_i32(level, level, 3);
2403 want = tcg_const_i32(is_write ? PAGE_WRITE : PAGE_READ);
2405 gen_helper_probe(dest, cpu_env, addr, level, want);
2407 tcg_temp_free_i32(want);
2408 tcg_temp_free_i32(level);
2410 save_gpr(ctx, rt, dest);
2411 return nullify_end(ctx, DISAS_NEXT);
2414 #ifndef CONFIG_USER_ONLY
2415 static DisasJumpType trans_ixtlbx(DisasContext *ctx, uint32_t insn,
2416 const DisasInsn *di)
2418 unsigned sp;
2419 unsigned rr = extract32(insn, 16, 5);
2420 unsigned rb = extract32(insn, 21, 5);
2421 unsigned is_data = insn & 0x1000;
2422 unsigned is_addr = insn & 0x40;
2423 TCGv_tl addr;
2424 TCGv_reg ofs, reg;
2426 if (is_data) {
2427 sp = extract32(insn, 14, 2);
2428 } else {
2429 sp = ~assemble_sr3(insn);
2432 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2433 nullify_over(ctx);
2435 form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);
2436 reg = load_gpr(ctx, rr);
2437 if (is_addr) {
2438 gen_helper_itlba(cpu_env, addr, reg);
2439 } else {
2440 gen_helper_itlbp(cpu_env, addr, reg);
2443 /* Exit TB for ITLB change if mmu is enabled. This *should* not be
2444 the case, since the OS TLB fill handler runs with mmu disabled. */
2445 return nullify_end(ctx, !is_data && (ctx->tb_flags & PSW_C)
2446 ? DISAS_IAQ_N_STALE : DISAS_NEXT);
2449 static DisasJumpType trans_pxtlbx(DisasContext *ctx, uint32_t insn,
2450 const DisasInsn *di)
2452 unsigned m = extract32(insn, 5, 1);
2453 unsigned sp;
2454 unsigned rx = extract32(insn, 16, 5);
2455 unsigned rb = extract32(insn, 21, 5);
2456 unsigned is_data = insn & 0x1000;
2457 unsigned is_local = insn & 0x40;
2458 TCGv_tl addr;
2459 TCGv_reg ofs;
2461 if (is_data) {
2462 sp = extract32(insn, 14, 2);
2463 } else {
2464 sp = ~assemble_sr3(insn);
2467 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2468 nullify_over(ctx);
2470 form_gva(ctx, &addr, &ofs, rb, rx, 0, 0, sp, m, false);
2471 if (m) {
2472 save_gpr(ctx, rb, ofs);
2474 if (is_local) {
2475 gen_helper_ptlbe(cpu_env);
2476 } else {
2477 gen_helper_ptlb(cpu_env, addr);
2480 /* Exit TB for TLB change if mmu is enabled. */
2481 return nullify_end(ctx, !is_data && (ctx->tb_flags & PSW_C)
2482 ? DISAS_IAQ_N_STALE : DISAS_NEXT);
2485 static DisasJumpType trans_lpa(DisasContext *ctx, uint32_t insn,
2486 const DisasInsn *di)
2488 unsigned rt = extract32(insn, 0, 5);
2489 unsigned m = extract32(insn, 5, 1);
2490 unsigned sp = extract32(insn, 14, 2);
2491 unsigned rx = extract32(insn, 16, 5);
2492 unsigned rb = extract32(insn, 21, 5);
2493 TCGv_tl vaddr;
2494 TCGv_reg ofs, paddr;
2496 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2497 nullify_over(ctx);
2499 form_gva(ctx, &vaddr, &ofs, rb, rx, 0, 0, sp, m, false);
2501 paddr = tcg_temp_new();
2502 gen_helper_lpa(paddr, cpu_env, vaddr);
2504 /* Note that physical address result overrides base modification. */
2505 if (m) {
2506 save_gpr(ctx, rb, ofs);
2508 save_gpr(ctx, rt, paddr);
2509 tcg_temp_free(paddr);
2511 return nullify_end(ctx, DISAS_NEXT);
2514 static DisasJumpType trans_lci(DisasContext *ctx, uint32_t insn,
2515 const DisasInsn *di)
2517 unsigned rt = extract32(insn, 0, 5);
2518 TCGv_reg ci;
2520 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2522 /* The Coherence Index is an implementation-defined function of the
2523 physical address. Two addresses with the same CI have a coherent
2524 view of the cache. Our implementation is to return 0 for all,
2525 since the entire address space is coherent. */
2526 ci = tcg_const_reg(0);
2527 save_gpr(ctx, rt, ci);
2528 tcg_temp_free(ci);
2530 return DISAS_NEXT;
2532 #endif /* !CONFIG_USER_ONLY */
2534 static const DisasInsn table_mem_mgmt[] = {
2535 { 0x04003280u, 0xfc003fffu, trans_nop }, /* fdc, disp */
2536 { 0x04001280u, 0xfc003fffu, trans_nop }, /* fdc, index */
2537 { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
2538 { 0x040012c0u, 0xfc003fffu, trans_nop }, /* fdce */
2539 { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
2540 { 0x04000280u, 0xfc001fffu, trans_nop }, /* fic 0a */
2541 { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
2542 { 0x040013c0u, 0xfc003fffu, trans_nop }, /* fic 4f */
2543 { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
2544 { 0x040002c0u, 0xfc001fffu, trans_nop }, /* fice */
2545 { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
2546 { 0x04002700u, 0xfc003fffu, trans_nop }, /* pdc */
2547 { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
2548 { 0x04001180u, 0xfc003fa0u, trans_probe }, /* probe */
2549 { 0x04003180u, 0xfc003fa0u, trans_probe }, /* probei */
2550 #ifndef CONFIG_USER_ONLY
2551 { 0x04000000u, 0xfc001fffu, trans_ixtlbx }, /* iitlbp */
2552 { 0x04000040u, 0xfc001fffu, trans_ixtlbx }, /* iitlba */
2553 { 0x04001000u, 0xfc001fffu, trans_ixtlbx }, /* idtlbp */
2554 { 0x04001040u, 0xfc001fffu, trans_ixtlbx }, /* idtlba */
2555 { 0x04000200u, 0xfc001fdfu, trans_pxtlbx }, /* pitlb */
2556 { 0x04000240u, 0xfc001fdfu, trans_pxtlbx }, /* pitlbe */
2557 { 0x04001200u, 0xfc001fdfu, trans_pxtlbx }, /* pdtlb */
2558 { 0x04001240u, 0xfc001fdfu, trans_pxtlbx }, /* pdtlbe */
2559 { 0x04001340u, 0xfc003fc0u, trans_lpa },
2560 { 0x04001300u, 0xfc003fe0u, trans_lci },
2561 #endif
2564 static DisasJumpType trans_add(DisasContext *ctx, uint32_t insn,
2565 const DisasInsn *di)
2567 unsigned r2 = extract32(insn, 21, 5);
2568 unsigned r1 = extract32(insn, 16, 5);
2569 unsigned cf = extract32(insn, 12, 4);
2570 unsigned ext = extract32(insn, 8, 4);
2571 unsigned shift = extract32(insn, 6, 2);
2572 unsigned rt = extract32(insn, 0, 5);
2573 TCGv_reg tcg_r1, tcg_r2;
2574 bool is_c = false;
2575 bool is_l = false;
2576 bool is_tc = false;
2577 bool is_tsv = false;
2578 DisasJumpType ret;
2580 switch (ext) {
2581 case 0x6: /* ADD, SHLADD */
2582 break;
2583 case 0xa: /* ADD,L, SHLADD,L */
2584 is_l = true;
2585 break;
2586 case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
2587 is_tsv = true;
2588 break;
2589 case 0x7: /* ADD,C */
2590 is_c = true;
2591 break;
2592 case 0xf: /* ADD,C,TSV */
2593 is_c = is_tsv = true;
2594 break;
2595 default:
2596 return gen_illegal(ctx);
2599 if (cf) {
2600 nullify_over(ctx);
2602 tcg_r1 = load_gpr(ctx, r1);
2603 tcg_r2 = load_gpr(ctx, r2);
2604 ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
2605 return nullify_end(ctx, ret);
2608 static DisasJumpType trans_sub(DisasContext *ctx, uint32_t insn,
2609 const DisasInsn *di)
2611 unsigned r2 = extract32(insn, 21, 5);
2612 unsigned r1 = extract32(insn, 16, 5);
2613 unsigned cf = extract32(insn, 12, 4);
2614 unsigned ext = extract32(insn, 6, 6);
2615 unsigned rt = extract32(insn, 0, 5);
2616 TCGv_reg tcg_r1, tcg_r2;
2617 bool is_b = false;
2618 bool is_tc = false;
2619 bool is_tsv = false;
2620 DisasJumpType ret;
2622 switch (ext) {
2623 case 0x10: /* SUB */
2624 break;
2625 case 0x30: /* SUB,TSV */
2626 is_tsv = true;
2627 break;
2628 case 0x14: /* SUB,B */
2629 is_b = true;
2630 break;
2631 case 0x34: /* SUB,B,TSV */
2632 is_b = is_tsv = true;
2633 break;
2634 case 0x13: /* SUB,TC */
2635 is_tc = true;
2636 break;
2637 case 0x33: /* SUB,TSV,TC */
2638 is_tc = is_tsv = true;
2639 break;
2640 default:
2641 return gen_illegal(ctx);
2644 if (cf) {
2645 nullify_over(ctx);
2647 tcg_r1 = load_gpr(ctx, r1);
2648 tcg_r2 = load_gpr(ctx, r2);
2649 ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
2650 return nullify_end(ctx, ret);
2653 static DisasJumpType trans_log(DisasContext *ctx, uint32_t insn,
2654 const DisasInsn *di)
2656 unsigned r2 = extract32(insn, 21, 5);
2657 unsigned r1 = extract32(insn, 16, 5);
2658 unsigned cf = extract32(insn, 12, 4);
2659 unsigned rt = extract32(insn, 0, 5);
2660 TCGv_reg tcg_r1, tcg_r2;
2661 DisasJumpType ret;
2663 if (cf) {
2664 nullify_over(ctx);
2666 tcg_r1 = load_gpr(ctx, r1);
2667 tcg_r2 = load_gpr(ctx, r2);
2668 ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f.ttt);
2669 return nullify_end(ctx, ret);
2672 /* OR r,0,t -> COPY (according to gas) */
2673 static DisasJumpType trans_copy(DisasContext *ctx, uint32_t insn,
2674 const DisasInsn *di)
2676 unsigned r1 = extract32(insn, 16, 5);
2677 unsigned rt = extract32(insn, 0, 5);
2679 if (r1 == 0) {
2680 TCGv_reg dest = dest_gpr(ctx, rt);
2681 tcg_gen_movi_reg(dest, 0);
2682 save_gpr(ctx, rt, dest);
2683 } else {
2684 save_gpr(ctx, rt, cpu_gr[r1]);
2686 cond_free(&ctx->null_cond);
2687 return DISAS_NEXT;
2690 static DisasJumpType trans_cmpclr(DisasContext *ctx, uint32_t insn,
2691 const DisasInsn *di)
2693 unsigned r2 = extract32(insn, 21, 5);
2694 unsigned r1 = extract32(insn, 16, 5);
2695 unsigned cf = extract32(insn, 12, 4);
2696 unsigned rt = extract32(insn, 0, 5);
2697 TCGv_reg tcg_r1, tcg_r2;
2698 DisasJumpType ret;
2700 if (cf) {
2701 nullify_over(ctx);
2703 tcg_r1 = load_gpr(ctx, r1);
2704 tcg_r2 = load_gpr(ctx, r2);
2705 ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
2706 return nullify_end(ctx, ret);
2709 static DisasJumpType trans_uxor(DisasContext *ctx, uint32_t insn,
2710 const DisasInsn *di)
2712 unsigned r2 = extract32(insn, 21, 5);
2713 unsigned r1 = extract32(insn, 16, 5);
2714 unsigned cf = extract32(insn, 12, 4);
2715 unsigned rt = extract32(insn, 0, 5);
2716 TCGv_reg tcg_r1, tcg_r2;
2717 DisasJumpType ret;
2719 if (cf) {
2720 nullify_over(ctx);
2722 tcg_r1 = load_gpr(ctx, r1);
2723 tcg_r2 = load_gpr(ctx, r2);
2724 ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_reg);
2725 return nullify_end(ctx, ret);
2728 static DisasJumpType trans_uaddcm(DisasContext *ctx, uint32_t insn,
2729 const DisasInsn *di)
2731 unsigned r2 = extract32(insn, 21, 5);
2732 unsigned r1 = extract32(insn, 16, 5);
2733 unsigned cf = extract32(insn, 12, 4);
2734 unsigned is_tc = extract32(insn, 6, 1);
2735 unsigned rt = extract32(insn, 0, 5);
2736 TCGv_reg tcg_r1, tcg_r2, tmp;
2737 DisasJumpType ret;
2739 if (cf) {
2740 nullify_over(ctx);
2742 tcg_r1 = load_gpr(ctx, r1);
2743 tcg_r2 = load_gpr(ctx, r2);
2744 tmp = get_temp(ctx);
2745 tcg_gen_not_reg(tmp, tcg_r2);
2746 ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_reg);
2747 return nullify_end(ctx, ret);
2750 static DisasJumpType trans_dcor(DisasContext *ctx, uint32_t insn,
2751 const DisasInsn *di)
2753 unsigned r2 = extract32(insn, 21, 5);
2754 unsigned cf = extract32(insn, 12, 4);
2755 unsigned is_i = extract32(insn, 6, 1);
2756 unsigned rt = extract32(insn, 0, 5);
2757 TCGv_reg tmp;
2758 DisasJumpType ret;
2760 nullify_over(ctx);
2762 tmp = get_temp(ctx);
2763 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2764 if (!is_i) {
2765 tcg_gen_not_reg(tmp, tmp);
2767 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2768 tcg_gen_muli_reg(tmp, tmp, 6);
2769 ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
2770 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2772 return nullify_end(ctx, ret);
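/*
 * Worked example of the intended use (sketch only): to add the packed
 * BCD values 0305 + 0507, pre-bias one operand with 0x6666 so that the
 * plain ADD records a carry out of each decimal digit in PSW[CB]:
 *     0x0305 + 0x6666 = 0x696b
 *     0x696b + 0x0507 = 0x6e72, digit carries = 0001
 * DCOR then subtracts 6 from every digit that did not carry, which is
 * exactly the 6 * (~(cb >> 3) & 0x11111111) computed above:
 *     0x6e72 - 0x6660 = 0x0812 = BCD 812.
 */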
2775 static DisasJumpType trans_ds(DisasContext *ctx, uint32_t insn,
2776 const DisasInsn *di)
2778 unsigned r2 = extract32(insn, 21, 5);
2779 unsigned r1 = extract32(insn, 16, 5);
2780 unsigned cf = extract32(insn, 12, 4);
2781 unsigned rt = extract32(insn, 0, 5);
2782 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2784 nullify_over(ctx);
2786 in1 = load_gpr(ctx, r1);
2787 in2 = load_gpr(ctx, r2);
2789 add1 = tcg_temp_new();
2790 add2 = tcg_temp_new();
2791 addc = tcg_temp_new();
2792 dest = tcg_temp_new();
2793 zero = tcg_const_reg(0);
2795 /* Form R1 << 1 | PSW[CB]{8}. */
2796 tcg_gen_add_reg(add1, in1, in1);
2797 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2799 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2800 carry{8} requires that we subtract via + ~R2 + 1, as described in
2801 the manual. By extracting and masking V, we can produce the
2802 proper inputs to the addition without movcond. */
2803 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2804 tcg_gen_xor_reg(add2, in2, addc);
2805 tcg_gen_andi_reg(addc, addc, 1);
2806 /* ??? This is only correct for 32-bit. */
2807 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2808 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2810 tcg_temp_free(addc);
2811 tcg_temp_free(zero);
2813 /* Write back the result register. */
2814 save_gpr(ctx, rt, dest);
2816 /* Write back PSW[CB]. */
2817 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2818 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2820 /* Write back PSW[V] for the division step. */
2821 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2822 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2824 /* Install the new nullification. */
2825 if (cf) {
2826 TCGv_reg sv = NULL;
2827 if (cf >> 1 == 6) {
2828 /* ??? The lshift is supposed to contribute to overflow. */
2829 sv = do_add_sv(ctx, dest, add1, add2);
2831 ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
2834 tcg_temp_free(add1);
2835 tcg_temp_free(add2);
2836 tcg_temp_free(dest);
2838 return nullify_end(ctx, DISAS_NEXT);
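/*
 * DS is the primitive step of a non-restoring binary division: once
 * PSW[V] and PSW[CB] are primed, each DS adds or subtracts the divisor
 * from the shifted partial remainder, so a full 32-bit divide is a run
 * of 32 consecutive DS instructions, as in the HP millicode division
 * routines.  (Summary of intent; see the architecture manual for the
 * exact priming sequence.)
 */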
2841 #ifndef CONFIG_USER_ONLY
2842 /* These are QEMU extensions and are nops in the real architecture:
2844 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2845 * or %r31,%r31,%r31 -- death loop; offline cpu
2846 * currently implemented as idle. */
2848 static DisasJumpType trans_pause(DisasContext *ctx, uint32_t insn,
2849 const DisasInsn *di)
2851 TCGv_i32 tmp;
2853 /* No need to check for supervisor, as userland can only pause
2854 until the next timer interrupt. */
2855 nullify_over(ctx);
2857 /* Advance the instruction queue. */
2858 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2859 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2860 nullify_set(ctx, 0);
2862 /* Tell the qemu main loop to halt until this cpu has work. */
2863 tmp = tcg_const_i32(1);
2864 tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) +
2865 offsetof(CPUState, halted));
2866 tcg_temp_free_i32(tmp);
2867 gen_excp_1(EXCP_HALTED);
2869 return nullify_end(ctx, DISAS_NORETURN);
2871 #endif
2873 static const DisasInsn table_arith_log[] = {
2874 { 0x08000240u, 0xfc00ffffu, trans_nop }, /* or x,y,0 */
2875 { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
2876 #ifndef CONFIG_USER_ONLY
2877 { 0x094a024au, 0xffffffffu, trans_pause }, /* or r10,r10,r10 */
2878 { 0x0bff025fu, 0xffffffffu, trans_pause }, /* or r31,r31,r31 */
2879 #endif
2880 { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_reg },
2881 { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_reg },
2882 { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_reg },
2883 { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_reg },
2884 { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
2885 { 0x08000380u, 0xfc000fe0u, trans_uxor },
2886 { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
2887 { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
2888 { 0x08000440u, 0xfc000fe0u, trans_ds },
2889 { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
2890 { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
2891 { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
2892 { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
2895 static DisasJumpType trans_addi(DisasContext *ctx, uint32_t insn)
2897 target_sreg im = low_sextract(insn, 0, 11);
2898 unsigned e1 = extract32(insn, 11, 1);
2899 unsigned cf = extract32(insn, 12, 4);
2900 unsigned rt = extract32(insn, 16, 5);
2901 unsigned r2 = extract32(insn, 21, 5);
2902 unsigned o1 = extract32(insn, 26, 1);
2903 TCGv_reg tcg_im, tcg_r2;
2904 DisasJumpType ret;
2906 if (cf) {
2907 nullify_over(ctx);
2910 tcg_im = load_const(ctx, im);
2911 tcg_r2 = load_gpr(ctx, r2);
2912 ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2914 return nullify_end(ctx, ret);
2917 static DisasJumpType trans_subi(DisasContext *ctx, uint32_t insn)
2919 target_sreg im = low_sextract(insn, 0, 11);
2920 unsigned e1 = extract32(insn, 11, 1);
2921 unsigned cf = extract32(insn, 12, 4);
2922 unsigned rt = extract32(insn, 16, 5);
2923 unsigned r2 = extract32(insn, 21, 5);
2924 TCGv_reg tcg_im, tcg_r2;
2925 DisasJumpType ret;
2927 if (cf) {
2928 nullify_over(ctx);
2931 tcg_im = load_const(ctx, im);
2932 tcg_r2 = load_gpr(ctx, r2);
2933 ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2935 return nullify_end(ctx, ret);
2938 static DisasJumpType trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2940 target_sreg im = low_sextract(insn, 0, 11);
2941 unsigned cf = extract32(insn, 12, 4);
2942 unsigned rt = extract32(insn, 16, 5);
2943 unsigned r2 = extract32(insn, 21, 5);
2944 TCGv_reg tcg_im, tcg_r2;
2945 DisasJumpType ret;
2947 if (cf) {
2948 nullify_over(ctx);
2951 tcg_im = load_const(ctx, im);
2952 tcg_r2 = load_gpr(ctx, r2);
2953 ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2955 return nullify_end(ctx, ret);
2958 static DisasJumpType trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
2959 const DisasInsn *di)
2961 unsigned rt = extract32(insn, 0, 5);
2962 unsigned m = extract32(insn, 5, 1);
2963 unsigned sz = extract32(insn, 6, 2);
2964 unsigned a = extract32(insn, 13, 1);
2965 unsigned sp = extract32(insn, 14, 2);
2966 int disp = low_sextract(insn, 16, 5);
2967 unsigned rb = extract32(insn, 21, 5);
2968 int modify = (m ? (a ? -1 : 1) : 0);
2969 TCGMemOp mop = MO_TE | sz;
2971 return do_load(ctx, rt, rb, 0, 0, disp, sp, modify, mop);
2974 static DisasJumpType trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
2975 const DisasInsn *di)
2977 unsigned rt = extract32(insn, 0, 5);
2978 unsigned m = extract32(insn, 5, 1);
2979 unsigned sz = extract32(insn, 6, 2);
2980 unsigned u = extract32(insn, 13, 1);
2981 unsigned sp = extract32(insn, 14, 2);
2982 unsigned rx = extract32(insn, 16, 5);
2983 unsigned rb = extract32(insn, 21, 5);
2984 TCGMemOp mop = MO_TE | sz;
2986 return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, sp, m, mop);
2989 static DisasJumpType trans_st_idx_i(DisasContext *ctx, uint32_t insn,
2990 const DisasInsn *di)
2992 int disp = low_sextract(insn, 0, 5);
2993 unsigned m = extract32(insn, 5, 1);
2994 unsigned sz = extract32(insn, 6, 2);
2995 unsigned a = extract32(insn, 13, 1);
2996 unsigned sp = extract32(insn, 14, 2);
2997 unsigned rr = extract32(insn, 16, 5);
2998 unsigned rb = extract32(insn, 21, 5);
2999 int modify = (m ? (a ? -1 : 1) : 0);
3000 TCGMemOp mop = MO_TE | sz;
3002 return do_store(ctx, rr, rb, disp, sp, modify, mop);
3005 static DisasJumpType trans_ldcw(DisasContext *ctx, uint32_t insn,
3006 const DisasInsn *di)
3008 unsigned rt = extract32(insn, 0, 5);
3009 unsigned m = extract32(insn, 5, 1);
3010 unsigned i = extract32(insn, 12, 1);
3011 unsigned au = extract32(insn, 13, 1);
3012 unsigned sp = extract32(insn, 14, 2);
3013 unsigned rx = extract32(insn, 16, 5);
3014 unsigned rb = extract32(insn, 21, 5);
3015 TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
3016 TCGv_reg zero, dest, ofs;
3017 TCGv_tl addr;
3018 int modify, disp = 0, scale = 0;
3020 nullify_over(ctx);
3022 if (i) {
3023 modify = (m ? (au ? -1 : 1) : 0);
3024 disp = low_sextract(rx, 0, 5);
3025 rx = 0;
3026 } else {
3027 modify = m;
3028 if (au) {
3029 scale = mop & MO_SIZE;
3032 if (modify) {
3033 /* Base register modification. Make sure if RT == RB,
3034 we see the result of the load. */
3035 dest = get_temp(ctx);
3036 } else {
3037 dest = dest_gpr(ctx, rt);
3040 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
3041 ctx->mmu_idx == MMU_PHYS_IDX);
3042 zero = tcg_const_reg(0);
3043 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
3044 if (modify) {
3045 save_gpr(ctx, rb, ofs);
3047 save_gpr(ctx, rt, dest);
3049 return nullify_end(ctx, DISAS_NEXT);
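/*
 * LDCW is the architecture's lone atomic read-modify-write primitive
 * and is conventionally used as a spinlock acquire, along these lines
 * (illustrative sketch; the lock word must be 16-byte aligned, which
 * is what MO_ALIGN_16 enforces above):
 *     spin:  ldcw    0(%r26), %r1    ; read lock word and write 0
 *            comib,= 0, %r1, spin    ; 0 means the lock was held
 *            nop
 * Release is an ordinary store of a non-zero value.
 */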
3052 static DisasJumpType trans_stby(DisasContext *ctx, uint32_t insn,
3053 const DisasInsn *di)
3055 target_sreg disp = low_sextract(insn, 0, 5);
3056 unsigned m = extract32(insn, 5, 1);
3057 unsigned a = extract32(insn, 13, 1);
3058 unsigned sp = extract32(insn, 14, 2);
3059 unsigned rt = extract32(insn, 16, 5);
3060 unsigned rb = extract32(insn, 21, 5);
3061 TCGv_reg ofs, val;
3062 TCGv_tl addr;
3064 nullify_over(ctx);
3066 form_gva(ctx, &addr, &ofs, rb, 0, 0, disp, sp, m,
3067 ctx->mmu_idx == MMU_PHYS_IDX);
3068 val = load_gpr(ctx, rt);
3069 if (a) {
3070 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3071 gen_helper_stby_e_parallel(cpu_env, addr, val);
3072 } else {
3073 gen_helper_stby_e(cpu_env, addr, val);
3075 } else {
3076 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3077 gen_helper_stby_b_parallel(cpu_env, addr, val);
3078 } else {
3079 gen_helper_stby_b(cpu_env, addr, val);
3083 if (m) {
3084 tcg_gen_andi_reg(ofs, ofs, ~3);
3085 save_gpr(ctx, rb, ofs);
3088 return nullify_end(ctx, DISAS_NEXT);
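/*
 * STBY,B stores only the bytes of the source word that fall at or after
 * the given address within its word, and STBY,E the bytes before it;
 * memcpy-style code uses the pair for the ragged first and last words
 * of an unaligned destination.  (Summary of intent only.)
 */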
3091 #ifndef CONFIG_USER_ONLY
3092 static DisasJumpType trans_ldwa_idx_i(DisasContext *ctx, uint32_t insn,
3093 const DisasInsn *di)
3095 int hold_mmu_idx = ctx->mmu_idx;
3096 DisasJumpType ret;
3098 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3100 /* ??? needs fixing for hppa64 -- ldda does not follow the same
3101 format wrt the sub-opcode in bits 6:9. */
3102 ctx->mmu_idx = MMU_PHYS_IDX;
3103 ret = trans_ld_idx_i(ctx, insn, di);
3104 ctx->mmu_idx = hold_mmu_idx;
3105 return ret;
3108 static DisasJumpType trans_ldwa_idx_x(DisasContext *ctx, uint32_t insn,
3109 const DisasInsn *di)
3111 int hold_mmu_idx = ctx->mmu_idx;
3112 DisasJumpType ret;
3114 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3116 /* ??? needs fixing for hppa64 -- ldda does not follow the same
3117 format wrt the sub-opcode in bits 6:9. */
3118 ctx->mmu_idx = MMU_PHYS_IDX;
3119 ret = trans_ld_idx_x(ctx, insn, di);
3120 ctx->mmu_idx = hold_mmu_idx;
3121 return ret;
3124 static DisasJumpType trans_stwa_idx_i(DisasContext *ctx, uint32_t insn,
3125 const DisasInsn *di)
3127 int hold_mmu_idx = ctx->mmu_idx;
3128 DisasJumpType ret;
3130 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3132 /* ??? needs fixing for hppa64 -- ldda does not follow the same
3133 format wrt the sub-opcode in bits 6:9. */
3134 ctx->mmu_idx = MMU_PHYS_IDX;
3135 ret = trans_st_idx_i(ctx, insn, di);
3136 ctx->mmu_idx = hold_mmu_idx;
3137 return ret;
3139 #endif
3141 static const DisasInsn table_index_mem[] = {
3142 { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
3143 { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
3144 { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
3145 { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
3146 { 0x0c001300u, 0xfc0013c0, trans_stby },
3147 #ifndef CONFIG_USER_ONLY
3148 { 0x0c000180u, 0xfc00d3c0, trans_ldwa_idx_x }, /* LDWA, rx */
3149 { 0x0c001180u, 0xfc00d3c0, trans_ldwa_idx_i }, /* LDWA, im */
3150 { 0x0c001380u, 0xfc00d3c0, trans_stwa_idx_i }, /* STWA, im */
3151 #endif
3154 static DisasJumpType trans_ldil(DisasContext *ctx, uint32_t insn)
3156 unsigned rt = extract32(insn, 21, 5);
3157 target_sreg i = assemble_21(insn);
3158 TCGv_reg tcg_rt = dest_gpr(ctx, rt);
3160 tcg_gen_movi_reg(tcg_rt, i);
3161 save_gpr(ctx, rt, tcg_rt);
3162 cond_free(&ctx->null_cond);
3164 return DISAS_NEXT;
3167 static DisasJumpType trans_addil(DisasContext *ctx, uint32_t insn)
3169 unsigned rt = extract32(insn, 21, 5);
3170 target_sreg i = assemble_21(insn);
3171 TCGv_reg tcg_rt = load_gpr(ctx, rt);
3172 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
3174 tcg_gen_addi_reg(tcg_r1, tcg_rt, i);
3175 save_gpr(ctx, 1, tcg_r1);
3176 cond_free(&ctx->null_cond);
3178 return DISAS_NEXT;
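/*
 * ADDIL pairs with the displacement of a following memory insn to form
 * a full 32-bit offset: the assembler splits the offset into L% (high
 * 21 bits, added into %r1 here) and R% (low 11 bits).  A typical
 * sketch:
 *     addil L%var-$global$, %r27      ; %r1 = %dp + high part
 *     ldw   R%var-$global$(%r1), %r4  ; low part as the displacement
 */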
3181 static DisasJumpType trans_ldo(DisasContext *ctx, uint32_t insn)
3183 unsigned rb = extract32(insn, 21, 5);
3184 unsigned rt = extract32(insn, 16, 5);
3185 target_sreg i = assemble_16(insn);
3186 TCGv_reg tcg_rt = dest_gpr(ctx, rt);
3188 /* Special case rb == 0, for the LDI pseudo-op.
3189 The COPY pseudo-op is handled for free within tcg_gen_addi_reg. */
3190 if (rb == 0) {
3191 tcg_gen_movi_reg(tcg_rt, i);
3192 } else {
3193 tcg_gen_addi_reg(tcg_rt, cpu_gr[rb], i);
3195 save_gpr(ctx, rt, tcg_rt);
3196 cond_free(&ctx->null_cond);
3198 return DISAS_NEXT;
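/* E.g. the assembler expands "ldi 42, %r4" to "ldo 42(%r0), %r4",
   which is the rb == 0 special case above. */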
3201 static DisasJumpType trans_load(DisasContext *ctx, uint32_t insn,
3202 bool is_mod, TCGMemOp mop)
3204 unsigned rb = extract32(insn, 21, 5);
3205 unsigned rt = extract32(insn, 16, 5);
3206 unsigned sp = extract32(insn, 14, 2);
3207 target_sreg i = assemble_16(insn);
3209 return do_load(ctx, rt, rb, 0, 0, i, sp,
3210 is_mod ? (i < 0 ? -1 : 1) : 0, mop);
3213 static DisasJumpType trans_load_w(DisasContext *ctx, uint32_t insn)
3215 unsigned rb = extract32(insn, 21, 5);
3216 unsigned rt = extract32(insn, 16, 5);
3217 unsigned sp = extract32(insn, 14, 2);
3218 target_sreg i = assemble_16a(insn);
3219 unsigned ext2 = extract32(insn, 1, 2);
3221 switch (ext2) {
3222 case 0:
3223 case 1:
3224 /* FLDW without modification. */
3225 return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
3226 case 2:
3227 /* LDW with modification. Note that the sign of I selects
3228 post-dec vs pre-inc. */
3229 return do_load(ctx, rt, rb, 0, 0, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
3230 default:
3231 return gen_illegal(ctx);
3235 static DisasJumpType trans_fload_mod(DisasContext *ctx, uint32_t insn)
3237 target_sreg i = assemble_16a(insn);
3238 unsigned t1 = extract32(insn, 1, 1);
3239 unsigned a = extract32(insn, 2, 1);
3240 unsigned sp = extract32(insn, 14, 2);
3241 unsigned t0 = extract32(insn, 16, 5);
3242 unsigned rb = extract32(insn, 21, 5);
3244 /* FLDW with modification. */
3245 return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3248 static DisasJumpType trans_store(DisasContext *ctx, uint32_t insn,
3249 bool is_mod, TCGMemOp mop)
3251 unsigned rb = extract32(insn, 21, 5);
3252 unsigned rt = extract32(insn, 16, 5);
3253 unsigned sp = extract32(insn, 14, 2);
3254 target_sreg i = assemble_16(insn);
3256 return do_store(ctx, rt, rb, i, sp, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
3259 static DisasJumpType trans_store_w(DisasContext *ctx, uint32_t insn)
3261 unsigned rb = extract32(insn, 21, 5);
3262 unsigned rt = extract32(insn, 16, 5);
3263 unsigned sp = extract32(insn, 14, 2);
3264 target_sreg i = assemble_16a(insn);
3265 unsigned ext2 = extract32(insn, 1, 2);
3267 switch (ext2) {
3268 case 0:
3269 case 1:
3270 /* FSTW without modification. */
3271 return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
3272 case 2:
3273 /* STW with modification. */
3274 return do_store(ctx, rt, rb, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
3275 default:
3276 return gen_illegal(ctx);
3280 static DisasJumpType trans_fstore_mod(DisasContext *ctx, uint32_t insn)
3282 target_sreg i = assemble_16a(insn);
3283 unsigned t1 = extract32(insn, 1, 1);
3284 unsigned a = extract32(insn, 2, 1);
3285 unsigned sp = extract32(insn, 14, 2);
3286 unsigned t0 = extract32(insn, 16, 5);
3287 unsigned rb = extract32(insn, 21, 5);
3289 /* FSTW with modification. */
3290 return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3293 static DisasJumpType trans_copr_w(DisasContext *ctx, uint32_t insn)
3295 unsigned t0 = extract32(insn, 0, 5);
3296 unsigned m = extract32(insn, 5, 1);
3297 unsigned t1 = extract32(insn, 6, 1);
3298 unsigned ext3 = extract32(insn, 7, 3);
3299 /* unsigned cc = extract32(insn, 10, 2); */
3300 unsigned i = extract32(insn, 12, 1);
3301 unsigned ua = extract32(insn, 13, 1);
3302 unsigned sp = extract32(insn, 14, 2);
3303 unsigned rx = extract32(insn, 16, 5);
3304 unsigned rb = extract32(insn, 21, 5);
3305 unsigned rt = t1 * 32 + t0;
3306 int modify = (m ? (ua ? -1 : 1) : 0);
3307 int disp, scale;
3309 if (i == 0) {
3310 scale = (ua ? 2 : 0);
3311 disp = 0;
3312 modify = m;
3313 } else {
3314 disp = low_sextract(rx, 0, 5);
3315 scale = 0;
3316 rx = 0;
3317 modify = (m ? (ua ? -1 : 1) : 0);
3320 switch (ext3) {
3321 case 0: /* FLDW */
3322 return do_floadw(ctx, rt, rb, rx, scale, disp, sp, modify);
3323 case 4: /* FSTW */
3324 return do_fstorew(ctx, rt, rb, rx, scale, disp, sp, modify);
3326 return gen_illegal(ctx);
3329 static DisasJumpType trans_copr_dw(DisasContext *ctx, uint32_t insn)
3331 unsigned rt = extract32(insn, 0, 5);
3332 unsigned m = extract32(insn, 5, 1);
3333 unsigned ext4 = extract32(insn, 6, 4);
3334 /* unsigned cc = extract32(insn, 10, 2); */
3335 unsigned i = extract32(insn, 12, 1);
3336 unsigned ua = extract32(insn, 13, 1);
3337 unsigned sp = extract32(insn, 14, 2);
3338 unsigned rx = extract32(insn, 16, 5);
3339 unsigned rb = extract32(insn, 21, 5);
3340 int modify = (m ? (ua ? -1 : 1) : 0);
3341 int disp, scale;
3343 if (i == 0) {
3344 scale = (ua ? 3 : 0);
3345 disp = 0;
3346 modify = m;
3347 } else {
3348 disp = low_sextract(rx, 0, 5);
3349 scale = 0;
3350 rx = 0;
3351 modify = (m ? (ua ? -1 : 1) : 0);
3354 switch (ext4) {
3355 case 0: /* FLDD */
3356 return do_floadd(ctx, rt, rb, rx, scale, disp, sp, modify);
3357 case 8: /* FSTD */
3358 return do_fstored(ctx, rt, rb, rx, scale, disp, sp, modify);
3359 default:
3360 return gen_illegal(ctx);
3364 static DisasJumpType trans_cmpb(DisasContext *ctx, uint32_t insn,
3365 bool is_true, bool is_imm, bool is_dw)
3367 target_sreg disp = assemble_12(insn) * 4;
3368 unsigned n = extract32(insn, 1, 1);
3369 unsigned c = extract32(insn, 13, 3);
3370 unsigned r = extract32(insn, 21, 5);
3371 unsigned cf = c * 2 + !is_true;
3372 TCGv_reg dest, in1, in2, sv;
3373 DisasCond cond;
3375 nullify_over(ctx);
3377 if (is_imm) {
3378 in1 = load_const(ctx, low_sextract(insn, 16, 5));
3379 } else {
3380 in1 = load_gpr(ctx, extract32(insn, 16, 5));
3382 in2 = load_gpr(ctx, r);
3383 dest = get_temp(ctx);
3385 tcg_gen_sub_reg(dest, in1, in2);
3387 sv = NULL;
3388 if (c == 6) {
3389 sv = do_sub_sv(ctx, dest, in1, in2);
3392 cond = do_sub_cond(cf, dest, in1, in2, sv);
3393 return do_cbranch(ctx, disp, n, &cond);
3396 static DisasJumpType trans_addb(DisasContext *ctx, uint32_t insn,
3397 bool is_true, bool is_imm)
3399 target_sreg disp = assemble_12(insn) * 4;
3400 unsigned n = extract32(insn, 1, 1);
3401 unsigned c = extract32(insn, 13, 3);
3402 unsigned r = extract32(insn, 21, 5);
3403 unsigned cf = c * 2 + !is_true;
3404 TCGv_reg dest, in1, in2, sv, cb_msb;
3405 DisasCond cond;
3407 nullify_over(ctx);
3409 if (is_imm) {
3410 in1 = load_const(ctx, low_sextract(insn, 16, 5));
3411 } else {
3412 in1 = load_gpr(ctx, extract32(insn, 16, 5));
3414 in2 = load_gpr(ctx, r);
3415 dest = dest_gpr(ctx, r);
3416 sv = NULL;
3417 cb_msb = NULL;
3419 switch (c) {
3420 default:
3421 tcg_gen_add_reg(dest, in1, in2);
3422 break;
3423 case 4: case 5:
3424 cb_msb = get_temp(ctx);
3425 tcg_gen_movi_reg(cb_msb, 0);
3426 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3427 break;
3428 case 6:
3429 tcg_gen_add_reg(dest, in1, in2);
3430 sv = do_add_sv(ctx, dest, in1, in2);
3431 break;
3434 cond = do_cond(cf, dest, cb_msb, sv);
3435 return do_cbranch(ctx, disp, n, &cond);
3438 static DisasJumpType trans_bb(DisasContext *ctx, uint32_t insn)
3440 target_sreg disp = assemble_12(insn) * 4;
3441 unsigned n = extract32(insn, 1, 1);
3442 unsigned c = extract32(insn, 15, 1);
3443 unsigned r = extract32(insn, 16, 5);
3444 unsigned p = extract32(insn, 21, 5);
3445 unsigned i = extract32(insn, 26, 1);
3446 TCGv_reg tmp, tcg_r;
3447 DisasCond cond;
3449 nullify_over(ctx);
3451 tmp = tcg_temp_new();
3452 tcg_r = load_gpr(ctx, r);
3453 if (i) {
3454 tcg_gen_shli_reg(tmp, tcg_r, p);
3455 } else {
3456 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3459 cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
3460 tcg_temp_free(tmp);
3461 return do_cbranch(ctx, disp, n, &cond);
3464 static DisasJumpType trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
3466 target_sreg disp = assemble_12(insn) * 4;
3467 unsigned n = extract32(insn, 1, 1);
3468 unsigned c = extract32(insn, 13, 3);
3469 unsigned t = extract32(insn, 16, 5);
3470 unsigned r = extract32(insn, 21, 5);
3471 TCGv_reg dest;
3472 DisasCond cond;
3474 nullify_over(ctx);
3476 dest = dest_gpr(ctx, r);
3477 if (is_imm) {
3478 tcg_gen_movi_reg(dest, low_sextract(t, 0, 5));
3479 } else if (t == 0) {
3480 tcg_gen_movi_reg(dest, 0);
3481 } else {
3482 tcg_gen_mov_reg(dest, cpu_gr[t]);
3485 cond = do_sed_cond(c, dest);
3486 return do_cbranch(ctx, disp, n, &cond);
3489 static DisasJumpType trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
3490 const DisasInsn *di)
3492 unsigned rt = extract32(insn, 0, 5);
3493 unsigned c = extract32(insn, 13, 3);
3494 unsigned r1 = extract32(insn, 16, 5);
3495 unsigned r2 = extract32(insn, 21, 5);
3496 TCGv_reg dest;
3498 if (c) {
3499 nullify_over(ctx);
3502 dest = dest_gpr(ctx, rt);
3503 if (r1 == 0) {
3504 tcg_gen_ext32u_reg(dest, load_gpr(ctx, r2));
3505 tcg_gen_shr_reg(dest, dest, cpu_sar);
3506 } else if (r1 == r2) {
3507 TCGv_i32 t32 = tcg_temp_new_i32();
3508 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, r2));
3509 tcg_gen_rotr_i32(t32, t32, cpu_sar);
3510 tcg_gen_extu_i32_reg(dest, t32);
3511 tcg_temp_free_i32(t32);
3512 } else {
3513 TCGv_i64 t = tcg_temp_new_i64();
3514 TCGv_i64 s = tcg_temp_new_i64();
3516 tcg_gen_concat_reg_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
3517 tcg_gen_extu_reg_i64(s, cpu_sar);
3518 tcg_gen_shr_i64(t, t, s);
3519 tcg_gen_trunc_i64_reg(dest, t);
3521 tcg_temp_free_i64(t);
3522 tcg_temp_free_i64(s);
3524 save_gpr(ctx, rt, dest);
3526 /* Install the new nullification. */
3527 cond_free(&ctx->null_cond);
3528 if (c) {
3529 ctx->null_cond = do_sed_cond(c, dest);
3531 return nullify_end(ctx, DISAS_NEXT);
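/*
 * SHRPW is a funnel shift: dest gets the low 32 bits of (r1:r2) >> SAR,
 * so the r1 == r2 case above degenerates to a rotate right.  That is
 * the usual spelling of a variable rotate (sketch):
 *     mtsar %r20                    ; rotate count
 *     shrpw %r4, %r4, %sar, %r5    ; %r5 = %r4 rotated right
 */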
3534 static DisasJumpType trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
3535 const DisasInsn *di)
3537 unsigned rt = extract32(insn, 0, 5);
3538 unsigned cpos = extract32(insn, 5, 5);
3539 unsigned c = extract32(insn, 13, 3);
3540 unsigned r1 = extract32(insn, 16, 5);
3541 unsigned r2 = extract32(insn, 21, 5);
3542 unsigned sa = 31 - cpos;
3543 TCGv_reg dest, t2;
3545 if (c) {
3546 nullify_over(ctx);
3549 dest = dest_gpr(ctx, rt);
3550 t2 = load_gpr(ctx, r2);
3551 if (r1 == r2) {
3552 TCGv_i32 t32 = tcg_temp_new_i32();
3553 tcg_gen_trunc_reg_i32(t32, t2);
3554 tcg_gen_rotri_i32(t32, t32, sa);
3555 tcg_gen_extu_i32_reg(dest, t32);
3556 tcg_temp_free_i32(t32);
3557 } else if (r1 == 0) {
3558 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3559 } else {
3560 TCGv_reg t0 = tcg_temp_new();
3561 tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
3562 tcg_gen_deposit_reg(dest, t0, cpu_gr[r1], 32 - sa, sa);
3563 tcg_temp_free(t0);
3565 save_gpr(ctx, rt, dest);
3567 /* Install the new nullification. */
3568 cond_free(&ctx->null_cond);
3569 if (c) {
3570 ctx->null_cond = do_sed_cond(c, dest);
3572 return nullify_end(ctx, DISAS_NEXT);
3575 static DisasJumpType trans_extrw_sar(DisasContext *ctx, uint32_t insn,
3576 const DisasInsn *di)
3578 unsigned clen = extract32(insn, 0, 5);
3579 unsigned is_se = extract32(insn, 10, 1);
3580 unsigned c = extract32(insn, 13, 3);
3581 unsigned rt = extract32(insn, 16, 5);
3582 unsigned rr = extract32(insn, 21, 5);
3583 unsigned len = 32 - clen;
3584 TCGv_reg dest, src, tmp;
3586 if (c) {
3587 nullify_over(ctx);
3590 dest = dest_gpr(ctx, rt);
3591 src = load_gpr(ctx, rr);
3592 tmp = tcg_temp_new();
3594 /* Recall that SAR uses big-endian bit numbering. */
3595 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3596 if (is_se) {
3597 tcg_gen_sar_reg(dest, src, tmp);
3598 tcg_gen_sextract_reg(dest, dest, 0, len);
3599 } else {
3600 tcg_gen_shr_reg(dest, src, tmp);
3601 tcg_gen_extract_reg(dest, dest, 0, len);
3603 tcg_temp_free(tmp);
3604 save_gpr(ctx, rt, dest);
3606 /* Install the new nullification. */
3607 cond_free(&ctx->null_cond);
3608 if (c) {
3609 ctx->null_cond = do_sed_cond(c, dest);
3611 return nullify_end(ctx, DISAS_NEXT);
3614 static DisasJumpType trans_extrw_imm(DisasContext *ctx, uint32_t insn,
3615 const DisasInsn *di)
3617 unsigned clen = extract32(insn, 0, 5);
3618 unsigned pos = extract32(insn, 5, 5);
3619 unsigned is_se = extract32(insn, 10, 1);
3620 unsigned c = extract32(insn, 13, 3);
3621 unsigned rt = extract32(insn, 16, 5);
3622 unsigned rr = extract32(insn, 21, 5);
3623 unsigned len = 32 - clen;
3624 unsigned cpos = 31 - pos;
3625 TCGv_reg dest, src;
3627 if (c) {
3628 nullify_over(ctx);
3631 dest = dest_gpr(ctx, rt);
3632 src = load_gpr(ctx, rr);
3633 if (is_se) {
3634 tcg_gen_sextract_reg(dest, src, cpos, len);
3635 } else {
3636 tcg_gen_extract_reg(dest, src, cpos, len);
3638 save_gpr(ctx, rt, dest);
3640 /* Install the new nullification. */
3641 cond_free(&ctx->null_cond);
3642 if (c) {
3643 ctx->null_cond = do_sed_cond(c, dest);
3645 return nullify_end(ctx, DISAS_NEXT);
3648 static const DisasInsn table_sh_ex[] = {
3649 { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
3650 { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
3651 { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
3652 { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
3655 static DisasJumpType trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
3656 const DisasInsn *di)
3658 unsigned clen = extract32(insn, 0, 5);
3659 unsigned cpos = extract32(insn, 5, 5);
3660 unsigned nz = extract32(insn, 10, 1);
3661 unsigned c = extract32(insn, 13, 3);
3662 target_sreg val = low_sextract(insn, 16, 5);
3663 unsigned rt = extract32(insn, 21, 5);
3664 unsigned len = 32 - clen;
3665 target_sreg mask0, mask1;
3666 TCGv_reg dest;
3668 if (c) {
3669 nullify_over(ctx);
3671 if (cpos + len > 32) {
3672 len = 32 - cpos;
3675 dest = dest_gpr(ctx, rt);
3676 mask0 = deposit64(0, cpos, len, val);
3677 mask1 = deposit64(-1, cpos, len, val);
3679 if (nz) {
3680 TCGv_reg src = load_gpr(ctx, rt);
3681 if (mask1 != -1) {
3682 tcg_gen_andi_reg(dest, src, mask1);
3683 src = dest;
3685 tcg_gen_ori_reg(dest, src, mask0);
3686 } else {
3687 tcg_gen_movi_reg(dest, mask0);
3689 save_gpr(ctx, rt, dest);
3691 /* Install the new nullification. */
3692 cond_free(&ctx->null_cond);
3693 if (c) {
3694 ctx->null_cond = do_sed_cond(c, dest);
3696 return nullify_end(ctx, DISAS_NEXT);
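/*
 * Worked example of the mask trick above, with cpos = 8, len = 4,
 * val = 5:
 *     mask0 = deposit64(0, 8, 4, 5)  = 0x0000000000000500
 *     mask1 = deposit64(-1, 8, 4, 5) = 0xfffffffffffff5ff
 * and (src & mask1) | mask0 replaces bits 8..11 of src with 5 -- the
 * deposit costs one AND plus one OR, with no shifts at run time.
 */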
3699 static DisasJumpType trans_depw_imm(DisasContext *ctx, uint32_t insn,
3700 const DisasInsn *di)
3702 unsigned clen = extract32(insn, 0, 5);
3703 unsigned cpos = extract32(insn, 5, 5);
3704 unsigned nz = extract32(insn, 10, 1);
3705 unsigned c = extract32(insn, 13, 3);
3706 unsigned rr = extract32(insn, 16, 5);
3707 unsigned rt = extract32(insn, 21, 5);
3708 unsigned rs = nz ? rt : 0;
3709 unsigned len = 32 - clen;
3710 TCGv_reg dest, val;
3712 if (c) {
3713 nullify_over(ctx);
3715 if (cpos + len > 32) {
3716 len = 32 - cpos;
3719 dest = dest_gpr(ctx, rt);
3720 val = load_gpr(ctx, rr);
3721 if (rs == 0) {
3722 tcg_gen_deposit_z_reg(dest, val, cpos, len);
3723 } else {
3724 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, cpos, len);
3726 save_gpr(ctx, rt, dest);
3728 /* Install the new nullification. */
3729 cond_free(&ctx->null_cond);
3730 if (c) {
3731 ctx->null_cond = do_sed_cond(c, dest);
3733 return nullify_end(ctx, DISAS_NEXT);
3736 static DisasJumpType trans_depw_sar(DisasContext *ctx, uint32_t insn,
3737 const DisasInsn *di)
3739 unsigned clen = extract32(insn, 0, 5);
3740 unsigned nz = extract32(insn, 10, 1);
3741 unsigned i = extract32(insn, 12, 1);
3742 unsigned c = extract32(insn, 13, 3);
3743 unsigned rt = extract32(insn, 21, 5);
3744 unsigned rs = nz ? rt : 0;
3745 unsigned len = 32 - clen;
3746 TCGv_reg val, mask, tmp, shift, dest;
3747 unsigned msb = 1U << (len - 1);
3749 if (c) {
3750 nullify_over(ctx);
3753 if (i) {
3754 val = load_const(ctx, low_sextract(insn, 16, 5));
3755 } else {
3756 val = load_gpr(ctx, extract32(insn, 16, 5));
3758 dest = dest_gpr(ctx, rt);
3759 shift = tcg_temp_new();
3760 tmp = tcg_temp_new();
3762 /* Convert big-endian bit numbering in SAR to left-shift. */
3763 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3765 mask = tcg_const_reg(msb + (msb - 1));
3766 tcg_gen_and_reg(tmp, val, mask);
3767 if (rs) {
3768 tcg_gen_shl_reg(mask, mask, shift);
3769 tcg_gen_shl_reg(tmp, tmp, shift);
3770 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3771 tcg_gen_or_reg(dest, dest, tmp);
3772 } else {
3773 tcg_gen_shl_reg(dest, tmp, shift);
3775 tcg_temp_free(shift);
3776 tcg_temp_free(mask);
3777 tcg_temp_free(tmp);
3778 save_gpr(ctx, rt, dest);
3780 /* Install the new nullification. */
3781 cond_free(&ctx->null_cond);
3782 if (c) {
3783 ctx->null_cond = do_sed_cond(c, dest);
3785 return nullify_end(ctx, DISAS_NEXT);
3788 static const DisasInsn table_depw[] = {
3789 { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
3790 { 0xd4000800u, 0xfc001800u, trans_depw_imm },
3791 { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
3794 static DisasJumpType trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
3796 unsigned n = extract32(insn, 1, 1);
3797 unsigned b = extract32(insn, 21, 5);
3798 target_sreg disp = assemble_17(insn);
3799 TCGv_reg tmp;
3801 #ifdef CONFIG_USER_ONLY
3802 /* ??? It seems like there should be a good way of using
3803 "be disp(sr2, r0)", the canonical gateway entry mechanism
3804 to our advantage. But that appears to be inconvenient to
3805 manage alongside branch delay slots. Therefore we handle
3806 entry into the gateway page via absolute address. */
3807 /* Since we don't implement spaces, just branch. Do notice the special
3808 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3809 goto_tb to the TB containing the syscall. */
3810 if (b == 0) {
3811 return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
3813 #else
3814 int sp = assemble_sr3(insn);
3815 nullify_over(ctx);
3816 #endif
3818 tmp = get_temp(ctx);
3819 tcg_gen_addi_reg(tmp, load_gpr(ctx, b), disp);
3820 tmp = do_ibranch_priv(ctx, tmp);
3822 #ifdef CONFIG_USER_ONLY
3823 return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
3824 #else
3825 TCGv_i64 new_spc = tcg_temp_new_i64();
3827 load_spr(ctx, new_spc, sp);
3828 if (is_l) {
3829 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3830 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3832 if (n && use_nullify_skip(ctx)) {
3833 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3834 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3835 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3836 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3837 } else {
3838 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3839 if (ctx->iaoq_b == -1) {
3840 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3842 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3843 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3844 nullify_set(ctx, n);
3846 tcg_temp_free_i64(new_spc);
3847 tcg_gen_lookup_and_goto_ptr();
3848 return nullify_end(ctx, DISAS_NORETURN);
3849 #endif
3852 static DisasJumpType trans_bl(DisasContext *ctx, uint32_t insn,
3853 const DisasInsn *di)
3855 unsigned n = extract32(insn, 1, 1);
3856 unsigned link = extract32(insn, 21, 5);
3857 target_sreg disp = assemble_17(insn);
3859 return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
3862 static DisasJumpType trans_b_gate(DisasContext *ctx, uint32_t insn,
3863 const DisasInsn *di)
3865 unsigned n = extract32(insn, 1, 1);
3866 unsigned link = extract32(insn, 21, 5);
3867 target_sreg disp = assemble_17(insn);
3868 target_ureg dest = iaoq_dest(ctx, disp);
3870 /* Make sure the caller hasn't done something weird with the queue.
3871 * ??? This is not quite the same as the PSW[B] bit, which would be
3872 * expensive to track. Real hardware will trap for
3873 * b gateway
3874 * b gateway+4 (in delay slot of first branch)
3875 * However, checking for a non-sequential instruction queue *will*
3876 * diagnose the security hole
3877 * b gateway
3878 * b evil
3879 * in which instructions at evil would run with increased privs. */
3881 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3882 return gen_illegal(ctx);
3885 #ifndef CONFIG_USER_ONLY
3886 if (ctx->tb_flags & PSW_C) {
3887 CPUHPPAState *env = ctx->cs->env_ptr;
3888 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3889 /* If we could not find a TLB entry, then we need to generate an
3890 ITLB miss exception so the kernel will provide it.
3891 The resulting TLB fill operation will invalidate this TB and
3892 we will re-translate, at which point we *will* be able to find
3893 the TLB entry and determine if this is in fact a gateway page. */
3894 if (type < 0) {
3895 return gen_excp(ctx, EXCP_ITLB_MISS);
3897 /* No change for non-gateway pages or for priv decrease. */
3898 if (type >= 4 && type - 4 < ctx->privilege) {
3899 dest = deposit32(dest, 0, 2, type - 4);
3901 } else {
3902 dest &= -4; /* priv = 0 */
3904 #endif
3906 return do_dbranch(ctx, dest, link, n);
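/*
 * Example of the promotion rule above: code running at privilege 3
 * that executes B,GATE into a page whose access rights have type 4
 * continues at privilege 0 (types 4..7 map to new privilege 0..3).
 * A type that would lower the privilege is ignored, keeping the
 * current level.
 */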
3909 static DisasJumpType trans_bl_long(DisasContext *ctx, uint32_t insn,
3910 const DisasInsn *di)
3912 unsigned n = extract32(insn, 1, 1);
3913 target_sreg disp = assemble_22(insn);
3915 return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
3918 static DisasJumpType trans_blr(DisasContext *ctx, uint32_t insn,
3919 const DisasInsn *di)
3921 unsigned n = extract32(insn, 1, 1);
3922 unsigned rx = extract32(insn, 16, 5);
3923 unsigned link = extract32(insn, 21, 5);
3924 TCGv_reg tmp = get_temp(ctx);
3926 tcg_gen_shli_reg(tmp, load_gpr(ctx, rx), 3);
3927 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3928 /* The computation here never changes privilege level. */
3929 return do_ibranch(ctx, tmp, link, n);
3932 static DisasJumpType trans_bv(DisasContext *ctx, uint32_t insn,
3933 const DisasInsn *di)
3935 unsigned n = extract32(insn, 1, 1);
3936 unsigned rx = extract32(insn, 16, 5);
3937 unsigned rb = extract32(insn, 21, 5);
3938 TCGv_reg dest;
3940 if (rx == 0) {
3941 dest = load_gpr(ctx, rb);
3942 } else {
3943 dest = get_temp(ctx);
3944 tcg_gen_shli_reg(dest, load_gpr(ctx, rx), 3);
3945 tcg_gen_add_reg(dest, dest, load_gpr(ctx, rb));
3947 dest = do_ibranch_priv(ctx, dest);
3948 return do_ibranch(ctx, dest, 0, n);
static DisasJumpType trans_bve(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned n = extract32(insn, 1, 1);
    unsigned rb = extract32(insn, 21, 5);
    unsigned link = extract32(insn, 13, 1) ? 2 : 0;
    TCGv_reg dest;

#ifdef CONFIG_USER_ONLY
    dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));
    return do_ibranch(ctx, dest, link, n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));

    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (link) {
        copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, n);
    tcg_gen_lookup_and_goto_ptr();
    return nullify_end(ctx, DISAS_NORETURN);
#endif
}
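
/*
 * In the system path above, BVE may cross space boundaries, so both
 * halves of the queue move: the old back of the queue becomes the new
 * front (including the space register, when the back was unknown at
 * translation time), and the computed target plus its space from
 * space_select() become the new back.  The link register, if any,
 * receives iaoq_n, the address following the delay slot.
 */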
static const DisasInsn table_branch[] = {
    { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
    { 0xe800a000u, 0xfc00e000u, trans_bl_long },
    { 0xe8004000u, 0xfc00fffdu, trans_blr },
    { 0xe800c000u, 0xfc00fffdu, trans_bv },
    { 0xe800d000u, 0xfc00dffcu, trans_bve },
    { 0xe8002000u, 0xfc00e000u, trans_b_gate },
};
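
/*
 * Each DisasInsn entry is a masked match: an opcode matches the entry
 * when (insn & mask) == match.  For instance, a hypothetical encoding
 * 0xe8404000 selects trans_blr above, since
 *     0xe8404000 & 0xfc00fffd == 0xe8004000.
 * translate_table_int() below walks a table in order and dispatches to
 * the first entry that matches.
 */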
static DisasJumpType trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned ra = extract32(insn, 21, 5);
    return do_fop_wew(ctx, rt, ra, di->f.wew);
}

static DisasJumpType trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned rt = assemble_rt64(insn);
    unsigned ra = assemble_ra64(insn);
    return do_fop_wew(ctx, rt, ra, di->f.wew);
}

static DisasJumpType trans_fop_ded(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned ra = extract32(insn, 21, 5);
    return do_fop_ded(ctx, rt, ra, di->f.ded);
}

static DisasJumpType trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned ra = extract32(insn, 21, 5);
    return do_fop_wed(ctx, rt, ra, di->f.wed);
}

static DisasJumpType trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned rt = assemble_rt64(insn);
    unsigned ra = extract32(insn, 21, 5);
    return do_fop_wed(ctx, rt, ra, di->f.wed);
}

static DisasJumpType trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned ra = extract32(insn, 21, 5);
    return do_fop_dew(ctx, rt, ra, di->f.dew);
}

static DisasJumpType trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned ra = assemble_ra64(insn);
    return do_fop_dew(ctx, rt, ra, di->f.dew);
}

static DisasJumpType trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
                                       const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned rb = extract32(insn, 16, 5);
    unsigned ra = extract32(insn, 21, 5);
    return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
}

static DisasJumpType trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
                                       const DisasInsn *di)
{
    unsigned rt = assemble_rt64(insn);
    unsigned rb = assemble_rb64(insn);
    unsigned ra = assemble_ra64(insn);
    return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
}

static DisasJumpType trans_fop_dedd(DisasContext *ctx, uint32_t insn,
                                    const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned rb = extract32(insn, 16, 5);
    unsigned ra = extract32(insn, 21, 5);
    return do_fop_dedd(ctx, rt, ra, rb, di->f.dedd);
}
static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}
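
/*
 * fcpy is a raw bit copy; the other helpers above exploit the IEEE-754
 * sign bit being the MSB: fabs clears it (AND with INT{32,64}_MAX),
 * fneg flips it (XOR with INT{32,64}_MIN), and fnegabs sets it (OR with
 * INT{32,64}_MIN).  These are pure bit operations, so no call into the
 * softfloat helpers is required.
 */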
static DisasJumpType do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
                               unsigned y, unsigned c)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(ra);
    tb = load_frw0_i32(rb);
    ty = tcg_const_i32(y);
    tc = tcg_const_i32(c);

    gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i32(ta);
    tcg_temp_free_i32(tb);
    tcg_temp_free_i32(ty);
    tcg_temp_free_i32(tc);

    return nullify_end(ctx, DISAS_NEXT);
}
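
/*
 * Note that FCMP does not produce a TCG condition directly: the helper
 * records the comparison result in the fr0 shadow copy of the FP status
 * register, and a later FTEST (see trans_ftest_t and trans_ftest_q
 * below) turns those bits into a nullification condition.
 */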
static DisasJumpType trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
                                     const DisasInsn *di)
{
    unsigned c = extract32(insn, 0, 5);
    unsigned y = extract32(insn, 13, 3);
    unsigned rb = extract32(insn, 16, 5);
    unsigned ra = extract32(insn, 21, 5);
    return do_fcmp_s(ctx, ra, rb, y, c);
}

static DisasJumpType trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
                                     const DisasInsn *di)
{
    unsigned c = extract32(insn, 0, 5);
    unsigned y = extract32(insn, 13, 3);
    unsigned rb = assemble_rb64(insn);
    unsigned ra = assemble_ra64(insn);
    return do_fcmp_s(ctx, ra, rb, y, c);
}
static DisasJumpType trans_fcmp_d(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    unsigned c = extract32(insn, 0, 5);
    unsigned y = extract32(insn, 13, 3);
    unsigned rb = extract32(insn, 16, 5);
    unsigned ra = extract32(insn, 21, 5);
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(ra);
    tb = load_frd0(rb);
    ty = tcg_const_i32(y);
    tc = tcg_const_i32(c);

    gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i64(ta);
    tcg_temp_free_i64(tb);
    tcg_temp_free_i32(ty);
    tcg_temp_free_i32(tc);

    return nullify_end(ctx, DISAS_NEXT);
}
static DisasJumpType trans_ftest_t(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    unsigned y = extract32(insn, 13, 3);
    unsigned cbit = (y ^ 1) - 1;
    TCGv_reg t;

    nullify_over(ctx);

    t = tcg_temp_new();
    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
    tcg_gen_extract_reg(t, t, 21 - cbit, 1);
    ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    tcg_temp_free(t);

    return nullify_end(ctx, DISAS_NEXT);
}
static DisasJumpType trans_ftest_q(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    unsigned c = extract32(insn, 0, 5);
    int mask;
    bool inv = false;
    TCGv_reg t;

    nullify_over(ctx);

    t = tcg_temp_new();
    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));

    switch (c) {
    case 0: /* simple */
        tcg_gen_andi_reg(t, t, 0x4000000);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
        goto done;
    case 2: /* rej */
        inv = true;
        /* fallthru */
    case 1: /* acc */
        mask = 0x43ff800;
        break;
    case 6: /* rej8 */
        inv = true;
        /* fallthru */
    case 5: /* acc8 */
        mask = 0x43f8000;
        break;
    case 9: /* acc6 */
        mask = 0x43e0000;
        break;
    case 13: /* acc4 */
        mask = 0x4380000;
        break;
    case 17: /* acc2 */
        mask = 0x4200000;
        break;
    default:
        return gen_illegal(ctx);
    }
    if (inv) {
        TCGv_reg c = load_const(ctx, mask);
        tcg_gen_or_reg(t, t, c);
        ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
    } else {
        tcg_gen_andi_reg(t, t, mask);
        ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
    }
 done:
    return nullify_end(ctx, DISAS_NEXT);
}
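
/*
 * The mask values above combine bit 26 of the shadowed FP status word
 * (the compare bit tested by the "simple" case) with progressively
 * smaller groups of the compare-queue bits.  The "acc" forms nullify
 * when all of the selected bits are clear; the "rej" forms take the
 * inv path, which tests the complementary condition.
 */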
static DisasJumpType trans_xmpyu(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned rb = assemble_rb64(insn);
    unsigned ra = assemble_ra64(insn);
    TCGv_i64 a, b;

    nullify_over(ctx);

    a = load_frw0_i64(ra);
    b = load_frw0_i64(rb);
    tcg_gen_mul_i64(a, a, b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    tcg_temp_free_i64(b);

    return nullify_end(ctx, DISAS_NEXT);
}
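
/*
 * XMPYU is the one integer operation routed through the FP unit: it
 * multiplies two 32-bit words taken from single FP registers as
 * unsigned integers and writes the full 64-bit product to a double
 * FP register.  Since load_frw0_i64 zero-extends the 32-bit value,
 * the plain i64 multiply above yields the unsigned product.
 */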
#define FOP_DED  trans_fop_ded, .f.ded
#define FOP_DEDD trans_fop_dedd, .f.dedd

#define FOP_WEW  trans_fop_wew_0c, .f.wew
#define FOP_DEW  trans_fop_dew_0c, .f.dew
#define FOP_WED  trans_fop_wed_0c, .f.wed
#define FOP_WEWW trans_fop_weww_0c, .f.weww
static const DisasInsn table_float_0c[] = {
    /* floating point class zero */
    { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
    { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
    { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
    { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
    { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
    { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },

    { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
    { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
    { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
    { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
    { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
    { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },

    /* floating point class three */
    { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
    { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
    { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
    { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },

    { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
    { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
    { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
    { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },

    /* floating point class one */
    /* float/float */
    { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
    { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
    /* int/float */
    { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
    { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
    { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
    { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
    /* float/int */
    { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
    { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
    { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
    { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
    /* float/int truncate */
    { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
    { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
    { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
    { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
    /* uint/float */
    { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
    { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
    { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
    { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
    /* float/uint */
    { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
    { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
    { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
    { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
    /* float/uint truncate */
    { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
    { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
    { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
    { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },

    /* floating point class two */
    { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
    { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
    { 0x30002420, 0xffffffe0, trans_ftest_q },
    { 0x30000420, 0xffff1fff, trans_ftest_t },

    /* FID.  Note that ra == rt == 0, which via fcpy puts 0 into fr0.
       This is machine/revision == 0, which is reserved for the simulator.  */
    { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
};
#undef FOP_WEW
#undef FOP_DEW
#undef FOP_WED
#undef FOP_WEWW
#define FOP_WEW  trans_fop_wew_0e, .f.wew
#define FOP_DEW  trans_fop_dew_0e, .f.dew
#define FOP_WED  trans_fop_wed_0e, .f.wed
#define FOP_WEWW trans_fop_weww_0e, .f.weww
static const DisasInsn table_float_0e[] = {
    /* floating point class zero */
    { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
    { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
    { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
    { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
    { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
    { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },

    { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
    { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
    { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
    { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
    { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
    { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },

    /* floating point class three */
    { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
    { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
    { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
    { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },

    { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
    { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
    { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
    { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },

    { 0x38004700, 0xfc00ef60, trans_xmpyu },

    /* floating point class one */
    /* float/float */
    { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
    { 0x38002200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_d },
    /* int/float */
    { 0x38008200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_w_s },
    { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
    { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
    { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
    /* float/int */
    { 0x38010200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_w },
    { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
    { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
    { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
    /* float/int truncate */
    { 0x38018200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_w },
    { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
    { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
    { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
    /* uint/float */
    { 0x38028200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_uw_s },
    { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
    { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
    { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
    /* float/uint */
    { 0x38030200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_uw },
    { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
    { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
    { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
    /* float/uint truncate */
    { 0x38038200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_uw },
    { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
    { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
    { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },

    /* floating point class two */
    { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
    { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
};
#undef FOP_WEW
#undef FOP_DEW
#undef FOP_WED
#undef FOP_WEWW
#undef FOP_DED
#undef FOP_DEDD
/* Convert the fmpyadd single-precision register encodings to standard.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}
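
/*
 * The 5-bit fmpyadd register fields can only name part of the register
 * file.  The mapping above expands r = 0..15 to single registers 16..31
 * and r = 16..31 to 48..63, e.g. fmpyadd_s_reg(3) == 19 and
 * fmpyadd_s_reg(19) == 51 -- effectively the left and right halves of
 * fr16-fr31 in this file's single-precision numbering.
 */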
static DisasJumpType trans_fmpyadd(DisasContext *ctx,
                                   uint32_t insn, bool is_sub)
{
    unsigned tm = extract32(insn, 0, 5);
    unsigned f = extract32(insn, 5, 1);
    unsigned ra = extract32(insn, 6, 5);
    unsigned ta = extract32(insn, 11, 5);
    unsigned rm2 = extract32(insn, 16, 5);
    unsigned rm1 = extract32(insn, 21, 5);

    nullify_over(ctx);

    /* Independent multiply & add/sub, with undefined behaviour
       if outputs overlap inputs.  */
    if (f == 0) {
        tm = fmpyadd_s_reg(tm);
        ra = fmpyadd_s_reg(ra);
        ta = fmpyadd_s_reg(ta);
        rm2 = fmpyadd_s_reg(rm2);
        rm1 = fmpyadd_s_reg(rm1);
        do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
        do_fop_weww(ctx, ta, ta, ra,
                    is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
    } else {
        do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
        do_fop_dedd(ctx, ta, ta, ra,
                    is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
    }

    return nullify_end(ctx, DISAS_NEXT);
}
static DisasJumpType trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned rt = assemble_rt64(insn);
    unsigned neg = extract32(insn, 5, 1);
    unsigned rm1 = assemble_ra64(insn);
    unsigned rm2 = assemble_rb64(insn);
    unsigned ra3 = assemble_rc64(insn);
    TCGv_i32 a, b, c;

    nullify_over(ctx);
    a = load_frw0_i32(rm1);
    b = load_frw0_i32(rm2);
    c = load_frw0_i32(ra3);

    if (neg) {
        gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
    } else {
        gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
    }

    tcg_temp_free_i32(b);
    tcg_temp_free_i32(c);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx, DISAS_NEXT);
}
static DisasJumpType trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned neg = extract32(insn, 5, 1);
    unsigned rm1 = extract32(insn, 21, 5);
    unsigned rm2 = extract32(insn, 16, 5);
    unsigned ra3 = assemble_rc64(insn);
    TCGv_i64 a, b, c;

    nullify_over(ctx);
    a = load_frd0(rm1);
    b = load_frd0(rm2);
    c = load_frd0(ra3);

    if (neg) {
        gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
    } else {
        gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
    }

    tcg_temp_free_i64(b);
    tcg_temp_free_i64(c);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx, DISAS_NEXT);
}
static const DisasInsn table_fp_fused[] = {
    { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
    { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
};
static DisasJumpType translate_table_int(DisasContext *ctx, uint32_t insn,
                                         const DisasInsn table[], size_t n)
{
    size_t i;
    for (i = 0; i < n; ++i) {
        if ((insn & table[i].mask) == table[i].insn) {
            return table[i].trans(ctx, insn, &table[i]);
        }
    }
    qemu_log_mask(LOG_UNIMP, "UNIMP insn %08x @ " TARGET_FMT_lx "\n",
                  insn, ctx->base.pc_next);
    return gen_illegal(ctx);
}

#define translate_table(ctx, insn, table) \
    translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t opc = extract32(insn, 26, 6);

    switch (opc) {
    case 0x00: /* system op */
        return translate_table(ctx, insn, table_system);
    case 0x01:
        return translate_table(ctx, insn, table_mem_mgmt);
    case 0x02:
        return translate_table(ctx, insn, table_arith_log);
    case 0x03:
        return translate_table(ctx, insn, table_index_mem);
    case 0x06:
        return trans_fmpyadd(ctx, insn, false);
    case 0x08:
        return trans_ldil(ctx, insn);
    case 0x09:
        return trans_copr_w(ctx, insn);
    case 0x0A:
        return trans_addil(ctx, insn);
    case 0x0B:
        return trans_copr_dw(ctx, insn);
    case 0x0C:
        return translate_table(ctx, insn, table_float_0c);
    case 0x0D:
        return trans_ldo(ctx, insn);
    case 0x0E:
        return translate_table(ctx, insn, table_float_0e);

    case 0x10:
        return trans_load(ctx, insn, false, MO_UB);
    case 0x11:
        return trans_load(ctx, insn, false, MO_TEUW);
    case 0x12:
        return trans_load(ctx, insn, false, MO_TEUL);
    case 0x13:
        return trans_load(ctx, insn, true, MO_TEUL);
    case 0x16:
        return trans_fload_mod(ctx, insn);
    case 0x17:
        return trans_load_w(ctx, insn);
    case 0x18:
        return trans_store(ctx, insn, false, MO_UB);
    case 0x19:
        return trans_store(ctx, insn, false, MO_TEUW);
    case 0x1A:
        return trans_store(ctx, insn, false, MO_TEUL);
    case 0x1B:
        return trans_store(ctx, insn, true, MO_TEUL);
    case 0x1E:
        return trans_fstore_mod(ctx, insn);
    case 0x1F:
        return trans_store_w(ctx, insn);

    case 0x20:
        return trans_cmpb(ctx, insn, true, false, false);
    case 0x21:
        return trans_cmpb(ctx, insn, true, true, false);
    case 0x22:
        return trans_cmpb(ctx, insn, false, false, false);
    case 0x23:
        return trans_cmpb(ctx, insn, false, true, false);
    case 0x24:
        return trans_cmpiclr(ctx, insn);
    case 0x25:
        return trans_subi(ctx, insn);
    case 0x26:
        return trans_fmpyadd(ctx, insn, true);
    case 0x27:
        return trans_cmpb(ctx, insn, true, false, true);
    case 0x28:
        return trans_addb(ctx, insn, true, false);
    case 0x29:
        return trans_addb(ctx, insn, true, true);
    case 0x2A:
        return trans_addb(ctx, insn, false, false);
    case 0x2B:
        return trans_addb(ctx, insn, false, true);
    case 0x2C:
    case 0x2D:
        return trans_addi(ctx, insn);
    case 0x2E:
        return translate_table(ctx, insn, table_fp_fused);
    case 0x2F:
        return trans_cmpb(ctx, insn, false, false, true);

    case 0x30:
    case 0x31:
        return trans_bb(ctx, insn);
    case 0x32:
        return trans_movb(ctx, insn, false);
    case 0x33:
        return trans_movb(ctx, insn, true);
    case 0x34:
        return translate_table(ctx, insn, table_sh_ex);
    case 0x35:
        return translate_table(ctx, insn, table_depw);
    case 0x38:
        return trans_be(ctx, insn, false);
    case 0x39:
        return trans_be(ctx, insn, true);
    case 0x3A:
        return translate_table(ctx, insn, table_branch);

    case 0x04: /* spopn */
    case 0x05: /* diag */
    case 0x0F: /* product specific */
        break;

    case 0x07: /* unassigned */
    case 0x15: /* unassigned */
    case 0x1D: /* unassigned */
    case 0x37: /* unassigned */
        break;
    case 0x3F:
#ifndef CONFIG_USER_ONLY
        /* Unassigned, but use as system-halt.  */
        if (insn == 0xfffdead0) {
            return gen_hlt(ctx, 0); /* halt system */
        }
        if (insn == 0xfffdead1) {
            return gen_hlt(ctx, 1); /* reset system */
        }
#endif
        break;
    default:
        break;
    }
    return gen_illegal(ctx);
}
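
/*
 * The major opcode is the top 6 bits of the instruction word.  For
 * example, 0xe8000000 >> 26 == 0x3a, so the whole B/BLR/BV/BVE family
 * funnels through table_branch above.
 */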
static int hppa_tr_init_disas_context(DisasContextBase *dcbase,
                                      CPUState *cs, int max_insns)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_USER_IDX;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first;
    ctx->iaoq_b = ctx->base.tb->cs_base;
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    bound = MIN(max_insns, bound);

    ctx->ntempr = 0;
    ctx->ntempl = 0;
    memset(ctx->tempr, 0, sizeof(ctx->tempr));
    memset(ctx->templ, 0, sizeof(ctx->templ));

    return bound;
}
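
/*
 * The bound computation works because TARGET_PAGE_MASK covers all bits
 * above the page offset: pc_first | TARGET_PAGE_MASK is the negative
 * (two's complement) of the bytes remaining on the page, so negating
 * and dividing by 4 counts the instructions left.  E.g. with 4 KiB
 * pages and pc_first == 0x10000ff8,
 *     -(0x10000ff8 | 0xfffff000) / 4 == 2.
 */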
static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}
static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}
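
/*
 * The two operands recorded here are what restore_state_to_opc() at
 * the bottom of this file receives as data[0] and data[1] when an
 * exception unwinds a partially executed TB.
 */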
static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                     const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG);
    ctx->base.pc_next += 4;
    return true;
}
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        ret = do_page_zero(ctx);
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            ret = translate_one(ctx, insn);
            assert(ctx->null_lab == NULL);
        }
    }

    /* Free any temporaries allocated.  */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        tcg_temp_free(ctx->tempr[i]);
        ctx->tempr[i] = NULL;
    }
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        tcg_temp_free_tl(ctx->templ[i]);
        ctx->templ[i] = NULL;
    }
    ctx->ntempr = 0;
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ret = DISAS_NORETURN;
        } else {
            ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.is_jmp = ret;
    ctx->base.pc_next += 4;

    if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
        return;
    }
    if (ctx->iaoq_f == -1) {
        tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
        nullify_save(ctx);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
    } else if (ctx->iaoq_b == -1) {
        tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
    }
}
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_exit_tb(0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        qemu_log("IN:\n0x00000000:  (null)\n");
        return;
    case 0xb0:
        qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
        return;
    case 0xe0:
        qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
        return;
    case 0x100:
        qemu_log("IN:\n0x00000100:  syscall\n");
        return;
    }
#endif

    qemu_log("IN: %s\n", lookup_symbol(pc));
    log_target_disas(cs, pc, dcbase->tb->size);
}
static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .breakpoint_check   = hppa_tr_breakpoint_check,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    DisasContext ctx;
    translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
}
void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->iaoq_f = data[0];
    if (data[1] != (target_ureg)-1) {
        env->iaoq_b = data[1];
    }
    /* Since we were executing the instruction at IAOQ_F, and took some
       sort of action that provoked the cpu_restore_state, we can infer
       that the instruction was not nullified.  */
    env->psw_n = 0;
}