target/hppa: Convert conditional branches
[qemu/ar7.git] / target/hppa/translate.c
/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "trace-tcg.h"
#include "exec/log.h"

/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#define tcg_temp_free_tl tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_temp_free_tl tcg_temp_free_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_reg_new tcg_global_reg_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new tcg_temp_local_new_i64
#define tcg_temp_free tcg_temp_free_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg tcg_const_i64
#define tcg_const_local_reg tcg_const_local_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32

#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_reg_new tcg_global_reg_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new tcg_temp_local_new_i32
#define tcg_temp_free tcg_temp_free_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg tcg_const_i32
#define tcg_const_local_reg tcg_const_local_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
    bool a0_is_n;
    bool a1_is_0;
} DisasCond;
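
/*
 * Reading aid for the code below: A0 and A1 are the two comparison
 * operands for condition C.  The flag bits record aliasing rather than
 * values: a0_is_n means A0 is the cpu_psw_n global (and must not be
 * freed), and a1_is_0 means A1 stands for constant zero that has not
 * been materialized yet; cond_prep allocates it on demand.
 */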

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
} DisasContext;

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* Inverted space register indicates 0 means sr0 not inferred from base.  */
static int expand_sr3x(int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
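
/*
 * For reference, the mapping computed above, read straight from the
 * expression: M:A = 00 or 01 -> 0 (no update), 10 -> +1 (post-modify),
 * 11 -> -1 (pre-modify), matching the <0 / >0 / =0 convention used by
 * the do_load/do_store helpers below.
 */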

/* Used for branch targets.  */
static int expand_shl2(int val)
{
    return val << 2;
}


/* Include the auto-generated decoder.  */
#include "decode.inc.c"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2

typedef struct DisasInsn {
    uint32_t insn, mask;
    bool (*trans)(DisasContext *ctx, uint32_t insn,
                  const struct DisasInsn *f);
    union {
        void (*ttt)(TCGv_reg, TCGv_reg, TCGv_reg);
        void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
        void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
        void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
        void (*ded)(TCGv_i64, TCGv_env, TCGv_i64);
        void (*wed)(TCGv_i32, TCGv_env, TCGv_i64);
        void (*dew)(TCGv_i64, TCGv_env, TCGv_i32);
    } f;
} DisasInsn;

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a0_is_n = true,
        .a1 = NULL,
        .a1_is_0 = true
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    DisasCond r = { .c = c, .a1 = NULL, .a1_is_0 = true };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);

    return r;
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_prep(DisasCond *cond)
{
    if (cond->a1_is_0) {
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_reg(0);
    }
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}
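
/*
 * In other words, when the previous insn may nullify this one, a write
 * to GR[reg] is emitted as a conditional move that keeps the old value
 * on the nullified path, roughly:
 *
 *     gr[reg] = null_cond ? gr[reg] : t;
 *
 * so no branch is needed for simple register writebacks.
 */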

#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
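
/*
 * The usual shape of a translator callback that cannot express its
 * nullification as a conditional move is therefore (sketch):
 *
 *     nullify_over(ctx);        // branch over the body if nullified
 *     ...emit the operation...
 *     return nullify_end(ctx);  // resolve the label, fix up null_cond
 *
 * as seen in do_load, do_store and the FP helpers below.
 */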

static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    TCGv_reg tmp;

    nullify_over(ctx);
    tmp = tcg_const_reg(ctx->insn);
    tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    tcg_temp_free(tmp);
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
        || ctx->base.singlestep_enabled) {
        return false;
    }
    return true;
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}

/* PA has a habit of taking the LSB of a field and using that as the sign,
   with the rest of the field becoming the least significant bits.  */
static target_sreg low_sextract(uint32_t val, int pos, int len)
{
    target_ureg x = -(target_ureg)extract32(val, pos, 1);
    x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
    return x;
}
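
/*
 * Worked example: for a 3-bit field at pos 0, bit 0 is the sign and
 * bits [2:1] are the low-order bits, so low_sextract(0b001, 0, 3)
 * yields -4 (sign set, magnitude bits 00), while
 * low_sextract(0b110, 0, 3) yields +3 (sign clear, magnitude bits 11).
 */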

static unsigned assemble_rt64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 6, 1);
    unsigned r0 = extract32(insn, 0, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_ra64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 7, 1);
    unsigned r0 = extract32(insn, 21, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_rb64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 12, 1);
    unsigned r0 = extract32(insn, 16, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_rc64(uint32_t insn)
{
    unsigned r2 = extract32(insn, 8, 1);
    unsigned r1 = extract32(insn, 13, 3);
    unsigned r0 = extract32(insn, 9, 2);
    return r2 * 32 + r1 * 4 + r0;
}

static inline unsigned assemble_sr3(uint32_t insn)
{
    unsigned s2 = extract32(insn, 13, 1);
    unsigned s0 = extract32(insn, 14, 2);
    return s2 * 4 + s0;
}

static target_sreg assemble_16(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 16-bit number
       only with wide mode; otherwise a 14-bit number.  Since we don't
       implement wide mode, this is always the 14-bit number.  */
    return low_sextract(insn, 0, 14);
}

static target_sreg assemble_16a(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 14-bit shifted number
       only with wide mode; otherwise a 12-bit shifted number.  Since we
       don't implement wide mode, this is always the 12-bit number.  */
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 11) | extract32(insn, 2, 11);
    return x << 2;
}

static target_sreg assemble_17(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 5) | extract32(insn, 16, 5);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x << 2;
}

static target_sreg assemble_21(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 11) | extract32(insn, 1, 11);
    x = (x << 2) | extract32(insn, 14, 2);
    x = (x << 5) | extract32(insn, 16, 5);
    x = (x << 2) | extract32(insn, 12, 2);
    return x << 11;
}

static target_sreg assemble_22(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 10) | extract32(insn, 16, 10);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x << 2;
}
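
/*
 * As with low_sextract, all of the displacement assemblers above treat
 * the word's bit 0 as the sign.  assemble_17, for instance, builds
 * sign : insn[20:16] : insn[2] : insn[12:3] and then shifts left by 2,
 * giving a word-aligned, sign-extended branch displacement.
 */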

/* The parisc documentation describes only the general interpretation of
   the conditions, without describing their exact implementation.  The
   interpretations do not stand up well when considering ADD,C and SUB,B.
   However, considering the Addition, Subtraction and Logical conditions
   as a whole it would appear that these relations are similar to what
   a traditional NZCV set of flags would produce.  */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N / !N) */
        cond = cond_make_0(TCG_COND_LT, res);
        break;
    case 3: /* <= / >        (N | Z / !N & !Z) */
        cond = cond_make_0(TCG_COND_LE, res);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0(TCG_COND_EQ, tmp);
        tcg_temp_free(tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
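
/*
 * To illustrate the CF handling above: the condition proper lives in
 * cf >> 1 and the negation in cf & 1, so cf = 2 (condition 1, no
 * negate) is "=" and becomes TCG_COND_EQ on the result, while cf = 3
 * is the inverted "<>", TCG_COND_NE.
 */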

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, sv, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for logicals, where the carry and overflow bits are not
   computed, and use of them is undefined.  */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf >> 1) {
    case 4: case 5: case 6:
        cf &= 1;
        break;
    }
    return do_cond(cf, res, res, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
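
/*
 * A worked instance of the SBZ bithack used above: with res = 0x12003456,
 * (res - 0x01010101) & ~res & 0x80808080 leaves bit 23 set because the
 * zero byte borrowed, so "some byte zero" tests nonzero; when no byte
 * is zero, no 0x80 bit survives the mask.
 */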

/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
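
/*
 * Both helpers compute the usual sign-bit overflow identities:
 * for addition, sv = (res ^ in1) & ~(in1 ^ in2), i.e. the operands
 * agreed in sign but the result flipped; for subtraction,
 * sv = (res ^ in1) & (in1 ^ in2).  Only the MSB of SV is meaningful,
 * which is why do_cond tests it with TCG_COND_LT against zero.
 */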

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || c == 4 || c == 5) {
        TCGv_reg zero = tcg_const_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if ((cf >> 1) == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
#endif
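
/*
 * Note how the indexed load above works: shifting the base right by
 * TARGET_REGISTER_BITS - 5 and masking with 030 (octal, i.e. 24)
 * yields the top two bits of the base scaled by sizeof(uint64_t), so
 * the tcg_gen_ld_i64 picks one of sr[4..7] directly out of
 * CPUHPPAState.
 */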

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    /* DEST is a TCGv_i32, so use the i32 load rather than the
       register-width alias.  */
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static void do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    nullify_end(ctx);
}

static void do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    nullify_end(ctx);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, TCGMemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static void do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    nullify_end(ctx);
}

static void do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    tcg_temp_free_i64(tmp);

    nullify_end(ctx);
}

static void do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, cpu_env, tmp);

    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
    nullify_end(ctx);
}

static void do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, cpu_env, src);

    tcg_temp_free_i64(src);
    save_frw_i32(rt, dst);
    tcg_temp_free_i32(dst);
    nullify_end(ctx);
}

static void do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, cpu_env, tmp);

    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);
    nullify_end(ctx);
}

static void do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, cpu_env, src);

    tcg_temp_free_i32(src);
    save_frd(rt, dst);
    tcg_temp_free_i64(dst);
    nullify_end(ctx);
}

static void do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i32(b);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    nullify_end(ctx);
}

static void do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i64(b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    cond_prep(cond);
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches.  */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip executing N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path.  */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
    } else {
        cond_prep(&ctx->null_cond);
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
    return true;
}

/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *        IAOQ_Next{30..31} <- GR[b]{30..31};
 *    else
 *        IAOQ_Next{30..31} <- IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
{
    TCGv_reg dest;
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        dest = get_temp(ctx);
        tcg_gen_ori_reg(dest, offset, 3);
        break;
    default:
        /* Use a translator-lifetime temp: the result is returned to the
           caller, so it must not be freed before use.  */
        dest = get_temp(ctx);
        tcg_gen_andi_reg(dest, offset, -4);
        tcg_gen_ori_reg(dest, dest, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
}
1990 #ifdef CONFIG_USER_ONLY
1991 /* On Linux, page zero is normally marked execute only + gateway.
1992 Therefore normal read or write is supposed to fail, but specific
1993 offsets have kernel code mapped to raise permissions to implement
1994 system calls. Handling this via an explicit check here, rather
1995 in than the "be disp(sr2,r0)" instruction that probably sent us
1996 here, is the easiest way to handle the branch delay slot on the
1997 aforementioned BE. */
1998 static void do_page_zero(DisasContext *ctx)
2000 /* If by some means we get here with PSW[N]=1, that implies that
2001 the B,GATE instruction would be skipped, and we'd fault on the
2002 next insn within the privilaged page. */
2003 switch (ctx->null_cond.c) {
2004 case TCG_COND_NEVER:
2005 break;
2006 case TCG_COND_ALWAYS:
2007 tcg_gen_movi_reg(cpu_psw_n, 0);
2008 goto do_sigill;
2009 default:
2010 /* Since this is always the first (and only) insn within the
2011 TB, we should know the state of PSW[N] from TB->FLAGS. */
2012 g_assert_not_reached();
2015 /* Check that we didn't arrive here via some means that allowed
2016 non-sequential instruction execution. Normally the PSW[B] bit
2017 detects this by disallowing the B,GATE instruction to execute
2018 under such conditions. */
2019 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
2020 goto do_sigill;
2023 switch (ctx->iaoq_f & -4) {
2024 case 0x00: /* Null pointer call */
2025 gen_excp_1(EXCP_IMP);
2026 ctx->base.is_jmp = DISAS_NORETURN;
2027 break;
2029 case 0xb0: /* LWS */
2030 gen_excp_1(EXCP_SYSCALL_LWS);
2031 ctx->base.is_jmp = DISAS_NORETURN;
2032 break;
2034 case 0xe0: /* SET_THREAD_POINTER */
2035 tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
2036 tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
2037 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
2038 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2039 break;
2041 case 0x100: /* SYSCALL */
2042 gen_excp_1(EXCP_SYSCALL);
2043 ctx->base.is_jmp = DISAS_NORETURN;
2044 break;
2046 default:
2047 do_sigill:
2048 gen_excp_1(EXCP_ILL);
2049 ctx->base.is_jmp = DISAS_NORETURN;
2050 break;
2053 #endif
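/* Illustrative: with the usual Linux/hppa convention, userland reaches
   the gateway page with something like "ble 0x100(%sr2, %r0)" (return
   address in %r31), landing on the SYSCALL case above; the LWS entry
   point at 0xb0 is reached the same way.  */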
2055 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2057 cond_free(&ctx->null_cond);
2058 return true;
2061 static bool trans_break(DisasContext *ctx, arg_break *a)
2063 return gen_excp_iir(ctx, EXCP_BREAK);
2066 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2068 /* No point in nullifying the memory barrier. */
2069 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2071 cond_free(&ctx->null_cond);
2072 return true;
2075 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2077 unsigned rt = a->t;
2078 TCGv_reg tmp = dest_gpr(ctx, rt);
2079 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2080 save_gpr(ctx, rt, tmp);
2082 cond_free(&ctx->null_cond);
2083 return true;
2086 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2088 unsigned rt = a->t;
2089 unsigned rs = a->sp;
2090 TCGv_i64 t0 = tcg_temp_new_i64();
2091 TCGv_reg t1 = tcg_temp_new();
2093 load_spr(ctx, t0, rs);
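    /* Space registers are stored as 64-bit values with the 32-bit space
       id in the upper half; e.g. a stored value of 0x0000123400000000
       yields space id 0x1234 after the shift and truncation below.
       (Illustrative.)  */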
2094 tcg_gen_shri_i64(t0, t0, 32);
2095 tcg_gen_trunc_i64_reg(t1, t0);
2097 save_gpr(ctx, rt, t1);
2098 tcg_temp_free(t1);
2099 tcg_temp_free_i64(t0);
2101 cond_free(&ctx->null_cond);
2102 return true;
2105 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2107 unsigned rt = a->t;
2108 unsigned ctl = a->r;
2109 TCGv_reg tmp;
2111 switch (ctl) {
2112 case CR_SAR:
2113 #ifdef TARGET_HPPA64
2114 if (a->e == 0) {
2115 /* MFSAR without ,W masks low 5 bits. */
2116 tmp = dest_gpr(ctx, rt);
2117 tcg_gen_andi_reg(tmp, cpu_sar, 31);
2118 save_gpr(ctx, rt, tmp);
2119 goto done;
2121 #endif
2122 save_gpr(ctx, rt, cpu_sar);
2123 goto done;
2124 case CR_IT: /* Interval Timer */
2125 /* FIXME: Respect PSW_S bit. */
2126 nullify_over(ctx);
2127 tmp = dest_gpr(ctx, rt);
2128 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2129 gen_io_start();
2130 gen_helper_read_interval_timer(tmp);
2131 gen_io_end();
2132 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2133 } else {
2134 gen_helper_read_interval_timer(tmp);
2136 save_gpr(ctx, rt, tmp);
2137 return nullify_end(ctx);
2138 case 26:
2139 case 27:
2140 break;
2141 default:
2142 /* All other control registers are privileged. */
2143 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2144 break;
2147 tmp = get_temp(ctx);
2148 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2149 save_gpr(ctx, rt, tmp);
2151 done:
2152 cond_free(&ctx->null_cond);
2153 return true;
2156 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2158 unsigned rr = a->r;
2159 unsigned rs = a->sp;
2160 TCGv_i64 t64;
2162 if (rs >= 5) {
2163 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2165 nullify_over(ctx);
2167 t64 = tcg_temp_new_i64();
2168 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2169 tcg_gen_shli_i64(t64, t64, 32);
2171 if (rs >= 4) {
2172 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
2173 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2174 } else {
2175 tcg_gen_mov_i64(cpu_sr[rs], t64);
2177 tcg_temp_free_i64(t64);
2179 return nullify_end(ctx);
2182 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2184 unsigned ctl = a->t;
2185 TCGv_reg reg = load_gpr(ctx, a->r);
2186 TCGv_reg tmp;
2188 if (ctl == CR_SAR) {
2189 tmp = tcg_temp_new();
2190 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2191 save_or_nullify(ctx, cpu_sar, tmp);
2192 tcg_temp_free(tmp);
2194 cond_free(&ctx->null_cond);
2195 return true;
2198 /* All other control registers are privileged or read-only. */
2199 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2201 #ifndef CONFIG_USER_ONLY
2202 nullify_over(ctx);
2203 switch (ctl) {
2204 case CR_IT:
2205 gen_helper_write_interval_timer(cpu_env, reg);
2206 break;
2207 case CR_EIRR:
2208 gen_helper_write_eirr(cpu_env, reg);
2209 break;
2210 case CR_EIEM:
2211 gen_helper_write_eiem(cpu_env, reg);
2212 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2213 break;
2215 case CR_IIASQ:
2216 case CR_IIAOQ:
2217 /* FIXME: Respect PSW_Q bit */
2218 /* The write advances the queue and stores to the back element. */
2219 tmp = get_temp(ctx);
2220 tcg_gen_ld_reg(tmp, cpu_env,
2221 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2222 tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2223 tcg_gen_st_reg(reg, cpu_env,
2224 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2225 break;
2227 default:
2228 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2229 break;
2231 return nullify_end(ctx);
2232 #endif
2235 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2237 TCGv_reg tmp = tcg_temp_new();
2239 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2240 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
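    /* Illustrative: on a 32-bit target the complement-and-mask computes
       SAR = 31 - (r % 32), e.g. r = 5 gives SAR = 26.  This is the
       "shift amount from the other end" form used by variable shift and
       deposit sequences.  */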
2241 save_or_nullify(ctx, cpu_sar, tmp);
2242 tcg_temp_free(tmp);
2244 cond_free(&ctx->null_cond);
2245 return true;
2248 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2250 TCGv_reg dest = dest_gpr(ctx, a->t);
2252 #ifdef CONFIG_USER_ONLY
2253 /* We don't implement space registers in user mode. */
2254 tcg_gen_movi_reg(dest, 0);
2255 #else
2256 TCGv_i64 t0 = tcg_temp_new_i64();
2258 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2259 tcg_gen_shri_i64(t0, t0, 32);
2260 tcg_gen_trunc_i64_reg(dest, t0);
2262 tcg_temp_free_i64(t0);
2263 #endif
2264 save_gpr(ctx, a->t, dest);
2266 cond_free(&ctx->null_cond);
2267 return true;
2270 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2272 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2273 #ifndef CONFIG_USER_ONLY
2274 TCGv_reg tmp;
2276 nullify_over(ctx);
2278 tmp = get_temp(ctx);
2279 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2280 tcg_gen_andi_reg(tmp, tmp, ~a->i);
2281 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2282 save_gpr(ctx, a->t, tmp);
2284 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2285 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2286 return nullify_end(ctx);
2287 #endif
2290 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2292 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2293 #ifndef CONFIG_USER_ONLY
2294 TCGv_reg tmp;
2296 nullify_over(ctx);
2298 tmp = get_temp(ctx);
2299 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2300 tcg_gen_ori_reg(tmp, tmp, a->i);
2301 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2302 save_gpr(ctx, a->t, tmp);
2304 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2305 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2306 return nullify_end(ctx);
2307 #endif
2310 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2312 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2313 #ifndef CONFIG_USER_ONLY
2314 TCGv_reg tmp, reg;
2315 nullify_over(ctx);
2317 reg = load_gpr(ctx, a->r);
2318 tmp = get_temp(ctx);
2319 gen_helper_swap_system_mask(tmp, cpu_env, reg);
2321 /* Exit the TB to recognize new interrupts. */
2322 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2323 return nullify_end(ctx);
2324 #endif
2327 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2329 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2330 #ifndef CONFIG_USER_ONLY
2331 nullify_over(ctx);
2333 if (rfi_r) {
2334 gen_helper_rfi_r(cpu_env);
2335 } else {
2336 gen_helper_rfi(cpu_env);
2338 /* Exit the TB to recognize new interrupts. */
2339 if (ctx->base.singlestep_enabled) {
2340 gen_excp_1(EXCP_DEBUG);
2341 } else {
2342 tcg_gen_exit_tb(NULL, 0);
2344 ctx->base.is_jmp = DISAS_NORETURN;
2346 return nullify_end(ctx);
2347 #endif
2350 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2352 return do_rfi(ctx, false);
2355 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2357 return do_rfi(ctx, true);
2360 #ifndef CONFIG_USER_ONLY
2361 static bool gen_hlt(DisasContext *ctx, int reset)
2363 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2364 nullify_over(ctx);
2365 if (reset) {
2366 gen_helper_reset(cpu_env);
2367 } else {
2368 gen_helper_halt(cpu_env);
2370 ctx->base.is_jmp = DISAS_NORETURN;
2371 return nullify_end(ctx);
2373 #endif /* !CONFIG_USER_ONLY */
2375 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2377 if (a->m) {
2378 TCGv_reg dest = dest_gpr(ctx, a->b);
2379 TCGv_reg src1 = load_gpr(ctx, a->b);
2380 TCGv_reg src2 = load_gpr(ctx, a->x);
2382 /* The only thing we need to do is the base register modification. */
2383 tcg_gen_add_reg(dest, src1, src2);
2384 save_gpr(ctx, a->b, dest);
2386 cond_free(&ctx->null_cond);
2387 return true;
2390 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2392 TCGv_reg dest, ofs;
2393 TCGv_i32 level, want;
2394 TCGv_tl addr;
2396 nullify_over(ctx);
2398 dest = dest_gpr(ctx, a->t);
2399 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2401 if (a->imm) {
2402 level = tcg_const_i32(a->ri);
2403 } else {
2404 level = tcg_temp_new_i32();
2405 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2406 tcg_gen_andi_i32(level, level, 3);
2408 want = tcg_const_i32(a->write ? PAGE_WRITE : PAGE_READ);
2410 gen_helper_probe(dest, cpu_env, addr, level, want);
2412 tcg_temp_free_i32(want);
2413 tcg_temp_free_i32(level);
2415 save_gpr(ctx, a->t, dest);
2416 return nullify_end(ctx);
2419 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2421 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2422 #ifndef CONFIG_USER_ONLY
2423 TCGv_tl addr;
2424 TCGv_reg ofs, reg;
2426 nullify_over(ctx);
2428 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2429 reg = load_gpr(ctx, a->r);
2430 if (a->addr) {
2431 gen_helper_itlba(cpu_env, addr, reg);
2432 } else {
2433 gen_helper_itlbp(cpu_env, addr, reg);
2436 /* Exit TB for ITLB change if mmu is enabled. This *should* not be
2437 the case, since the OS TLB fill handler runs with mmu disabled. */
2438 if (!a->data && (ctx->tb_flags & PSW_C)) {
2439 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2441 return nullify_end(ctx);
2442 #endif
2445 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2447 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2448 #ifndef CONFIG_USER_ONLY
2449 TCGv_tl addr;
2450 TCGv_reg ofs;
2452 nullify_over(ctx);
2454 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2455 if (a->m) {
2456 save_gpr(ctx, a->b, ofs);
2458 if (a->local) {
2459 gen_helper_ptlbe(cpu_env);
2460 } else {
2461 gen_helper_ptlb(cpu_env, addr);
2464 /* Exit TB for TLB change if mmu is enabled. */
2465 if (!a->data && (ctx->tb_flags & PSW_C)) {
2466 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2468 return nullify_end(ctx);
2469 #endif
2472 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2474 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2475 #ifndef CONFIG_USER_ONLY
2476 TCGv_tl vaddr;
2477 TCGv_reg ofs, paddr;
2479 nullify_over(ctx);
2481 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2483 paddr = tcg_temp_new();
2484 gen_helper_lpa(paddr, cpu_env, vaddr);
2486 /* Note that physical address result overrides base modification. */
2487 if (a->m) {
2488 save_gpr(ctx, a->b, ofs);
2490 save_gpr(ctx, a->t, paddr);
2491 tcg_temp_free(paddr);
2493 return nullify_end(ctx);
2494 #endif
2497 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2499 TCGv_reg ci;
2501 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2503 /* The Coherence Index is an implementation-defined function of the
2504 physical address. Two addresses with the same CI have a coherent
2505 view of the cache. Our implementation is to return 0 for all,
2506 since the entire address space is coherent. */
2507 ci = tcg_const_reg(0);
2508 save_gpr(ctx, a->t, ci);
2509 tcg_temp_free(ci);
2511 cond_free(&ctx->null_cond);
2512 return true;
2515 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2517 return do_add_reg(ctx, a, false, false, false, false);
2520 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2522 return do_add_reg(ctx, a, true, false, false, false);
2525 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2527 return do_add_reg(ctx, a, false, true, false, false);
2530 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2532 return do_add_reg(ctx, a, false, false, false, true);
2535 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2537 return do_add_reg(ctx, a, false, true, false, true);
2540 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2542 return do_sub_reg(ctx, a, false, false, false);
2545 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2547 return do_sub_reg(ctx, a, true, false, false);
2550 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2552 return do_sub_reg(ctx, a, false, false, true);
2555 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2557 return do_sub_reg(ctx, a, true, false, true);
2560 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2562 return do_sub_reg(ctx, a, false, true, false);
2565 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2567 return do_sub_reg(ctx, a, true, true, false);
2570 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2572 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2575 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2577 return do_log_reg(ctx, a, tcg_gen_and_reg);
2580 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2582 if (a->cf == 0) {
2583 unsigned r2 = a->r2;
2584 unsigned r1 = a->r1;
2585 unsigned rt = a->t;
2587 if (rt == 0) { /* NOP */
2588 cond_free(&ctx->null_cond);
2589 return true;
2591 if (r2 == 0) { /* COPY */
2592 if (r1 == 0) {
2593 TCGv_reg dest = dest_gpr(ctx, rt);
2594 tcg_gen_movi_reg(dest, 0);
2595 save_gpr(ctx, rt, dest);
2596 } else {
2597 save_gpr(ctx, rt, cpu_gr[r1]);
2599 cond_free(&ctx->null_cond);
2600 return true;
2602 #ifndef CONFIG_USER_ONLY
2603 /* These are QEMU extensions and are nops in the real architecture:
2605 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2606 * or %r31,%r31,%r31 -- death loop; offline cpu
2607 * currently implemented as idle.
2608 */
2609 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2610 TCGv_i32 tmp;
2612 /* No need to check for supervisor, as userland can only pause
2613 until the next timer interrupt. */
2614 nullify_over(ctx);
2616 /* Advance the instruction queue. */
2617 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2618 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2619 nullify_set(ctx, 0);
2621 /* Tell the qemu main loop to halt until this cpu has work. */
2622 tmp = tcg_const_i32(1);
2623 tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) +
2624 offsetof(CPUState, halted));
2625 tcg_temp_free_i32(tmp);
2626 gen_excp_1(EXCP_HALTED);
2627 ctx->base.is_jmp = DISAS_NORETURN;
2629 return nullify_end(ctx);
2631 #endif
2633 return do_log_reg(ctx, a, tcg_gen_or_reg);
2636 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2638 return do_log_reg(ctx, a, tcg_gen_xor_reg);
2641 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2643 TCGv_reg tcg_r1, tcg_r2;
2645 if (a->cf) {
2646 nullify_over(ctx);
2648 tcg_r1 = load_gpr(ctx, a->r1);
2649 tcg_r2 = load_gpr(ctx, a->r2);
2650 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2651 return nullify_end(ctx);
2654 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2656 TCGv_reg tcg_r1, tcg_r2;
2658 if (a->cf) {
2659 nullify_over(ctx);
2661 tcg_r1 = load_gpr(ctx, a->r1);
2662 tcg_r2 = load_gpr(ctx, a->r2);
2663 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2664 return nullify_end(ctx);
2667 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2669 TCGv_reg tcg_r1, tcg_r2, tmp;
2671 if (a->cf) {
2672 nullify_over(ctx);
2674 tcg_r1 = load_gpr(ctx, a->r1);
2675 tcg_r2 = load_gpr(ctx, a->r2);
2676 tmp = get_temp(ctx);
2677 tcg_gen_not_reg(tmp, tcg_r2);
2678 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2679 return nullify_end(ctx);
2682 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2684 return do_uaddcm(ctx, a, false);
2687 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2689 return do_uaddcm(ctx, a, true);
2692 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2694 TCGv_reg tmp;
2696 nullify_over(ctx);
2698 tmp = get_temp(ctx);
2699 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2700 if (!is_i) {
2701 tcg_gen_not_reg(tmp, tmp);
2703 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2704 tcg_gen_muli_reg(tmp, tmp, 6);
2705 do_unit(ctx, a->t, tmp, load_gpr(ctx, a->r), a->cf, false,
2706 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2707 return nullify_end(ctx);
2710 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2712 return do_dcor(ctx, a, false);
2715 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2717 return do_dcor(ctx, a, true);
2720 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2722 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2724 nullify_over(ctx);
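    /* Sketch (illustrative): DS is one step of a non-restoring divide.
       It shifts R1 left by one, inserting the previous carry, then adds
       or subtracts R2 as selected by PSW[V] from the previous step;
       repeated, it develops one quotient bit per iteration.  */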
2726 in1 = load_gpr(ctx, a->r1);
2727 in2 = load_gpr(ctx, a->r2);
2729 add1 = tcg_temp_new();
2730 add2 = tcg_temp_new();
2731 addc = tcg_temp_new();
2732 dest = tcg_temp_new();
2733 zero = tcg_const_reg(0);
2735 /* Form R1 << 1 | PSW[CB]{8}. */
2736 tcg_gen_add_reg(add1, in1, in1);
2737 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2739 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2740 carry{8} requires that we subtract via + ~R2 + 1, as described in
2741 the manual. By extracting and masking V, we can produce the
2742 proper inputs to the addition without movcond. */
2743 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2744 tcg_gen_xor_reg(add2, in2, addc);
2745 tcg_gen_andi_reg(addc, addc, 1);
2746 /* ??? This is only correct for 32-bit. */
2747 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2748 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2750 tcg_temp_free(addc);
2751 tcg_temp_free(zero);
2753 /* Write back the result register. */
2754 save_gpr(ctx, a->t, dest);
2756 /* Write back PSW[CB]. */
2757 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2758 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2760 /* Write back PSW[V] for the division step. */
2761 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2762 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2764 /* Install the new nullification. */
2765 if (a->cf) {
2766 TCGv_reg sv = NULL;
2767 if (a->cf >> 1 == 6) {
2768 /* ??? The lshift is supposed to contribute to overflow. */
2769 sv = do_add_sv(ctx, dest, add1, add2);
2771 ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
2774 tcg_temp_free(add1);
2775 tcg_temp_free(add2);
2776 tcg_temp_free(dest);
2778 return nullify_end(ctx);
2781 static bool trans_addi(DisasContext *ctx, uint32_t insn)
2783 target_sreg im = low_sextract(insn, 0, 11);
2784 unsigned e1 = extract32(insn, 11, 1);
2785 unsigned cf = extract32(insn, 12, 4);
2786 unsigned rt = extract32(insn, 16, 5);
2787 unsigned r2 = extract32(insn, 21, 5);
2788 unsigned o1 = extract32(insn, 26, 1);
2789 TCGv_reg tcg_im, tcg_r2;
2791 if (cf) {
2792 nullify_over(ctx);
2795 tcg_im = load_const(ctx, im);
2796 tcg_r2 = load_gpr(ctx, r2);
2797 do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2799 return nullify_end(ctx);
2802 static bool trans_subi(DisasContext *ctx, uint32_t insn)
2804 target_sreg im = low_sextract(insn, 0, 11);
2805 unsigned e1 = extract32(insn, 11, 1);
2806 unsigned cf = extract32(insn, 12, 4);
2807 unsigned rt = extract32(insn, 16, 5);
2808 unsigned r2 = extract32(insn, 21, 5);
2809 TCGv_reg tcg_im, tcg_r2;
2811 if (cf) {
2812 nullify_over(ctx);
2815 tcg_im = load_const(ctx, im);
2816 tcg_r2 = load_gpr(ctx, r2);
2817 do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2819 return nullify_end(ctx);
2822 static bool trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2824 target_sreg im = low_sextract(insn, 0, 11);
2825 unsigned cf = extract32(insn, 12, 4);
2826 unsigned rt = extract32(insn, 16, 5);
2827 unsigned r2 = extract32(insn, 21, 5);
2828 TCGv_reg tcg_im, tcg_r2;
2830 if (cf) {
2831 nullify_over(ctx);
2834 tcg_im = load_const(ctx, im);
2835 tcg_r2 = load_gpr(ctx, r2);
2836 do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2838 return nullify_end(ctx);
2841 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2843 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2844 a->disp, a->sp, a->m, a->size | MO_TE);
2847 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2849 assert(a->x == 0 && a->scale == 0);
2850 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2853 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2855 TCGMemOp mop = MO_TEUL | MO_ALIGN_16 | a->size;
2856 TCGv_reg zero, dest, ofs;
2857 TCGv_tl addr;
2859 nullify_over(ctx);
2861 if (a->m) {
2862 /* Base register modification. Make sure that if RT == RB,
2863 we still see the result of the load. */
2864 dest = get_temp(ctx);
2865 } else {
2866 dest = dest_gpr(ctx, a->t);
2869 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2870 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2871 zero = tcg_const_reg(0);
2872 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2873 if (a->m) {
2874 save_gpr(ctx, a->b, ofs);
2876 save_gpr(ctx, a->t, dest);
2878 return nullify_end(ctx);
2881 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2883 TCGv_reg ofs, val;
2884 TCGv_tl addr;
2886 nullify_over(ctx);
2888 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2889 ctx->mmu_idx == MMU_PHYS_IDX);
2890 val = load_gpr(ctx, a->r);
2891 if (a->a) {
2892 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2893 gen_helper_stby_e_parallel(cpu_env, addr, val);
2894 } else {
2895 gen_helper_stby_e(cpu_env, addr, val);
2897 } else {
2898 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2899 gen_helper_stby_b_parallel(cpu_env, addr, val);
2900 } else {
2901 gen_helper_stby_b(cpu_env, addr, val);
2904 if (a->m) {
2905 tcg_gen_andi_reg(ofs, ofs, ~3);
2906 save_gpr(ctx, a->b, ofs);
2909 return nullify_end(ctx);
2912 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2914 int hold_mmu_idx = ctx->mmu_idx;
2916 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2917 ctx->mmu_idx = MMU_PHYS_IDX;
2918 trans_ld(ctx, a);
2919 ctx->mmu_idx = hold_mmu_idx;
2920 return true;
2923 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2925 int hold_mmu_idx = ctx->mmu_idx;
2927 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2928 ctx->mmu_idx = MMU_PHYS_IDX;
2929 trans_st(ctx, a);
2930 ctx->mmu_idx = hold_mmu_idx;
2931 return true;
2934 static bool trans_ldil(DisasContext *ctx, uint32_t insn)
2936 unsigned rt = extract32(insn, 21, 5);
2937 target_sreg i = assemble_21(insn);
2938 TCGv_reg tcg_rt = dest_gpr(ctx, rt);
2940 tcg_gen_movi_reg(tcg_rt, i);
2941 save_gpr(ctx, rt, tcg_rt);
2942 cond_free(&ctx->null_cond);
2943 return true;
2946 static bool trans_addil(DisasContext *ctx, uint32_t insn)
2948 unsigned rt = extract32(insn, 21, 5);
2949 target_sreg i = assemble_21(insn);
2950 TCGv_reg tcg_rt = load_gpr(ctx, rt);
2951 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2953 tcg_gen_addi_reg(tcg_r1, tcg_rt, i);
2954 save_gpr(ctx, 1, tcg_r1);
2955 cond_free(&ctx->null_cond);
2956 return true;
2959 static bool trans_ldo(DisasContext *ctx, uint32_t insn)
2961 unsigned rb = extract32(insn, 21, 5);
2962 unsigned rt = extract32(insn, 16, 5);
2963 target_sreg i = assemble_16(insn);
2964 TCGv_reg tcg_rt = dest_gpr(ctx, rt);
2966 /* Special case rb == 0, for the LDI pseudo-op.
2967 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
2968 if (rb == 0) {
2969 tcg_gen_movi_reg(tcg_rt, i);
2970 } else {
2971 tcg_gen_addi_reg(tcg_rt, cpu_gr[rb], i);
2973 save_gpr(ctx, rt, tcg_rt);
2974 cond_free(&ctx->null_cond);
2975 return true;
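/* Illustrative: "ldi 5,%r4" assembles to "ldo 5(%r0),%r4" and takes the
   rb == 0 case above, emitting a simple move-immediate.  */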
2978 static bool trans_load(DisasContext *ctx, uint32_t insn,
2979 bool is_mod, TCGMemOp mop)
2981 unsigned rb = extract32(insn, 21, 5);
2982 unsigned rt = extract32(insn, 16, 5);
2983 unsigned sp = extract32(insn, 14, 2);
2984 target_sreg i = assemble_16(insn);
2986 do_load(ctx, rt, rb, 0, 0, i, sp, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2987 return true;
2990 static bool trans_load_w(DisasContext *ctx, uint32_t insn)
2992 unsigned rb = extract32(insn, 21, 5);
2993 unsigned rt = extract32(insn, 16, 5);
2994 unsigned sp = extract32(insn, 14, 2);
2995 target_sreg i = assemble_16a(insn);
2996 unsigned ext2 = extract32(insn, 1, 2);
2998 switch (ext2) {
2999 case 0:
3000 case 1:
3001 /* FLDW without modification. */
3002 do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
3003 break;
3004 case 2:
3005 /* LDW with modification. Note that the sign of I selects
3006 post-dec vs pre-inc. */
3007 do_load(ctx, rt, rb, 0, 0, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
3008 break;
3009 default:
3010 return gen_illegal(ctx);
3012 return true;
3015 static bool trans_fload_mod(DisasContext *ctx, uint32_t insn)
3017 target_sreg i = assemble_16a(insn);
3018 unsigned t1 = extract32(insn, 1, 1);
3019 unsigned a = extract32(insn, 2, 1);
3020 unsigned sp = extract32(insn, 14, 2);
3021 unsigned t0 = extract32(insn, 16, 5);
3022 unsigned rb = extract32(insn, 21, 5);
3024 /* FLDW with modification. */
3025 do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3026 return true;
3029 static bool trans_store(DisasContext *ctx, uint32_t insn,
3030 bool is_mod, TCGMemOp mop)
3032 unsigned rb = extract32(insn, 21, 5);
3033 unsigned rt = extract32(insn, 16, 5);
3034 unsigned sp = extract32(insn, 14, 2);
3035 target_sreg i = assemble_16(insn);
3037 do_store(ctx, rt, rb, i, sp, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
3038 return true;
3041 static bool trans_store_w(DisasContext *ctx, uint32_t insn)
3043 unsigned rb = extract32(insn, 21, 5);
3044 unsigned rt = extract32(insn, 16, 5);
3045 unsigned sp = extract32(insn, 14, 2);
3046 target_sreg i = assemble_16a(insn);
3047 unsigned ext2 = extract32(insn, 1, 2);
3049 switch (ext2) {
3050 case 0:
3051 case 1:
3052 /* FSTW without modification. */
3053 do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
3054 break;
3055 case 2:
3056 /* STW with modification. */
3057 do_store(ctx, rt, rb, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
3058 break;
3059 default:
3060 return gen_illegal(ctx);
3062 return true;
3065 static bool trans_fstore_mod(DisasContext *ctx, uint32_t insn)
3067 target_sreg i = assemble_16a(insn);
3068 unsigned t1 = extract32(insn, 1, 1);
3069 unsigned a = extract32(insn, 2, 1);
3070 unsigned sp = extract32(insn, 14, 2);
3071 unsigned t0 = extract32(insn, 16, 5);
3072 unsigned rb = extract32(insn, 21, 5);
3074 /* FSTW with modification. */
3075 do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3076 return true;
3079 static bool trans_copr_w(DisasContext *ctx, uint32_t insn)
3081 unsigned t0 = extract32(insn, 0, 5);
3082 unsigned m = extract32(insn, 5, 1);
3083 unsigned t1 = extract32(insn, 6, 1);
3084 unsigned ext3 = extract32(insn, 7, 3);
3085 /* unsigned cc = extract32(insn, 10, 2); */
3086 unsigned i = extract32(insn, 12, 1);
3087 unsigned ua = extract32(insn, 13, 1);
3088 unsigned sp = extract32(insn, 14, 2);
3089 unsigned rx = extract32(insn, 16, 5);
3090 unsigned rb = extract32(insn, 21, 5);
3091 unsigned rt = t1 * 32 + t0;
3092 int modify = (m ? (ua ? -1 : 1) : 0);
3093 int disp, scale;
3095 if (i == 0) {
3096 scale = (ua ? 2 : 0);
3097 disp = 0;
3098 modify = m;
3099 } else {
3100 disp = low_sextract(rx, 0, 5);
3101 scale = 0;
3102 rx = 0;
3103 modify = (m ? (ua ? -1 : 1) : 0);
3106 switch (ext3) {
3107 case 0: /* FLDW */
3108 do_floadw(ctx, rt, rb, rx, scale, disp, sp, modify);
3109 break;
3110 case 4: /* FSTW */
3111 do_fstorew(ctx, rt, rb, rx, scale, disp, sp, modify);
3112 break;
3113 default:
3114 return gen_illegal(ctx);
3116 return true;
3119 static bool trans_copr_dw(DisasContext *ctx, uint32_t insn)
3121 unsigned rt = extract32(insn, 0, 5);
3122 unsigned m = extract32(insn, 5, 1);
3123 unsigned ext4 = extract32(insn, 6, 4);
3124 /* unsigned cc = extract32(insn, 10, 2); */
3125 unsigned i = extract32(insn, 12, 1);
3126 unsigned ua = extract32(insn, 13, 1);
3127 unsigned sp = extract32(insn, 14, 2);
3128 unsigned rx = extract32(insn, 16, 5);
3129 unsigned rb = extract32(insn, 21, 5);
3130 int modify = (m ? (ua ? -1 : 1) : 0);
3131 int disp, scale;
3133 if (i == 0) {
3134 scale = (ua ? 3 : 0);
3135 disp = 0;
3136 modify = m;
3137 } else {
3138 disp = low_sextract(rx, 0, 5);
3139 scale = 0;
3140 rx = 0;
3141 modify = (m ? (ua ? -1 : 1) : 0);
3144 switch (ext4) {
3145 case 0: /* FLDD */
3146 do_floadd(ctx, rt, rb, rx, scale, disp, sp, modify);
3147 break;
3148 case 8: /* FSTD */
3149 do_fstored(ctx, rt, rb, rx, scale, disp, sp, modify);
3150 break;
3151 default:
3152 return gen_illegal(ctx);
3154 return true;
3157 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3158 unsigned c, unsigned f, unsigned n, int disp)
3160 TCGv_reg dest, in2, sv;
3161 DisasCond cond;
3163 in2 = load_gpr(ctx, r);
3164 dest = get_temp(ctx);
3166 tcg_gen_sub_reg(dest, in1, in2);
3168 sv = NULL;
3169 if (c == 6) {
3170 sv = do_sub_sv(ctx, dest, in1, in2);
3173 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3174 return do_cbranch(ctx, disp, n, &cond);
3177 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3179 nullify_over(ctx);
3180 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3183 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3185 nullify_over(ctx);
3186 return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3189 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3190 unsigned c, unsigned f, unsigned n, int disp)
3192 TCGv_reg dest, in2, sv, cb_msb;
3193 DisasCond cond;
3195 in2 = load_gpr(ctx, r);
3196 dest = dest_gpr(ctx, r);
3197 sv = NULL;
3198 cb_msb = NULL;
3200 switch (c) {
3201 default:
3202 tcg_gen_add_reg(dest, in1, in2);
3203 break;
3204 case 4: case 5:
3205 cb_msb = get_temp(ctx);
3206 tcg_gen_movi_reg(cb_msb, 0);
3207 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3208 break;
3209 case 6:
3210 tcg_gen_add_reg(dest, in1, in2);
3211 sv = do_add_sv(ctx, dest, in1, in2);
3212 break;
3215 cond = do_cond(c * 2 + f, dest, cb_msb, sv);
3216 return do_cbranch(ctx, disp, n, &cond);
3219 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3221 nullify_over(ctx);
3222 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3225 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3227 nullify_over(ctx);
3228 return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3231 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3233 TCGv_reg tmp, tcg_r;
3234 DisasCond cond;
3236 nullify_over(ctx);
3238 tmp = tcg_temp_new();
3239 tcg_r = load_gpr(ctx, a->r);
3240 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3242 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3243 tcg_temp_free(tmp);
3244 return do_cbranch(ctx, a->disp, a->n, &cond);
3247 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3249 TCGv_reg tmp, tcg_r;
3250 DisasCond cond;
3252 nullify_over(ctx);
3254 tmp = tcg_temp_new();
3255 tcg_r = load_gpr(ctx, a->r);
3256 tcg_gen_shli_reg(tmp, tcg_r, a->p);
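    /* Shifting left by P moves big-endian bit P into the sign bit:
       e.g. P = 0 tests the MSB directly, P = 31 tests the LSB.  */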
3258 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3259 tcg_temp_free(tmp);
3260 return do_cbranch(ctx, a->disp, a->n, &cond);
3263 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3265 TCGv_reg dest;
3266 DisasCond cond;
3268 nullify_over(ctx);
3270 dest = dest_gpr(ctx, a->r2);
3271 if (a->r1 == 0) {
3272 tcg_gen_movi_reg(dest, 0);
3273 } else {
3274 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3277 cond = do_sed_cond(a->c, dest);
3278 return do_cbranch(ctx, a->disp, a->n, &cond);
3281 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3283 TCGv_reg dest;
3284 DisasCond cond;
3286 nullify_over(ctx);
3288 dest = dest_gpr(ctx, a->r);
3289 tcg_gen_movi_reg(dest, a->i);
3291 cond = do_sed_cond(a->c, dest);
3292 return do_cbranch(ctx, a->disp, a->n, &cond);
3295 static bool trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
3296 const DisasInsn *di)
3298 unsigned rt = extract32(insn, 0, 5);
3299 unsigned c = extract32(insn, 13, 3);
3300 unsigned r1 = extract32(insn, 16, 5);
3301 unsigned r2 = extract32(insn, 21, 5);
3302 TCGv_reg dest;
3304 if (c) {
3305 nullify_over(ctx);
3308 dest = dest_gpr(ctx, rt);
3309 if (r1 == 0) {
3310 tcg_gen_ext32u_reg(dest, load_gpr(ctx, r2));
3311 tcg_gen_shr_reg(dest, dest, cpu_sar);
3312 } else if (r1 == r2) {
3313 TCGv_i32 t32 = tcg_temp_new_i32();
3314 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, r2));
3315 tcg_gen_rotr_i32(t32, t32, cpu_sar);
3316 tcg_gen_extu_i32_reg(dest, t32);
3317 tcg_temp_free_i32(t32);
3318 } else {
3319 TCGv_i64 t = tcg_temp_new_i64();
3320 TCGv_i64 s = tcg_temp_new_i64();
3322 tcg_gen_concat_reg_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
3323 tcg_gen_extu_reg_i64(s, cpu_sar);
3324 tcg_gen_shr_i64(t, t, s);
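    /* Illustrative: the pair {r1,r2} is shifted right by SAR, e.g.
       r1 = 0xaaaaaaaa, r2 = 0x55555555, SAR = 4 produces 0xa5555555.  */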
3325 tcg_gen_trunc_i64_reg(dest, t);
3327 tcg_temp_free_i64(t);
3328 tcg_temp_free_i64(s);
3330 save_gpr(ctx, rt, dest);
3332 /* Install the new nullification. */
3333 cond_free(&ctx->null_cond);
3334 if (c) {
3335 ctx->null_cond = do_sed_cond(c, dest);
3337 return nullify_end(ctx);
3340 static bool trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
3341 const DisasInsn *di)
3343 unsigned rt = extract32(insn, 0, 5);
3344 unsigned cpos = extract32(insn, 5, 5);
3345 unsigned c = extract32(insn, 13, 3);
3346 unsigned r1 = extract32(insn, 16, 5);
3347 unsigned r2 = extract32(insn, 21, 5);
3348 unsigned sa = 31 - cpos;
3349 TCGv_reg dest, t2;
3351 if (c) {
3352 nullify_over(ctx);
3355 dest = dest_gpr(ctx, rt);
3356 t2 = load_gpr(ctx, r2);
3357 if (r1 == r2) {
3358 TCGv_i32 t32 = tcg_temp_new_i32();
3359 tcg_gen_trunc_reg_i32(t32, t2);
3360 tcg_gen_rotri_i32(t32, t32, sa);
3361 tcg_gen_extu_i32_reg(dest, t32);
3362 tcg_temp_free_i32(t32);
3363 } else if (r1 == 0) {
3364 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3365 } else {
3366 TCGv_reg t0 = tcg_temp_new();
3367 tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
3368 tcg_gen_deposit_reg(dest, t0, cpu_gr[r1], 32 - sa, sa);
3369 tcg_temp_free(t0);
3371 save_gpr(ctx, rt, dest);
3373 /* Install the new nullification. */
3374 cond_free(&ctx->null_cond);
3375 if (c) {
3376 ctx->null_cond = do_sed_cond(c, dest);
3378 return nullify_end(ctx);
3381 static bool trans_extrw_sar(DisasContext *ctx, uint32_t insn,
3382 const DisasInsn *di)
3384 unsigned clen = extract32(insn, 0, 5);
3385 unsigned is_se = extract32(insn, 10, 1);
3386 unsigned c = extract32(insn, 13, 3);
3387 unsigned rt = extract32(insn, 16, 5);
3388 unsigned rr = extract32(insn, 21, 5);
3389 unsigned len = 32 - clen;
3390 TCGv_reg dest, src, tmp;
3392 if (c) {
3393 nullify_over(ctx);
3396 dest = dest_gpr(ctx, rt);
3397 src = load_gpr(ctx, rr);
3398 tmp = tcg_temp_new();
3400 /* Recall that SAR uses big-endian bit numbering. */
3401 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3402 if (is_se) {
3403 tcg_gen_sar_reg(dest, src, tmp);
3404 tcg_gen_sextract_reg(dest, dest, 0, len);
3405 } else {
3406 tcg_gen_shr_reg(dest, src, tmp);
3407 tcg_gen_extract_reg(dest, dest, 0, len);
3409 tcg_temp_free(tmp);
3410 save_gpr(ctx, rt, dest);
3412 /* Install the new nullification. */
3413 cond_free(&ctx->null_cond);
3414 if (c) {
3415 ctx->null_cond = do_sed_cond(c, dest);
3417 return nullify_end(ctx);
3420 static bool trans_extrw_imm(DisasContext *ctx, uint32_t insn,
3421 const DisasInsn *di)
3423 unsigned clen = extract32(insn, 0, 5);
3424 unsigned pos = extract32(insn, 5, 5);
3425 unsigned is_se = extract32(insn, 10, 1);
3426 unsigned c = extract32(insn, 13, 3);
3427 unsigned rt = extract32(insn, 16, 5);
3428 unsigned rr = extract32(insn, 21, 5);
3429 unsigned len = 32 - clen;
3430 unsigned cpos = 31 - pos;
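    /* PA bit numbering is big-endian: POS names the field's rightmost
       bit counted from the MSB, so the little-endian shift is 31 - pos.
       E.g. pos = 31, len = 8 extracts the low byte (LE bit 0).  */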
3431 TCGv_reg dest, src;
3433 if (c) {
3434 nullify_over(ctx);
3437 dest = dest_gpr(ctx, rt);
3438 src = load_gpr(ctx, rr);
3439 if (is_se) {
3440 tcg_gen_sextract_reg(dest, src, cpos, len);
3441 } else {
3442 tcg_gen_extract_reg(dest, src, cpos, len);
3444 save_gpr(ctx, rt, dest);
3446 /* Install the new nullification. */
3447 cond_free(&ctx->null_cond);
3448 if (c) {
3449 ctx->null_cond = do_sed_cond(c, dest);
3451 return nullify_end(ctx);
3454 static const DisasInsn table_sh_ex[] = {
3455 { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
3456 { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
3457 { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
3458 { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
3461 static bool trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
3462 const DisasInsn *di)
3464 unsigned clen = extract32(insn, 0, 5);
3465 unsigned cpos = extract32(insn, 5, 5);
3466 unsigned nz = extract32(insn, 10, 1);
3467 unsigned c = extract32(insn, 13, 3);
3468 target_sreg val = low_sextract(insn, 16, 5);
3469 unsigned rt = extract32(insn, 21, 5);
3470 unsigned len = 32 - clen;
3471 target_sreg mask0, mask1;
3472 TCGv_reg dest;
3474 if (c) {
3475 nullify_over(ctx);
3477 if (cpos + len > 32) {
3478 len = 32 - cpos;
3481 dest = dest_gpr(ctx, rt);
3482 mask0 = deposit64(0, cpos, len, val);
3483 mask1 = deposit64(-1, cpos, len, val);
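    /* Worked example (illustrative): cpos = 8, len = 4, val = 5 gives
       mask0 = 0x00000500 and mask1 = 0xfffff5ff; the nz case below then
       computes (src & mask1) | mask0, rewriting only that field.  */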
3485 if (nz) {
3486 TCGv_reg src = load_gpr(ctx, rt);
3487 if (mask1 != -1) {
3488 tcg_gen_andi_reg(dest, src, mask1);
3489 src = dest;
3491 tcg_gen_ori_reg(dest, src, mask0);
3492 } else {
3493 tcg_gen_movi_reg(dest, mask0);
3495 save_gpr(ctx, rt, dest);
3497 /* Install the new nullification. */
3498 cond_free(&ctx->null_cond);
3499 if (c) {
3500 ctx->null_cond = do_sed_cond(c, dest);
3502 return nullify_end(ctx);
3505 static bool trans_depw_imm(DisasContext *ctx, uint32_t insn,
3506 const DisasInsn *di)
3508 unsigned clen = extract32(insn, 0, 5);
3509 unsigned cpos = extract32(insn, 5, 5);
3510 unsigned nz = extract32(insn, 10, 1);
3511 unsigned c = extract32(insn, 13, 3);
3512 unsigned rr = extract32(insn, 16, 5);
3513 unsigned rt = extract32(insn, 21, 5);
3514 unsigned rs = nz ? rt : 0;
3515 unsigned len = 32 - clen;
3516 TCGv_reg dest, val;
3518 if (c) {
3519 nullify_over(ctx);
3521 if (cpos + len > 32) {
3522 len = 32 - cpos;
3525 dest = dest_gpr(ctx, rt);
3526 val = load_gpr(ctx, rr);
3527 if (rs == 0) {
3528 tcg_gen_deposit_z_reg(dest, val, cpos, len);
3529 } else {
3530 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, cpos, len);
3532 save_gpr(ctx, rt, dest);
3534 /* Install the new nullification. */
3535 cond_free(&ctx->null_cond);
3536 if (c) {
3537 ctx->null_cond = do_sed_cond(c, dest);
3539 return nullify_end(ctx);
3542 static bool trans_depw_sar(DisasContext *ctx, uint32_t insn,
3543 const DisasInsn *di)
3545 unsigned clen = extract32(insn, 0, 5);
3546 unsigned nz = extract32(insn, 10, 1);
3547 unsigned i = extract32(insn, 12, 1);
3548 unsigned c = extract32(insn, 13, 3);
3549 unsigned rt = extract32(insn, 21, 5);
3550 unsigned rs = nz ? rt : 0;
3551 unsigned len = 32 - clen;
3552 TCGv_reg val, mask, tmp, shift, dest;
3553 unsigned msb = 1U << (len - 1);
3555 if (c) {
3556 nullify_over(ctx);
3559 if (i) {
3560 val = load_const(ctx, low_sextract(insn, 16, 5));
3561 } else {
3562 val = load_gpr(ctx, extract32(insn, 16, 5));
3564 dest = dest_gpr(ctx, rt);
3565 shift = tcg_temp_new();
3566 tmp = tcg_temp_new();
3568 /* Convert big-endian bit numbering in SAR to left-shift. */
3569 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3571 mask = tcg_const_reg(msb + (msb - 1));
3572 tcg_gen_and_reg(tmp, val, mask);
3573 if (rs) {
3574 tcg_gen_shl_reg(mask, mask, shift);
3575 tcg_gen_shl_reg(tmp, tmp, shift);
3576 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3577 tcg_gen_or_reg(dest, dest, tmp);
3578 } else {
3579 tcg_gen_shl_reg(dest, tmp, shift);
3581 tcg_temp_free(shift);
3582 tcg_temp_free(mask);
3583 tcg_temp_free(tmp);
3584 save_gpr(ctx, rt, dest);
3586 /* Install the new nullification. */
3587 cond_free(&ctx->null_cond);
3588 if (c) {
3589 ctx->null_cond = do_sed_cond(c, dest);
3591 return nullify_end(ctx);
3594 static const DisasInsn table_depw[] = {
3595 { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
3596 { 0xd4000800u, 0xfc001800u, trans_depw_imm },
3597 { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
3600 static bool trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
3602 unsigned n = extract32(insn, 1, 1);
3603 unsigned b = extract32(insn, 21, 5);
3604 target_sreg disp = assemble_17(insn);
3605 TCGv_reg tmp;
3607 #ifdef CONFIG_USER_ONLY
3608 /* ??? It seems like there should be a good way of using
3609 "be disp(sr2, r0)", the canonical gateway entry mechanism
3610 to our advantage. But that appears to be inconvenient to
3611 manage along side branch delay slots. Therefore we handle
3612 entry into the gateway page via absolute address. */
3613 /* Since we don't implement spaces, just branch. Do notice the special
3614 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3615 goto_tb to the TB containing the syscall. */
3616 if (b == 0) {
3617 return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
3619 #else
3620 int sp = assemble_sr3(insn);
3621 nullify_over(ctx);
3622 #endif
3624 tmp = get_temp(ctx);
3625 tcg_gen_addi_reg(tmp, load_gpr(ctx, b), disp);
3626 tmp = do_ibranch_priv(ctx, tmp);
3628 #ifdef CONFIG_USER_ONLY
3629 return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
3630 #else
3631 TCGv_i64 new_spc = tcg_temp_new_i64();
3633 load_spr(ctx, new_spc, sp);
3634 if (is_l) {
3635 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3636 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3638 if (n && use_nullify_skip(ctx)) {
3639 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3640 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3641 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3642 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3643 } else {
3644 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3645 if (ctx->iaoq_b == -1) {
3646 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3648 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3649 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3650 nullify_set(ctx, n);
3652 tcg_temp_free_i64(new_spc);
3653 tcg_gen_lookup_and_goto_ptr();
3654 ctx->base.is_jmp = DISAS_NORETURN;
3655 return nullify_end(ctx);
3656 #endif
3659 static bool trans_bl(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3661 unsigned n = extract32(insn, 1, 1);
3662 unsigned link = extract32(insn, 21, 5);
3663 target_sreg disp = assemble_17(insn);
3665 do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
3666 return true;
3669 static bool trans_b_gate(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3671 unsigned n = extract32(insn, 1, 1);
3672 unsigned link = extract32(insn, 21, 5);
3673 target_sreg disp = assemble_17(insn);
3674 target_ureg dest = iaoq_dest(ctx, disp);
3676 /* Make sure the caller hasn't done something weird with the queue.
3677 * ??? This is not quite the same as the PSW[B] bit, which would be
3678 * expensive to track. Real hardware will trap for
3679 * b gateway
3680 * b gateway+4 (in delay slot of first branch)
3681 * However, checking for a non-sequential instruction queue *will*
3682 * diagnose the security hole
3683 * b gateway
3684 * b evil
3685 * in which instructions at evil would run with increased privs.
3686 */
3687 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3688 return gen_illegal(ctx);
3691 #ifndef CONFIG_USER_ONLY
3692 if (ctx->tb_flags & PSW_C) {
3693 CPUHPPAState *env = ctx->cs->env_ptr;
3694 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3695 /* If we could not find a TLB entry, then we need to generate an
3696 ITLB miss exception so the kernel will provide it.
3697 The resulting TLB fill operation will invalidate this TB and
3698 we will re-translate, at which point we *will* be able to find
3699 the TLB entry and determine if this is in fact a gateway page. */
3700 if (type < 0) {
3701 gen_excp(ctx, EXCP_ITLB_MISS);
3702 return true;
3704 /* No change for non-gateway pages or for priv decrease. */
3705 if (type >= 4 && type - 4 < ctx->privilege) {
3706 dest = deposit32(dest, 0, 2, type - 4);
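            /* Illustrative: a type-4 gateway page deposits 0 into the
               low bits of DEST, promoting to privilege 0; a type-7 page
               never promotes, since type - 4 == 3 is not below any
               current privilege level.  */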
3708 } else {
3709 dest &= -4; /* priv = 0 */
3711 #endif
3713 do_dbranch(ctx, dest, link, n);
3714 return true;
3717 static bool trans_bl_long(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3719 unsigned n = extract32(insn, 1, 1);
3720 target_sreg disp = assemble_22(insn);
3722 do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
3723 return true;
3726 static bool trans_blr(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3728 unsigned n = extract32(insn, 1, 1);
3729 unsigned rx = extract32(insn, 16, 5);
3730 unsigned link = extract32(insn, 21, 5);
3731 TCGv_reg tmp = get_temp(ctx);
3733 tcg_gen_shli_reg(tmp, load_gpr(ctx, rx), 3);
3734 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
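    /* Illustrative: BLR indexes a table of 8-byte (two-insn) entries
       that begins after the delay slot; e.g. rx = 2 branches to
       iaoq_f + 8 + 16, the third entry.  */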
3735 /* The computation here never changes privilege level. */
3736 do_ibranch(ctx, tmp, link, n);
3737 return true;
3740 static bool trans_bv(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3742 unsigned n = extract32(insn, 1, 1);
3743 unsigned rx = extract32(insn, 16, 5);
3744 unsigned rb = extract32(insn, 21, 5);
3745 TCGv_reg dest;
3747 if (rx == 0) {
3748 dest = load_gpr(ctx, rb);
3749 } else {
3750 dest = get_temp(ctx);
3751 tcg_gen_shli_reg(dest, load_gpr(ctx, rx), 3);
3752 tcg_gen_add_reg(dest, dest, load_gpr(ctx, rb));
3754 dest = do_ibranch_priv(ctx, dest);
3755 do_ibranch(ctx, dest, 0, n);
3756 return true;
3759 static bool trans_bve(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3761 unsigned n = extract32(insn, 1, 1);
3762 unsigned rb = extract32(insn, 21, 5);
3763 unsigned link = extract32(insn, 13, 1) ? 2 : 0;
3764 TCGv_reg dest;
3766 #ifdef CONFIG_USER_ONLY
3767 dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));
3768 do_ibranch(ctx, dest, link, n);
3769 #else
3770 nullify_over(ctx);
3771 dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));
3773 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3774 if (ctx->iaoq_b == -1) {
3775 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3777 copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3778 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3779 if (link) {
3780 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
3782 nullify_set(ctx, n);
3783 tcg_gen_lookup_and_goto_ptr();
3784 ctx->base.is_jmp = DISAS_NORETURN;
3785 return nullify_end(ctx);
3786 #endif
3787 return true;
3790 static const DisasInsn table_branch[] = {
3791 { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
3792 { 0xe800a000u, 0xfc00e000u, trans_bl_long },
3793 { 0xe8004000u, 0xfc00fffdu, trans_blr },
3794 { 0xe800c000u, 0xfc00fffdu, trans_bv },
3795 { 0xe800d000u, 0xfc00dffcu, trans_bve },
3796 { 0xe8002000u, 0xfc00e000u, trans_b_gate },
3799 static bool trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3800 const DisasInsn *di)
3802 unsigned rt = extract32(insn, 0, 5);
3803 unsigned ra = extract32(insn, 21, 5);
3804 do_fop_wew(ctx, rt, ra, di->f.wew);
3805 return true;
3808 static bool trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3809 const DisasInsn *di)
3811 unsigned rt = assemble_rt64(insn);
3812 unsigned ra = assemble_ra64(insn);
3813 do_fop_wew(ctx, rt, ra, di->f.wew);
3814 return true;
3817 static bool trans_fop_ded(DisasContext *ctx, uint32_t insn,
3818 const DisasInsn *di)
3820 unsigned rt = extract32(insn, 0, 5);
3821 unsigned ra = extract32(insn, 21, 5);
3822 do_fop_ded(ctx, rt, ra, di->f.ded);
3823 return true;
3826 static bool trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
3827 const DisasInsn *di)
3829 unsigned rt = extract32(insn, 0, 5);
3830 unsigned ra = extract32(insn, 21, 5);
3831 do_fop_wed(ctx, rt, ra, di->f.wed);
3832 return true;
3835 static bool trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
3836 const DisasInsn *di)
3838 unsigned rt = assemble_rt64(insn);
3839 unsigned ra = extract32(insn, 21, 5);
3840 do_fop_wed(ctx, rt, ra, di->f.wed);
3841 return true;
3844 static bool trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
3845 const DisasInsn *di)
3847 unsigned rt = extract32(insn, 0, 5);
3848 unsigned ra = extract32(insn, 21, 5);
3849 do_fop_dew(ctx, rt, ra, di->f.dew);
3850 return true;
3853 static bool trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
3854 const DisasInsn *di)
3856 unsigned rt = extract32(insn, 0, 5);
3857 unsigned ra = assemble_ra64(insn);
3858 do_fop_dew(ctx, rt, ra, di->f.dew);
3859 return true;
3862 static bool trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
3863 const DisasInsn *di)
3865 unsigned rt = extract32(insn, 0, 5);
3866 unsigned rb = extract32(insn, 16, 5);
3867 unsigned ra = extract32(insn, 21, 5);
3868 do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3869 return true;
3872 static bool trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
3873 const DisasInsn *di)
3875 unsigned rt = assemble_rt64(insn);
3876 unsigned rb = assemble_rb64(insn);
3877 unsigned ra = assemble_ra64(insn);
3878 do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3879 return true;
3882 static bool trans_fop_dedd(DisasContext *ctx, uint32_t insn,
3883 const DisasInsn *di)
3885 unsigned rt = extract32(insn, 0, 5);
3886 unsigned rb = extract32(insn, 16, 5);
3887 unsigned ra = extract32(insn, 21, 5);
3888 do_fop_dedd(ctx, rt, ra, rb, di->f.dedd);
3889 return true;
3892 static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3894 tcg_gen_mov_i32(dst, src);
3897 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3899 tcg_gen_mov_i64(dst, src);
3902 static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3904 tcg_gen_andi_i32(dst, src, INT32_MAX);
3907 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3909 tcg_gen_andi_i64(dst, src, INT64_MAX);
3912 static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3914 tcg_gen_xori_i32(dst, src, INT32_MIN);
3917 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3919 tcg_gen_xori_i64(dst, src, INT64_MIN);
3922 static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3924 tcg_gen_ori_i32(dst, src, INT32_MIN);
3927 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3929 tcg_gen_ori_i64(dst, src, INT64_MIN);
3932 static void do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
3933 unsigned y, unsigned c)
3935 TCGv_i32 ta, tb, tc, ty;
3937 nullify_over(ctx);
3939 ta = load_frw0_i32(ra);
3940 tb = load_frw0_i32(rb);
3941 ty = tcg_const_i32(y);
3942 tc = tcg_const_i32(c);
3944 gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3946 tcg_temp_free_i32(ta);
3947 tcg_temp_free_i32(tb);
3948 tcg_temp_free_i32(ty);
3949 tcg_temp_free_i32(tc);
3951 nullify_end(ctx);
3954 static bool trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
3955 const DisasInsn *di)
3957 unsigned c = extract32(insn, 0, 5);
3958 unsigned y = extract32(insn, 13, 3);
3959 unsigned rb = extract32(insn, 16, 5);
3960 unsigned ra = extract32(insn, 21, 5);
3961 do_fcmp_s(ctx, ra, rb, y, c);
3962 return true;
3965 static bool trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
3966 const DisasInsn *di)
3968 unsigned c = extract32(insn, 0, 5);
3969 unsigned y = extract32(insn, 13, 3);
3970 unsigned rb = assemble_rb64(insn);
3971 unsigned ra = assemble_ra64(insn);
3972 do_fcmp_s(ctx, ra, rb, y, c);
3973 return true;
3976 static bool trans_fcmp_d(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3978 unsigned c = extract32(insn, 0, 5);
3979 unsigned y = extract32(insn, 13, 3);
3980 unsigned rb = extract32(insn, 16, 5);
3981 unsigned ra = extract32(insn, 21, 5);
3982 TCGv_i64 ta, tb;
3983 TCGv_i32 tc, ty;
3985 nullify_over(ctx);
3987 ta = load_frd0(ra);
3988 tb = load_frd0(rb);
3989 ty = tcg_const_i32(y);
3990 tc = tcg_const_i32(c);
3992 gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3994 tcg_temp_free_i64(ta);
3995 tcg_temp_free_i64(tb);
3996 tcg_temp_free_i32(ty);
3997 tcg_temp_free_i32(tc);
3999 return nullify_end(ctx);
4002 static bool trans_ftest_t(DisasContext *ctx, uint32_t insn,
4003 const DisasInsn *di)
4005 unsigned y = extract32(insn, 13, 3);
4006 unsigned cbit = (y ^ 1) - 1;
4007 TCGv_reg t;
4009 nullify_over(ctx);
4011 t = tcg_temp_new();
4012 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
4013 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
4014 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4015 tcg_temp_free(t);
4017 return nullify_end(ctx);
4020 static bool trans_ftest_q(DisasContext *ctx, uint32_t insn,
4021 const DisasInsn *di)
4023 unsigned c = extract32(insn, 0, 5);
4024 int mask;
4025 bool inv = false;
4026 TCGv_reg t;
4028 nullify_over(ctx);
4030 t = tcg_temp_new();
4031 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
4033 switch (c) {
4034 case 0: /* simple */
4035 tcg_gen_andi_reg(t, t, 0x4000000);
4036 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4037 goto done;
4038 case 2: /* rej */
4039 inv = true;
4040 /* fallthru */
4041 case 1: /* acc */
4042 mask = 0x43ff800;
4043 break;
4044 case 6: /* rej8 */
4045 inv = true;
4046 /* fallthru */
4047 case 5: /* acc8 */
4048 mask = 0x43f8000;
4049 break;
4050 case 9: /* acc6 */
4051 mask = 0x43e0000;
4052 break;
4053 case 13: /* acc4 */
4054 mask = 0x4380000;
4055 break;
4056 case 17: /* acc2 */
4057 mask = 0x4200000;
4058 break;
4059 default:
4060 return gen_illegal(ctx);
4062 if (inv) {
4063 TCGv_reg c = load_const(ctx, mask);
4064 tcg_gen_or_reg(t, t, c);
4065 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4066 } else {
4067 tcg_gen_andi_reg(t, t, mask);
4068 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4070 done:
4071 return nullify_end(ctx);
4074 static bool trans_xmpyu(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
4076 unsigned rt = extract32(insn, 0, 5);
4077 unsigned rb = assemble_rb64(insn);
4078 unsigned ra = assemble_ra64(insn);
4079 TCGv_i64 a, b;
4081 nullify_over(ctx);
4083 a = load_frw0_i64(ra);
4084 b = load_frw0_i64(rb);
4085 tcg_gen_mul_i64(a, a, b);
4086 save_frd(rt, a);
4087 tcg_temp_free_i64(a);
4088 tcg_temp_free_i64(b);
4090 return nullify_end(ctx);
4093 #define FOP_DED trans_fop_ded, .f.ded
4094 #define FOP_DEDD trans_fop_dedd, .f.dedd
4096 #define FOP_WEW trans_fop_wew_0c, .f.wew
4097 #define FOP_DEW trans_fop_dew_0c, .f.dew
4098 #define FOP_WED trans_fop_wed_0c, .f.wed
4099 #define FOP_WEWW trans_fop_weww_0c, .f.weww
4101 static const DisasInsn table_float_0c[] = {
4102 /* floating point class zero */
4103 { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
4104 { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
4105 { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
4106 { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
4107 { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
4108 { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },
4110 { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
4111 { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
4112 { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
4113 { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
4114 { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
4115 { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
4117 /* floating point class three */
4118 { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
4119 { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
4120 { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
4121 { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },
4123 { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
4124 { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
4125 { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
4126 { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
4128 /* floating point class one */
4129 /* float/float */
4130 { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
4131 { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
4132 /* int/float */
4133 { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
4134 { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
4135 { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
4136 { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
4137 /* float/int */
4138 { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
4139 { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
4140 { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
4141 { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
4142 /* float/int truncate */
4143 { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
4144 { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
4145 { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
4146 { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
4147 /* uint/float */
4148 { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
4149 { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
4150 { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
4151 { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
4152 /* float/uint */
4153 { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
4154 { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
4155 { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
4156 { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
4157 /* float/uint truncate */
4158 { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
4159 { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
4160 { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
4161 { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
4163 /* floating point class two */
4164 { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
4165 { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
4166 { 0x30002420, 0xffffffe0, trans_ftest_q },
4167 { 0x30000420, 0xffff1fff, trans_ftest_t },
4169 /* FID. Note that ra == rt == 0, which via fcpy puts 0 into fr0.
4170 This is machine/revision == 0, which is reserved for the simulator. */
4171 { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
4172 };
4174 #undef FOP_WEW
4175 #undef FOP_DEW
4176 #undef FOP_WED
4177 #undef FOP_WEWW
4178 #define FOP_WEW trans_fop_wew_0e, .f.wew
4179 #define FOP_DEW trans_fop_dew_0e, .f.dew
4180 #define FOP_WED trans_fop_wed_0e, .f.wed
4181 #define FOP_WEWW trans_fop_weww_0e, .f.weww
4183 static const DisasInsn table_float_0e[] = {
4184 /* floating point class zero */
4185 { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
4186 { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
4187 { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
4188 { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
4189 { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
4190 { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },
4192 { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
4193 { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
4194 { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
4195 { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
4196 { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
4197 { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
4199 /* floating point class three */
4200 { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
4201 { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
4202 { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
4203 { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },
4205 { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
4206 { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
4207 { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
4208 { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
4210 { 0x38004700, 0xfc00ef60, trans_xmpyu },
4212 /* floating point class one */
4213 /* float/float */
4214 { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
4215 { 0x38002200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_d },
4216 /* int/float */
4217 { 0x38008200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_w_s },
4218 { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
4219 { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
4220 { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
4221 /* float/int */
4222 { 0x38010200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_w },
4223 { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
4224 { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
4225 { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
4226 /* float/int truncate */
4227 { 0x38018200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_w },
4228 { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
4229 { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
4230 { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
4231 /* uint/float */
4232 { 0x38028200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_uw_s },
4233 { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
4234 { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
4235 { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
4236 /* float/uint */
4237 { 0x38030200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_uw },
4238 { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
4239 { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
4240 { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
4241 /* float/uint truncate */
4242 { 0x38038200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_uw },
4243 { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
4244 { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
4245 { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
4247 /* floating point class two */
4248 { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
4249 { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
4250 };
4252 #undef FOP_WEW
4253 #undef FOP_DEW
4254 #undef FOP_WED
4255 #undef FOP_WEWW
4256 #undef FOP_DED
4257 #undef FOP_DEDD
4259 /* Convert the fmpyadd single-precision register encodings to standard. */
4260 static inline int fmpyadd_s_reg(unsigned r)
4261 {
4262 return (r & 16) * 2 + 16 + (r & 15);
4263 }
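/* Illustrative mapping, if I follow the halved-register numbering used
   by load_frw/save_frw: encodings 0-15 and 16-31 both select fr16-fr31
   (16 + (r & 15)); bit 4 of the encoding is shifted up to bit 5 of the
   result to choose the register half.  E.g.:
       fmpyadd_s_reg(4)  == 20
       fmpyadd_s_reg(20) == 52
   and 52 == 32 + 20, i.e. the other half of the same fr20.  */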
4265 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4266 {
4267 int tm = fmpyadd_s_reg(a->tm);
4268 int ra = fmpyadd_s_reg(a->ra);
4269 int ta = fmpyadd_s_reg(a->ta);
4270 int rm2 = fmpyadd_s_reg(a->rm2);
4271 int rm1 = fmpyadd_s_reg(a->rm1);
4273 nullify_over(ctx);
4275 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4276 do_fop_weww(ctx, ta, ta, ra,
4277 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4279 return nullify_end(ctx);
4280 }
4282 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4283 {
4284 return do_fmpyadd_s(ctx, a, false);
4285 }
4287 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4288 {
4289 return do_fmpyadd_s(ctx, a, true);
4290 }
4292 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4293 {
4294 nullify_over(ctx);
4296 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4297 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4298 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4300 return nullify_end(ctx);
4301 }
4303 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4304 {
4305 return do_fmpyadd_d(ctx, a, false);
4306 }
4308 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4309 {
4310 return do_fmpyadd_d(ctx, a, true);
4311 }
4313 static bool trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
4314 const DisasInsn *di)
4315 {
4316 unsigned rt = assemble_rt64(insn);
4317 unsigned neg = extract32(insn, 5, 1);
4318 unsigned rm1 = assemble_ra64(insn);
4319 unsigned rm2 = assemble_rb64(insn);
4320 unsigned ra3 = assemble_rc64(insn);
4321 TCGv_i32 a, b, c;
4323 nullify_over(ctx);
4324 a = load_frw0_i32(rm1);
4325 b = load_frw0_i32(rm2);
4326 c = load_frw0_i32(ra3);
4328 if (neg) {
4329 gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
4330 } else {
4331 gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
4332 }
4334 tcg_temp_free_i32(b);
4335 tcg_temp_free_i32(c);
4336 save_frw_i32(rt, a);
4337 tcg_temp_free_i32(a);
4338 return nullify_end(ctx);
4339 }
4341 static bool trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
4342 const DisasInsn *di)
4343 {
4344 unsigned rt = extract32(insn, 0, 5);
4345 unsigned neg = extract32(insn, 5, 1);
4346 unsigned rm1 = extract32(insn, 21, 5);
4347 unsigned rm2 = extract32(insn, 16, 5);
4348 unsigned ra3 = assemble_rc64(insn);
4349 TCGv_i64 a, b, c;
4351 nullify_over(ctx);
4352 a = load_frd0(rm1);
4353 b = load_frd0(rm2);
4354 c = load_frd0(ra3);
4356 if (neg) {
4357 gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
4358 } else {
4359 gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
4360 }
4362 tcg_temp_free_i64(b);
4363 tcg_temp_free_i64(c);
4364 save_frd(rt, a);
4365 tcg_temp_free_i64(a);
4366 return nullify_end(ctx);
4367 }
4369 static const DisasInsn table_fp_fused[] = {
4370 { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
4371 { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
4372 };
4374 static void translate_table_int(DisasContext *ctx, uint32_t insn,
4375 const DisasInsn table[], size_t n)
4376 {
4377 size_t i;
4378 for (i = 0; i < n; ++i) {
4379 if ((insn & table[i].mask) == table[i].insn) {
4380 table[i].trans(ctx, insn, &table[i]);
4381 return;
4382 }
4383 }
4384 qemu_log_mask(LOG_UNIMP, "UNIMP insn %08x @ " TARGET_FMT_lx "\n",
4385 insn, ctx->base.pc_next);
4386 gen_illegal(ctx);
4387 }
4389 #define translate_table(ctx, insn, table) \
4390 translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
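/* Dispatch sketch: the tables are scanned linearly and the first entry
   with (insn & mask) == opcode wins.  For example, table_float_0c's
       { 0x30004000, 0xfc1fffe0, ... }
   accepts any word whose fixed bits equal 0x30004000, leaving bits 0-4
   (rt) and 21-25 (ra) free; overlapping entries must therefore be
   ordered most-specific first.  */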
4392 static void translate_one(DisasContext *ctx, uint32_t insn)
4393 {
4394 uint32_t opc;
4396 /* Transition to the auto-generated decoder. */
4397 if (decode(ctx, insn)) {
4398 return;
4399 }
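/* Not yet converted to decodetree: fall back to dispatching on the
   major opcode, which PA-RISC keeps in the six most-significant bits
   of the word (hence extract32(insn, 26, 6)).  */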
4401 opc = extract32(insn, 26, 6);
4402 switch (opc) {
4403 case 0x08:
4404 trans_ldil(ctx, insn);
4405 return;
4406 case 0x09:
4407 trans_copr_w(ctx, insn);
4408 return;
4409 case 0x0A:
4410 trans_addil(ctx, insn);
4411 return;
4412 case 0x0B:
4413 trans_copr_dw(ctx, insn);
4414 return;
4415 case 0x0C:
4416 translate_table(ctx, insn, table_float_0c);
4417 return;
4418 case 0x0D:
4419 trans_ldo(ctx, insn);
4420 return;
4421 case 0x0E:
4422 translate_table(ctx, insn, table_float_0e);
4423 return;
4425 case 0x10:
4426 trans_load(ctx, insn, false, MO_UB);
4427 return;
4428 case 0x11:
4429 trans_load(ctx, insn, false, MO_TEUW);
4430 return;
4431 case 0x12:
4432 trans_load(ctx, insn, false, MO_TEUL);
4433 return;
4434 case 0x13:
4435 trans_load(ctx, insn, true, MO_TEUL);
4436 return;
4437 case 0x16:
4438 trans_fload_mod(ctx, insn);
4439 return;
4440 case 0x17:
4441 trans_load_w(ctx, insn);
4442 return;
4443 case 0x18:
4444 trans_store(ctx, insn, false, MO_UB);
4445 return;
4446 case 0x19:
4447 trans_store(ctx, insn, false, MO_TEUW);
4448 return;
4449 case 0x1A:
4450 trans_store(ctx, insn, false, MO_TEUL);
4451 return;
4452 case 0x1B:
4453 trans_store(ctx, insn, true, MO_TEUL);
4454 return;
4455 case 0x1E:
4456 trans_fstore_mod(ctx, insn);
4457 return;
4458 case 0x1F:
4459 trans_store_w(ctx, insn);
4460 return;
4462 case 0x24:
4463 trans_cmpiclr(ctx, insn);
4464 return;
4465 case 0x25:
4466 trans_subi(ctx, insn);
4467 return;
4468 case 0x2C:
4469 case 0x2D:
4470 trans_addi(ctx, insn);
4471 return;
4472 case 0x2E:
4473 translate_table(ctx, insn, table_fp_fused);
4474 return;
4476 case 0x34:
4477 translate_table(ctx, insn, table_sh_ex);
4478 return;
4479 case 0x35:
4480 translate_table(ctx, insn, table_depw);
4481 return;
4482 case 0x38:
4483 trans_be(ctx, insn, false);
4484 return;
4485 case 0x39:
4486 trans_be(ctx, insn, true);
4487 return;
4488 case 0x3A:
4489 translate_table(ctx, insn, table_branch);
4490 return;
4492 case 0x04: /* spopn */
4493 case 0x05: /* diag */
4494 case 0x0F: /* product specific */
4495 break;
4497 case 0x07: /* unassigned */
4498 case 0x15: /* unassigned */
4499 case 0x1D: /* unassigned */
4500 case 0x37: /* unassigned */
4501 break;
4502 case 0x3F:
4503 #ifndef CONFIG_USER_ONLY
4504 /* Unassigned, but use as system-halt. */
4505 if (insn == 0xfffdead0) {
4506 gen_hlt(ctx, 0); /* halt system */
4507 return;
4508 }
4509 if (insn == 0xfffdead1) {
4510 gen_hlt(ctx, 1); /* reset system */
4511 return;
4512 }
4513 #endif
4514 break;
4515 default:
4516 break;
4517 }
4518 gen_illegal(ctx);
4519 }
4521 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4522 {
4523 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4524 int bound;
4526 ctx->cs = cs;
4527 ctx->tb_flags = ctx->base.tb->flags;
4529 #ifdef CONFIG_USER_ONLY
4530 ctx->privilege = MMU_USER_IDX;
4531 ctx->mmu_idx = MMU_USER_IDX;
4532 ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
4533 ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
4534 #else
4535 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4536 ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
4538 /* Recover the IAOQ values from the GVA + PRIV. */
4539 uint64_t cs_base = ctx->base.tb->cs_base;
4540 uint64_t iasq_f = cs_base & ~0xffffffffull;
4541 int32_t diff = cs_base;
4543 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4544 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
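/* Sketch of the packing, as I read it: the top 32 bits of cs_base carry
   the front space (IASQ_F), the low 32 bits carry the signed offset from
   IAOQ_F to IAOQ_B, with 0 meaning "back address unknown" (hence the -1
   sentinel).  The privilege level is added back because the low two bits
   of an IAOQ value architecturally hold the PL.  */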
4545 #endif
4546 ctx->iaoq_n = -1;
4547 ctx->iaoq_n_var = NULL;
4549 /* Bound the number of instructions by those left on the page. */
4550 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4551 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
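/* Worked example, assuming 4 KiB pages: pc_first == 0x2ff8 gives
   -(0x2ff8 | 0xfffff000) == 8 bytes left on the page, i.e. a bound of
   2 insns, so translation never runs off the end of the page.  */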
4553 ctx->ntempr = 0;
4554 ctx->ntempl = 0;
4555 memset(ctx->tempr, 0, sizeof(ctx->tempr));
4556 memset(ctx->templ, 0, sizeof(ctx->templ));
4557 }
4559 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4560 {
4561 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4563 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4564 ctx->null_cond = cond_make_f();
4565 ctx->psw_n_nonzero = false;
4566 if (ctx->tb_flags & PSW_N) {
4567 ctx->null_cond.c = TCG_COND_ALWAYS;
4568 ctx->psw_n_nonzero = true;
4569 }
4570 ctx->null_lab = NULL;
4571 }
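/* In effect, PSW[N] set on TB entry means the first insn is nullified;
   seeding null_cond with TCG_COND_ALWAYS lets hppa_tr_translate_insn
   skip it via the same machinery used for in-TB nullification.  */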
4573 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4574 {
4575 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4577 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4578 }
4580 static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
4581 const CPUBreakpoint *bp)
4582 {
4583 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4585 gen_excp(ctx, EXCP_DEBUG);
4586 ctx->base.pc_next += 4;
4587 return true;
4588 }
4590 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4591 {
4592 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4593 CPUHPPAState *env = cs->env_ptr;
4594 DisasJumpType ret;
4595 int i, n;
4597 /* Execute one insn. */
4598 #ifdef CONFIG_USER_ONLY
4599 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4600 do_page_zero(ctx);
4601 ret = ctx->base.is_jmp;
4602 assert(ret != DISAS_NEXT);
4603 } else
4604 #endif
4605 {
4606 /* Always fetch the insn, even if nullified, so that the page's
4607 execute permission is still checked. */
4608 uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);
4610 /* Set up the IA queue for the next insn.
4611 This will be overwritten by a branch. */
4612 if (ctx->iaoq_b == -1) {
4613 ctx->iaoq_n = -1;
4614 ctx->iaoq_n_var = get_temp(ctx);
4615 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4616 } else {
4617 ctx->iaoq_n = ctx->iaoq_b + 4;
4618 ctx->iaoq_n_var = NULL;
4619 }
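/* Convention used here: -1 in iaoq_b/iaoq_n means "not known at
   translation time", in which case the value lives in a TCG temp
   (iaoq_n_var / cpu_iaoq_b) rather than a compile-time constant.  */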
4621 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4622 ctx->null_cond.c = TCG_COND_NEVER;
4623 ret = DISAS_NEXT;
4624 } else {
4625 ctx->insn = insn;
4626 translate_one(ctx, insn);
4627 ret = ctx->base.is_jmp;
4628 assert(ctx->null_lab == NULL);
4629 }
4630 }
4632 /* Free any temporaries allocated. */
4633 for (i = 0, n = ctx->ntempr; i < n; ++i) {
4634 tcg_temp_free(ctx->tempr[i]);
4635 ctx->tempr[i] = NULL;
4636 }
4637 for (i = 0, n = ctx->ntempl; i < n; ++i) {
4638 tcg_temp_free_tl(ctx->templ[i]);
4639 ctx->templ[i] = NULL;
4640 }
4641 ctx->ntempr = 0;
4642 ctx->ntempl = 0;
4644 /* Advance the insn queue. Note that this check also detects
4645 a priority change within the instruction queue. */
4646 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4647 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4648 && use_goto_tb(ctx, ctx->iaoq_b)
4649 && (ctx->null_cond.c == TCG_COND_NEVER
4650 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4651 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4652 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4653 ctx->base.is_jmp = ret = DISAS_NORETURN;
4654 } else {
4655 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4656 }
4657 }
4658 ctx->iaoq_f = ctx->iaoq_b;
4659 ctx->iaoq_b = ctx->iaoq_n;
4660 ctx->base.pc_next += 4;
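/* The shift above (iaoq_f <- iaoq_b, iaoq_b <- iaoq_n) mirrors the
   architectural two-entry instruction-address queue advancing by one
   slot per insn.  */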
4662 if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
4663 return;
4664 }
4665 if (ctx->iaoq_f == -1) {
4666 tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4667 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4668 #ifndef CONFIG_USER_ONLY
4669 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4670 #endif
4671 nullify_save(ctx);
4672 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
4673 } else if (ctx->iaoq_b == -1) {
4674 tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4675 }
4676 }
4678 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4679 {
4680 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4681 DisasJumpType is_jmp = ctx->base.is_jmp;
4683 switch (is_jmp) {
4684 case DISAS_NORETURN:
4685 break;
4686 case DISAS_TOO_MANY:
4687 case DISAS_IAQ_N_STALE:
4688 case DISAS_IAQ_N_STALE_EXIT:
4689 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4690 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4691 nullify_save(ctx);
4692 /* FALLTHRU */
4693 case DISAS_IAQ_N_UPDATED:
4694 if (ctx->base.singlestep_enabled) {
4695 gen_excp_1(EXCP_DEBUG);
4696 } else if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
4697 tcg_gen_exit_tb(NULL, 0);
4698 } else {
4699 tcg_gen_lookup_and_goto_ptr();
4700 }
4701 break;
4702 default:
4703 g_assert_not_reached();
4704 }
4705 }
4707 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
4708 {
4709 target_ulong pc = dcbase->pc_first;
4711 #ifdef CONFIG_USER_ONLY
4712 switch (pc) {
4713 case 0x00:
4714 qemu_log("IN:\n0x00000000: (null)\n");
4715 return;
4716 case 0xb0:
4717 qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
4718 return;
4719 case 0xe0:
4720 qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
4721 return;
4722 case 0x100:
4723 qemu_log("IN:\n0x00000100: syscall\n");
4724 return;
4725 }
4726 #endif
4728 qemu_log("IN: %s\n", lookup_symbol(pc));
4729 log_target_disas(cs, pc, dcbase->tb->size);
4730 }
4732 static const TranslatorOps hppa_tr_ops = {
4733 .init_disas_context = hppa_tr_init_disas_context,
4734 .tb_start = hppa_tr_tb_start,
4735 .insn_start = hppa_tr_insn_start,
4736 .breakpoint_check = hppa_tr_breakpoint_check,
4737 .translate_insn = hppa_tr_translate_insn,
4738 .tb_stop = hppa_tr_tb_stop,
4739 .disas_log = hppa_tr_disas_log,
4740 };
4742 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
4743 {
4745 DisasContext ctx;
4746 translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
4747 }
4749 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
4750 target_ulong *data)
4751 {
4752 env->iaoq_f = data[0];
4753 if (data[1] != (target_ureg)-1) {
4754 env->iaoq_b = data[1];
4755 }
4756 /* Since we were executing the instruction at IAOQ_F, and took some
4757 sort of action that provoked the cpu_restore_state, we can infer
4758 that the instruction was not nullified. */
4759 env->psw_n = 0;
4760 }
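/* data[0]/data[1] are the iaoq_f/iaoq_b values recorded per insn by
   tcg_gen_insn_start() in hppa_tr_insn_start, so this rewinds the IA
   queue to the insn that caused the restore.  */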