2 * HPPA emulation cpu translation for qemu.
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "trace-tcg.h"
/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

/* TCGv_tl: a value wide enough to hold a target virtual address.  */
#if TARGET_LONG_BITS == 64
#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#define tcg_temp_free_tl     tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl              TCGv_i32
#define tcg_temp_new_tl      tcg_temp_new_i32
#define tcg_temp_free_tl     tcg_temp_free_i32
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
#endif

/* TCGv_reg: a value the width of a target general register.  */
#if TARGET_REGISTER_BITS == 64
#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_reg_new   tcg_global_reg_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64
#define tcg_temp_local_new   tcg_temp_local_new_i64
#define tcg_temp_free        tcg_temp_free_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg        tcg_const_i64
#define tcg_const_local_reg  tcg_const_local_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#if UINTPTR_MAX == UINT32_MAX
# define tcg_gen_trunc_reg_ptr(p, r) \
    tcg_gen_trunc_i64_i32(TCGV_PTR_TO_NAT(p), r)
#else
# define tcg_gen_trunc_reg_ptr(p, r) \
    tcg_gen_mov_i64(TCGV_PTR_TO_NAT(p), r)
#endif
#else
#define TCGv_reg             TCGv_i32
#define tcg_temp_new         tcg_temp_new_i32
#define tcg_global_reg_new   tcg_global_reg_new_i32
#define tcg_global_mem_new   tcg_global_mem_new_i32
#define tcg_temp_local_new   tcg_temp_local_new_i32
#define tcg_temp_free        tcg_temp_free_i32

#define tcg_gen_movi_reg     tcg_gen_movi_i32
#define tcg_gen_mov_reg      tcg_gen_mov_i32
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg    tcg_gen_ld_i32
#define tcg_gen_ld32s_reg    tcg_gen_ld_i32
#define tcg_gen_ld_reg       tcg_gen_ld_i32
#define tcg_gen_st8_reg      tcg_gen_st8_i32
#define tcg_gen_st16_reg     tcg_gen_st16_i32
#define tcg_gen_st32_reg     tcg_gen_st32_i32
#define tcg_gen_st_reg       tcg_gen_st_i32
#define tcg_gen_add_reg      tcg_gen_add_i32
#define tcg_gen_addi_reg     tcg_gen_addi_i32
#define tcg_gen_sub_reg      tcg_gen_sub_i32
#define tcg_gen_neg_reg      tcg_gen_neg_i32
#define tcg_gen_subfi_reg    tcg_gen_subfi_i32
#define tcg_gen_subi_reg     tcg_gen_subi_i32
#define tcg_gen_and_reg      tcg_gen_and_i32
#define tcg_gen_andi_reg     tcg_gen_andi_i32
#define tcg_gen_or_reg       tcg_gen_or_i32
#define tcg_gen_ori_reg      tcg_gen_ori_i32
#define tcg_gen_xor_reg      tcg_gen_xor_i32
#define tcg_gen_xori_reg     tcg_gen_xori_i32
#define tcg_gen_not_reg      tcg_gen_not_i32
#define tcg_gen_shl_reg      tcg_gen_shl_i32
#define tcg_gen_shli_reg     tcg_gen_shli_i32
#define tcg_gen_shr_reg      tcg_gen_shr_i32
#define tcg_gen_shri_reg     tcg_gen_shri_i32
#define tcg_gen_sar_reg      tcg_gen_sar_i32
#define tcg_gen_sari_reg     tcg_gen_sari_i32
#define tcg_gen_brcond_reg   tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg  tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg      tcg_gen_mul_i32
#define tcg_gen_muli_reg     tcg_gen_muli_i32
#define tcg_gen_div_reg      tcg_gen_div_i32
#define tcg_gen_rem_reg      tcg_gen_rem_i32
#define tcg_gen_divu_reg     tcg_gen_divu_i32
#define tcg_gen_remu_reg     tcg_gen_remu_i32
#define tcg_gen_discard_reg  tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg   tcg_gen_mov_i32
#define tcg_gen_ext32s_reg   tcg_gen_mov_i32
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i32
#define tcg_gen_eqv_reg      tcg_gen_eqv_i32
#define tcg_gen_nand_reg     tcg_gen_nand_i32
#define tcg_gen_nor_reg      tcg_gen_nor_i32
#define tcg_gen_orc_reg      tcg_gen_orc_i32
#define tcg_gen_clz_reg      tcg_gen_clz_i32
#define tcg_gen_ctz_reg      tcg_gen_ctz_i32
#define tcg_gen_clzi_reg     tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg     tcg_gen_rotl_i32
#define tcg_gen_rotli_reg    tcg_gen_rotli_i32
#define tcg_gen_rotr_reg     tcg_gen_rotr_i32
#define tcg_gen_rotri_reg    tcg_gen_rotri_i32
#define tcg_gen_deposit_reg  tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg  tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg        tcg_const_i32
#define tcg_const_local_reg  tcg_const_local_i32
#define tcg_gen_movcond_reg  tcg_gen_movcond_i32
#define tcg_gen_add2_reg     tcg_gen_add2_i32
#define tcg_gen_sub2_reg     tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#if UINTPTR_MAX == UINT32_MAX
# define tcg_gen_trunc_reg_ptr(p, r) \
    tcg_gen_mov_i32(TCGV_PTR_TO_NAT(p), r)
#else
# define tcg_gen_trunc_reg_ptr(p, r) \
    tcg_gen_extu_i32_i64(TCGV_PTR_TO_NAT(p), r)
#endif
#endif /* TARGET_REGISTER_BITS */
263 typedef struct DisasCond
{
270 typedef struct DisasContext
{
271 DisasContextBase base
;
/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
309 typedef struct DisasInsn
{
311 DisasJumpType (*trans
)(DisasContext
*ctx
, uint32_t insn
,
312 const struct DisasInsn
*f
);
314 void (*ttt
)(TCGv_reg
, TCGv_reg
, TCGv_reg
);
315 void (*weww
)(TCGv_i32
, TCGv_env
, TCGv_i32
, TCGv_i32
);
316 void (*dedd
)(TCGv_i64
, TCGv_env
, TCGv_i64
, TCGv_i64
);
317 void (*wew
)(TCGv_i32
, TCGv_env
, TCGv_i32
);
318 void (*ded
)(TCGv_i64
, TCGv_env
, TCGv_i64
);
319 void (*wed
)(TCGv_i32
, TCGv_env
, TCGv_i64
);
320 void (*dew
)(TCGv_i64
, TCGv_env
, TCGv_i32
);
324 /* global register indexes */
325 static TCGv_reg cpu_gr
[32];
326 static TCGv_i64 cpu_sr
[4];
327 static TCGv_i64 cpu_srH
;
328 static TCGv_reg cpu_iaoq_f
;
329 static TCGv_reg cpu_iaoq_b
;
330 static TCGv_i64 cpu_iasq_f
;
331 static TCGv_i64 cpu_iasq_b
;
332 static TCGv_reg cpu_sar
;
333 static TCGv_reg cpu_psw_n
;
334 static TCGv_reg cpu_psw_v
;
335 static TCGv_reg cpu_psw_cb
;
336 static TCGv_reg cpu_psw_cb_msb
;
338 #include "exec/gen-icount.h"
340 void hppa_translate_init(void)
342 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
344 typedef struct { TCGv_reg
*var
; const char *name
; int ofs
; } GlobalVar
;
345 static const GlobalVar vars
[] = {
346 { &cpu_sar
, "sar", offsetof(CPUHPPAState
, cr
[CR_SAR
]) },
357 /* Use the symbolic register names that match the disassembler. */
358 static const char gr_names
[32][4] = {
359 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
360 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
361 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
362 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
364 /* SR[4-7] are not global registers so that we can index them. */
365 static const char sr_names
[5][4] = {
366 "sr0", "sr1", "sr2", "sr3", "srH"
372 for (i
= 1; i
< 32; i
++) {
373 cpu_gr
[i
] = tcg_global_mem_new(cpu_env
,
374 offsetof(CPUHPPAState
, gr
[i
]),
377 for (i
= 0; i
< 4; i
++) {
378 cpu_sr
[i
] = tcg_global_mem_new_i64(cpu_env
,
379 offsetof(CPUHPPAState
, sr
[i
]),
382 cpu_srH
= tcg_global_mem_new_i64(cpu_env
,
383 offsetof(CPUHPPAState
, sr
[4]),
386 for (i
= 0; i
< ARRAY_SIZE(vars
); ++i
) {
387 const GlobalVar
*v
= &vars
[i
];
388 *v
->var
= tcg_global_mem_new(cpu_env
, v
->ofs
, v
->name
);
391 cpu_iasq_f
= tcg_global_mem_new_i64(cpu_env
,
392 offsetof(CPUHPPAState
, iasq_f
),
394 cpu_iasq_b
= tcg_global_mem_new_i64(cpu_env
,
395 offsetof(CPUHPPAState
, iasq_b
),
399 static DisasCond
cond_make_f(void)
408 static DisasCond
cond_make_n(void)
419 static DisasCond
cond_make_0(TCGCond c
, TCGv_reg a0
)
421 DisasCond r
= { .c
= c
, .a1
= NULL
, .a1_is_0
= true };
423 assert (c
!= TCG_COND_NEVER
&& c
!= TCG_COND_ALWAYS
);
424 r
.a0
= tcg_temp_new();
425 tcg_gen_mov_reg(r
.a0
, a0
);
430 static DisasCond
cond_make(TCGCond c
, TCGv_reg a0
, TCGv_reg a1
)
432 DisasCond r
= { .c
= c
};
434 assert (c
!= TCG_COND_NEVER
&& c
!= TCG_COND_ALWAYS
);
435 r
.a0
= tcg_temp_new();
436 tcg_gen_mov_reg(r
.a0
, a0
);
437 r
.a1
= tcg_temp_new();
438 tcg_gen_mov_reg(r
.a1
, a1
);
443 static void cond_prep(DisasCond
*cond
)
446 cond
->a1_is_0
= false;
447 cond
->a1
= tcg_const_reg(0);
451 static void cond_free(DisasCond
*cond
)
455 if (!cond
->a0_is_n
) {
456 tcg_temp_free(cond
->a0
);
458 if (!cond
->a1_is_0
) {
459 tcg_temp_free(cond
->a1
);
461 cond
->a0_is_n
= false;
462 cond
->a1_is_0
= false;
466 case TCG_COND_ALWAYS
:
467 cond
->c
= TCG_COND_NEVER
;
474 static TCGv_reg
get_temp(DisasContext
*ctx
)
476 unsigned i
= ctx
->ntempr
++;
477 g_assert(i
< ARRAY_SIZE(ctx
->tempr
));
478 return ctx
->tempr
[i
] = tcg_temp_new();
481 #ifndef CONFIG_USER_ONLY
482 static TCGv_tl
get_temp_tl(DisasContext
*ctx
)
484 unsigned i
= ctx
->ntempl
++;
485 g_assert(i
< ARRAY_SIZE(ctx
->templ
));
486 return ctx
->templ
[i
] = tcg_temp_new_tl();
490 static TCGv_reg
load_const(DisasContext
*ctx
, target_sreg v
)
492 TCGv_reg t
= get_temp(ctx
);
493 tcg_gen_movi_reg(t
, v
);
497 static TCGv_reg
load_gpr(DisasContext
*ctx
, unsigned reg
)
500 TCGv_reg t
= get_temp(ctx
);
501 tcg_gen_movi_reg(t
, 0);
508 static TCGv_reg
dest_gpr(DisasContext
*ctx
, unsigned reg
)
510 if (reg
== 0 || ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
511 return get_temp(ctx
);
517 static void save_or_nullify(DisasContext
*ctx
, TCGv_reg dest
, TCGv_reg t
)
519 if (ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
520 cond_prep(&ctx
->null_cond
);
521 tcg_gen_movcond_reg(ctx
->null_cond
.c
, dest
, ctx
->null_cond
.a0
,
522 ctx
->null_cond
.a1
, dest
, t
);
524 tcg_gen_mov_reg(dest
, t
);
528 static void save_gpr(DisasContext
*ctx
, unsigned reg
, TCGv_reg t
)
531 save_or_nullify(ctx
, cpu_gr
[reg
], t
);
535 #ifdef HOST_WORDS_BIGENDIAN
543 static TCGv_i32
load_frw_i32(unsigned rt
)
545 TCGv_i32 ret
= tcg_temp_new_i32();
546 tcg_gen_ld_i32(ret
, cpu_env
,
547 offsetof(CPUHPPAState
, fr
[rt
& 31])
548 + (rt
& 32 ? LO_OFS
: HI_OFS
));
552 static TCGv_i32
load_frw0_i32(unsigned rt
)
555 return tcg_const_i32(0);
557 return load_frw_i32(rt
);
561 static TCGv_i64
load_frw0_i64(unsigned rt
)
564 return tcg_const_i64(0);
566 TCGv_i64 ret
= tcg_temp_new_i64();
567 tcg_gen_ld32u_i64(ret
, cpu_env
,
568 offsetof(CPUHPPAState
, fr
[rt
& 31])
569 + (rt
& 32 ? LO_OFS
: HI_OFS
));
574 static void save_frw_i32(unsigned rt
, TCGv_i32 val
)
576 tcg_gen_st_i32(val
, cpu_env
,
577 offsetof(CPUHPPAState
, fr
[rt
& 31])
578 + (rt
& 32 ? LO_OFS
: HI_OFS
));
584 static TCGv_i64
load_frd(unsigned rt
)
586 TCGv_i64 ret
= tcg_temp_new_i64();
587 tcg_gen_ld_i64(ret
, cpu_env
, offsetof(CPUHPPAState
, fr
[rt
]));
591 static TCGv_i64
load_frd0(unsigned rt
)
594 return tcg_const_i64(0);
600 static void save_frd(unsigned rt
, TCGv_i64 val
)
602 tcg_gen_st_i64(val
, cpu_env
, offsetof(CPUHPPAState
, fr
[rt
]));
605 static void load_spr(DisasContext
*ctx
, TCGv_i64 dest
, unsigned reg
)
607 #ifdef CONFIG_USER_ONLY
608 tcg_gen_movi_i64(dest
, 0);
611 tcg_gen_mov_i64(dest
, cpu_sr
[reg
]);
612 } else if (ctx
->tb_flags
& TB_FLAG_SR_SAME
) {
613 tcg_gen_mov_i64(dest
, cpu_srH
);
615 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUHPPAState
, sr
[reg
]));
620 /* Skip over the implementation of an insn that has been nullified.
621 Use this when the insn is too complex for a conditional move. */
622 static void nullify_over(DisasContext
*ctx
)
624 if (ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
625 /* The always condition should have been handled in the main loop. */
626 assert(ctx
->null_cond
.c
!= TCG_COND_ALWAYS
);
628 ctx
->null_lab
= gen_new_label();
629 cond_prep(&ctx
->null_cond
);
631 /* If we're using PSW[N], copy it to a temp because... */
632 if (ctx
->null_cond
.a0_is_n
) {
633 ctx
->null_cond
.a0_is_n
= false;
634 ctx
->null_cond
.a0
= tcg_temp_new();
635 tcg_gen_mov_reg(ctx
->null_cond
.a0
, cpu_psw_n
);
637 /* ... we clear it before branching over the implementation,
638 so that (1) it's clear after nullifying this insn and
639 (2) if this insn nullifies the next, PSW[N] is valid. */
640 if (ctx
->psw_n_nonzero
) {
641 ctx
->psw_n_nonzero
= false;
642 tcg_gen_movi_reg(cpu_psw_n
, 0);
645 tcg_gen_brcond_reg(ctx
->null_cond
.c
, ctx
->null_cond
.a0
,
646 ctx
->null_cond
.a1
, ctx
->null_lab
);
647 cond_free(&ctx
->null_cond
);
651 /* Save the current nullification state to PSW[N]. */
652 static void nullify_save(DisasContext
*ctx
)
654 if (ctx
->null_cond
.c
== TCG_COND_NEVER
) {
655 if (ctx
->psw_n_nonzero
) {
656 tcg_gen_movi_reg(cpu_psw_n
, 0);
660 if (!ctx
->null_cond
.a0_is_n
) {
661 cond_prep(&ctx
->null_cond
);
662 tcg_gen_setcond_reg(ctx
->null_cond
.c
, cpu_psw_n
,
663 ctx
->null_cond
.a0
, ctx
->null_cond
.a1
);
664 ctx
->psw_n_nonzero
= true;
666 cond_free(&ctx
->null_cond
);
669 /* Set a PSW[N] to X. The intention is that this is used immediately
670 before a goto_tb/exit_tb, so that there is no fallthru path to other
671 code within the TB. Therefore we do not update psw_n_nonzero. */
672 static void nullify_set(DisasContext
*ctx
, bool x
)
674 if (ctx
->psw_n_nonzero
|| x
) {
675 tcg_gen_movi_reg(cpu_psw_n
, x
);
679 /* Mark the end of an instruction that may have been nullified.
680 This is the pair to nullify_over. */
681 static DisasJumpType
nullify_end(DisasContext
*ctx
, DisasJumpType status
)
683 TCGLabel
*null_lab
= ctx
->null_lab
;
685 /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
686 For UPDATED, we cannot update on the nullified path. */
687 assert(status
!= DISAS_IAQ_N_UPDATED
);
689 if (likely(null_lab
== NULL
)) {
690 /* The current insn wasn't conditional or handled the condition
691 applied to it without a branch, so the (new) setting of
692 NULL_COND can be applied directly to the next insn. */
695 ctx
->null_lab
= NULL
;
697 if (likely(ctx
->null_cond
.c
== TCG_COND_NEVER
)) {
698 /* The next instruction will be unconditional,
699 and NULL_COND already reflects that. */
700 gen_set_label(null_lab
);
702 /* The insn that we just executed is itself nullifying the next
703 instruction. Store the condition in the PSW[N] global.
704 We asserted PSW[N] = 0 in nullify_over, so that after the
705 label we have the proper value in place. */
707 gen_set_label(null_lab
);
708 ctx
->null_cond
= cond_make_n();
710 if (status
== DISAS_NORETURN
) {
716 static void copy_iaoq_entry(TCGv_reg dest
, target_ureg ival
, TCGv_reg vval
)
718 if (unlikely(ival
== -1)) {
719 tcg_gen_mov_reg(dest
, vval
);
721 tcg_gen_movi_reg(dest
, ival
);
725 static inline target_ureg
iaoq_dest(DisasContext
*ctx
, target_sreg disp
)
727 return ctx
->iaoq_f
+ disp
+ 8;
730 static void gen_excp_1(int exception
)
732 TCGv_i32 t
= tcg_const_i32(exception
);
733 gen_helper_excp(cpu_env
, t
);
734 tcg_temp_free_i32(t
);
737 static DisasJumpType
gen_excp(DisasContext
*ctx
, int exception
)
739 copy_iaoq_entry(cpu_iaoq_f
, ctx
->iaoq_f
, cpu_iaoq_f
);
740 copy_iaoq_entry(cpu_iaoq_b
, ctx
->iaoq_b
, cpu_iaoq_b
);
742 gen_excp_1(exception
);
743 return DISAS_NORETURN
;
746 static DisasJumpType
gen_excp_iir(DisasContext
*ctx
, int exc
)
748 TCGv_reg tmp
= tcg_const_reg(ctx
->insn
);
749 tcg_gen_st_reg(tmp
, cpu_env
, offsetof(CPUHPPAState
, cr
[CR_IIR
]));
751 return gen_excp(ctx
, exc
);
754 static DisasJumpType
gen_illegal(DisasContext
*ctx
)
757 return nullify_end(ctx
, gen_excp_iir(ctx
, EXCP_ILL
));
/* Bail out of the current insn with EXCP unless running at the most
   privileged level (PL 0).  */
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            nullify_over(ctx);               \
            return nullify_end(ctx, gen_excp_iir(ctx, EXCP)); \
        }                                    \
    } while (0)
768 static bool use_goto_tb(DisasContext
*ctx
, target_ureg dest
)
770 /* Suppress goto_tb in the case of single-steping and IO. */
771 if ((tb_cflags(ctx
->base
.tb
) & CF_LAST_IO
) || ctx
->base
.singlestep_enabled
) {
777 /* If the next insn is to be nullified, and it's on the same page,
778 and we're not attempting to set a breakpoint on it, then we can
779 totally skip the nullified insn. This avoids creating and
780 executing a TB that merely branches to the next TB. */
781 static bool use_nullify_skip(DisasContext
*ctx
)
783 return (((ctx
->iaoq_b
^ ctx
->iaoq_f
) & TARGET_PAGE_MASK
) == 0
784 && !cpu_breakpoint_test(ctx
->cs
, ctx
->iaoq_b
, BP_ANY
));
787 static void gen_goto_tb(DisasContext
*ctx
, int which
,
788 target_ureg f
, target_ureg b
)
790 if (f
!= -1 && b
!= -1 && use_goto_tb(ctx
, f
)) {
791 tcg_gen_goto_tb(which
);
792 tcg_gen_movi_reg(cpu_iaoq_f
, f
);
793 tcg_gen_movi_reg(cpu_iaoq_b
, b
);
794 tcg_gen_exit_tb((uintptr_t)ctx
->base
.tb
+ which
);
796 copy_iaoq_entry(cpu_iaoq_f
, f
, cpu_iaoq_b
);
797 copy_iaoq_entry(cpu_iaoq_b
, b
, ctx
->iaoq_n_var
);
798 if (ctx
->base
.singlestep_enabled
) {
799 gen_excp_1(EXCP_DEBUG
);
801 tcg_gen_lookup_and_goto_ptr();
806 /* PA has a habit of taking the LSB of a field and using that as the sign,
807 with the rest of the field becoming the least significant bits. */
808 static target_sreg
low_sextract(uint32_t val
, int pos
, int len
)
810 target_ureg x
= -(target_ureg
)extract32(val
, pos
, 1);
811 x
= (x
<< (len
- 1)) | extract32(val
, pos
+ 1, len
- 1);
/* Assemble the split 6-bit FP target register number (rt).  */
static unsigned assemble_rt64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 6, 1);
    unsigned r0 = extract32(insn, 0, 5);
    return r1 * 32 + r0;
}
/* Assemble the split 6-bit FP source register number (ra).  */
static unsigned assemble_ra64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 7, 1);
    unsigned r0 = extract32(insn, 21, 5);
    return r1 * 32 + r0;
}
/* Assemble the split 6-bit FP source register number (rb).  */
static unsigned assemble_rb64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 12, 1);
    unsigned r0 = extract32(insn, 16, 5);
    return r1 * 32 + r0;
}
/* Assemble the three-way-split 6-bit FP register number (rc).  */
static unsigned assemble_rc64(uint32_t insn)
{
    unsigned r2 = extract32(insn, 8, 1);
    unsigned r1 = extract32(insn, 13, 3);
    unsigned r0 = extract32(insn, 9, 2);
    return r2 * 32 + r1 * 4 + r0;
}
/* Assemble the split 3-bit space register number.  */
static unsigned assemble_sr3(uint32_t insn)
{
    unsigned s2 = extract32(insn, 13, 1);
    unsigned s0 = extract32(insn, 14, 2);
    return s2 * 4 + s0;
}
851 static target_sreg
assemble_12(uint32_t insn
)
853 target_ureg x
= -(target_ureg
)(insn
& 1);
854 x
= (x
<< 1) | extract32(insn
, 2, 1);
855 x
= (x
<< 10) | extract32(insn
, 3, 10);
859 static target_sreg
assemble_16(uint32_t insn
)
861 /* Take the name from PA2.0, which produces a 16-bit number
862 only with wide mode; otherwise a 14-bit number. Since we don't
863 implement wide mode, this is always the 14-bit number. */
864 return low_sextract(insn
, 0, 14);
867 static target_sreg
assemble_16a(uint32_t insn
)
869 /* Take the name from PA2.0, which produces a 14-bit shifted number
870 only with wide mode; otherwise a 12-bit shifted number. Since we
871 don't implement wide mode, this is always the 12-bit number. */
872 target_ureg x
= -(target_ureg
)(insn
& 1);
873 x
= (x
<< 11) | extract32(insn
, 2, 11);
877 static target_sreg
assemble_17(uint32_t insn
)
879 target_ureg x
= -(target_ureg
)(insn
& 1);
880 x
= (x
<< 5) | extract32(insn
, 16, 5);
881 x
= (x
<< 1) | extract32(insn
, 2, 1);
882 x
= (x
<< 10) | extract32(insn
, 3, 10);
886 static target_sreg
assemble_21(uint32_t insn
)
888 target_ureg x
= -(target_ureg
)(insn
& 1);
889 x
= (x
<< 11) | extract32(insn
, 1, 11);
890 x
= (x
<< 2) | extract32(insn
, 14, 2);
891 x
= (x
<< 5) | extract32(insn
, 16, 5);
892 x
= (x
<< 2) | extract32(insn
, 12, 2);
896 static target_sreg
assemble_22(uint32_t insn
)
898 target_ureg x
= -(target_ureg
)(insn
& 1);
899 x
= (x
<< 10) | extract32(insn
, 16, 10);
900 x
= (x
<< 1) | extract32(insn
, 2, 1);
901 x
= (x
<< 10) | extract32(insn
, 3, 10);
905 /* The parisc documentation describes only the general interpretation of
906 the conditions, without describing their exact implementation. The
907 interpretations do not stand up well when considering ADD,C and SUB,B.
908 However, considering the Addition, Subtraction and Logical conditions
909 as a whole it would appear that these relations are similar to what
910 a traditional NZCV set of flags would produce. */
912 static DisasCond
do_cond(unsigned cf
, TCGv_reg res
,
913 TCGv_reg cb_msb
, TCGv_reg sv
)
919 case 0: /* Never / TR */
920 cond
= cond_make_f();
922 case 1: /* = / <> (Z / !Z) */
923 cond
= cond_make_0(TCG_COND_EQ
, res
);
925 case 2: /* < / >= (N / !N) */
926 cond
= cond_make_0(TCG_COND_LT
, res
);
928 case 3: /* <= / > (N | Z / !N & !Z) */
929 cond
= cond_make_0(TCG_COND_LE
, res
);
931 case 4: /* NUV / UV (!C / C) */
932 cond
= cond_make_0(TCG_COND_EQ
, cb_msb
);
934 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
935 tmp
= tcg_temp_new();
936 tcg_gen_neg_reg(tmp
, cb_msb
);
937 tcg_gen_and_reg(tmp
, tmp
, res
);
938 cond
= cond_make_0(TCG_COND_EQ
, tmp
);
941 case 6: /* SV / NSV (V / !V) */
942 cond
= cond_make_0(TCG_COND_LT
, sv
);
944 case 7: /* OD / EV */
945 tmp
= tcg_temp_new();
946 tcg_gen_andi_reg(tmp
, res
, 1);
947 cond
= cond_make_0(TCG_COND_NE
, tmp
);
951 g_assert_not_reached();
954 cond
.c
= tcg_invert_cond(cond
.c
);
960 /* Similar, but for the special case of subtraction without borrow, we
961 can use the inputs directly. This can allow other computation to be
962 deleted as unused. */
964 static DisasCond
do_sub_cond(unsigned cf
, TCGv_reg res
,
965 TCGv_reg in1
, TCGv_reg in2
, TCGv_reg sv
)
971 cond
= cond_make(TCG_COND_EQ
, in1
, in2
);
974 cond
= cond_make(TCG_COND_LT
, in1
, in2
);
977 cond
= cond_make(TCG_COND_LE
, in1
, in2
);
979 case 4: /* << / >>= */
980 cond
= cond_make(TCG_COND_LTU
, in1
, in2
);
982 case 5: /* <<= / >> */
983 cond
= cond_make(TCG_COND_LEU
, in1
, in2
);
986 return do_cond(cf
, res
, sv
, sv
);
989 cond
.c
= tcg_invert_cond(cond
.c
);
995 /* Similar, but for logicals, where the carry and overflow bits are not
996 computed, and use of them is undefined. */
998 static DisasCond
do_log_cond(unsigned cf
, TCGv_reg res
)
1001 case 4: case 5: case 6:
1005 return do_cond(cf
, res
, res
, res
);
1008 /* Similar, but for shift/extract/deposit conditions. */
1010 static DisasCond
do_sed_cond(unsigned orig
, TCGv_reg res
)
1014 /* Convert the compressed condition codes to standard.
1015 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
1016 4-7 are the reverse of 0-3. */
1023 return do_log_cond(c
* 2 + f
, res
);
1026 /* Similar, but for unit conditions. */
1028 static DisasCond
do_unit_cond(unsigned cf
, TCGv_reg res
,
1029 TCGv_reg in1
, TCGv_reg in2
)
1032 TCGv_reg tmp
, cb
= NULL
;
1035 /* Since we want to test lots of carry-out bits all at once, do not
1036 * do our normal thing and compute carry-in of bit B+1 since that
1037 * leaves us with carry bits spread across two words.
1039 cb
= tcg_temp_new();
1040 tmp
= tcg_temp_new();
1041 tcg_gen_or_reg(cb
, in1
, in2
);
1042 tcg_gen_and_reg(tmp
, in1
, in2
);
1043 tcg_gen_andc_reg(cb
, cb
, res
);
1044 tcg_gen_or_reg(cb
, cb
, tmp
);
1049 case 0: /* never / TR */
1050 case 1: /* undefined */
1051 case 5: /* undefined */
1052 cond
= cond_make_f();
1055 case 2: /* SBZ / NBZ */
1056 /* See hasless(v,1) from
1057 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
1059 tmp
= tcg_temp_new();
1060 tcg_gen_subi_reg(tmp
, res
, 0x01010101u
);
1061 tcg_gen_andc_reg(tmp
, tmp
, res
);
1062 tcg_gen_andi_reg(tmp
, tmp
, 0x80808080u
);
1063 cond
= cond_make_0(TCG_COND_NE
, tmp
);
1067 case 3: /* SHZ / NHZ */
1068 tmp
= tcg_temp_new();
1069 tcg_gen_subi_reg(tmp
, res
, 0x00010001u
);
1070 tcg_gen_andc_reg(tmp
, tmp
, res
);
1071 tcg_gen_andi_reg(tmp
, tmp
, 0x80008000u
);
1072 cond
= cond_make_0(TCG_COND_NE
, tmp
);
1076 case 4: /* SDC / NDC */
1077 tcg_gen_andi_reg(cb
, cb
, 0x88888888u
);
1078 cond
= cond_make_0(TCG_COND_NE
, cb
);
1081 case 6: /* SBC / NBC */
1082 tcg_gen_andi_reg(cb
, cb
, 0x80808080u
);
1083 cond
= cond_make_0(TCG_COND_NE
, cb
);
1086 case 7: /* SHC / NHC */
1087 tcg_gen_andi_reg(cb
, cb
, 0x80008000u
);
1088 cond
= cond_make_0(TCG_COND_NE
, cb
);
1092 g_assert_not_reached();
1098 cond
.c
= tcg_invert_cond(cond
.c
);
1104 /* Compute signed overflow for addition. */
1105 static TCGv_reg
do_add_sv(DisasContext
*ctx
, TCGv_reg res
,
1106 TCGv_reg in1
, TCGv_reg in2
)
1108 TCGv_reg sv
= get_temp(ctx
);
1109 TCGv_reg tmp
= tcg_temp_new();
1111 tcg_gen_xor_reg(sv
, res
, in1
);
1112 tcg_gen_xor_reg(tmp
, in1
, in2
);
1113 tcg_gen_andc_reg(sv
, sv
, tmp
);
1119 /* Compute signed overflow for subtraction. */
1120 static TCGv_reg
do_sub_sv(DisasContext
*ctx
, TCGv_reg res
,
1121 TCGv_reg in1
, TCGv_reg in2
)
1123 TCGv_reg sv
= get_temp(ctx
);
1124 TCGv_reg tmp
= tcg_temp_new();
1126 tcg_gen_xor_reg(sv
, res
, in1
);
1127 tcg_gen_xor_reg(tmp
, in1
, in2
);
1128 tcg_gen_and_reg(sv
, sv
, tmp
);
1134 static DisasJumpType
do_add(DisasContext
*ctx
, unsigned rt
, TCGv_reg in1
,
1135 TCGv_reg in2
, unsigned shift
, bool is_l
,
1136 bool is_tsv
, bool is_tc
, bool is_c
, unsigned cf
)
1138 TCGv_reg dest
, cb
, cb_msb
, sv
, tmp
;
1139 unsigned c
= cf
>> 1;
1142 dest
= tcg_temp_new();
1147 tmp
= get_temp(ctx
);
1148 tcg_gen_shli_reg(tmp
, in1
, shift
);
1152 if (!is_l
|| c
== 4 || c
== 5) {
1153 TCGv_reg zero
= tcg_const_reg(0);
1154 cb_msb
= get_temp(ctx
);
1155 tcg_gen_add2_reg(dest
, cb_msb
, in1
, zero
, in2
, zero
);
1157 tcg_gen_add2_reg(dest
, cb_msb
, dest
, cb_msb
, cpu_psw_cb_msb
, zero
);
1159 tcg_temp_free(zero
);
1162 tcg_gen_xor_reg(cb
, in1
, in2
);
1163 tcg_gen_xor_reg(cb
, cb
, dest
);
1166 tcg_gen_add_reg(dest
, in1
, in2
);
1168 tcg_gen_add_reg(dest
, dest
, cpu_psw_cb_msb
);
1172 /* Compute signed overflow if required. */
1174 if (is_tsv
|| c
== 6) {
1175 sv
= do_add_sv(ctx
, dest
, in1
, in2
);
1177 /* ??? Need to include overflow from shift. */
1178 gen_helper_tsv(cpu_env
, sv
);
1182 /* Emit any conditional trap before any writeback. */
1183 cond
= do_cond(cf
, dest
, cb_msb
, sv
);
1186 tmp
= tcg_temp_new();
1187 tcg_gen_setcond_reg(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1188 gen_helper_tcond(cpu_env
, tmp
);
1192 /* Write back the result. */
1194 save_or_nullify(ctx
, cpu_psw_cb
, cb
);
1195 save_or_nullify(ctx
, cpu_psw_cb_msb
, cb_msb
);
1197 save_gpr(ctx
, rt
, dest
);
1198 tcg_temp_free(dest
);
1200 /* Install the new nullification. */
1201 cond_free(&ctx
->null_cond
);
1202 ctx
->null_cond
= cond
;
1206 static DisasJumpType
do_sub(DisasContext
*ctx
, unsigned rt
, TCGv_reg in1
,
1207 TCGv_reg in2
, bool is_tsv
, bool is_b
,
1208 bool is_tc
, unsigned cf
)
1210 TCGv_reg dest
, sv
, cb
, cb_msb
, zero
, tmp
;
1211 unsigned c
= cf
>> 1;
1214 dest
= tcg_temp_new();
1215 cb
= tcg_temp_new();
1216 cb_msb
= tcg_temp_new();
1218 zero
= tcg_const_reg(0);
1220 /* DEST,C = IN1 + ~IN2 + C. */
1221 tcg_gen_not_reg(cb
, in2
);
1222 tcg_gen_add2_reg(dest
, cb_msb
, in1
, zero
, cpu_psw_cb_msb
, zero
);
1223 tcg_gen_add2_reg(dest
, cb_msb
, dest
, cb_msb
, cb
, zero
);
1224 tcg_gen_xor_reg(cb
, cb
, in1
);
1225 tcg_gen_xor_reg(cb
, cb
, dest
);
1227 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1228 operations by seeding the high word with 1 and subtracting. */
1229 tcg_gen_movi_reg(cb_msb
, 1);
1230 tcg_gen_sub2_reg(dest
, cb_msb
, in1
, cb_msb
, in2
, zero
);
1231 tcg_gen_eqv_reg(cb
, in1
, in2
);
1232 tcg_gen_xor_reg(cb
, cb
, dest
);
1234 tcg_temp_free(zero
);
1236 /* Compute signed overflow if required. */
1238 if (is_tsv
|| c
== 6) {
1239 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
1241 gen_helper_tsv(cpu_env
, sv
);
1245 /* Compute the condition. We cannot use the special case for borrow. */
1247 cond
= do_sub_cond(cf
, dest
, in1
, in2
, sv
);
1249 cond
= do_cond(cf
, dest
, cb_msb
, sv
);
1252 /* Emit any conditional trap before any writeback. */
1255 tmp
= tcg_temp_new();
1256 tcg_gen_setcond_reg(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1257 gen_helper_tcond(cpu_env
, tmp
);
1261 /* Write back the result. */
1262 save_or_nullify(ctx
, cpu_psw_cb
, cb
);
1263 save_or_nullify(ctx
, cpu_psw_cb_msb
, cb_msb
);
1264 save_gpr(ctx
, rt
, dest
);
1265 tcg_temp_free(dest
);
1267 /* Install the new nullification. */
1268 cond_free(&ctx
->null_cond
);
1269 ctx
->null_cond
= cond
;
1273 static DisasJumpType
do_cmpclr(DisasContext
*ctx
, unsigned rt
, TCGv_reg in1
,
1274 TCGv_reg in2
, unsigned cf
)
1279 dest
= tcg_temp_new();
1280 tcg_gen_sub_reg(dest
, in1
, in2
);
1282 /* Compute signed overflow if required. */
1284 if ((cf
>> 1) == 6) {
1285 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
1288 /* Form the condition for the compare. */
1289 cond
= do_sub_cond(cf
, dest
, in1
, in2
, sv
);
1292 tcg_gen_movi_reg(dest
, 0);
1293 save_gpr(ctx
, rt
, dest
);
1294 tcg_temp_free(dest
);
1296 /* Install the new nullification. */
1297 cond_free(&ctx
->null_cond
);
1298 ctx
->null_cond
= cond
;
1302 static DisasJumpType
do_log(DisasContext
*ctx
, unsigned rt
, TCGv_reg in1
,
1303 TCGv_reg in2
, unsigned cf
,
1304 void (*fn
)(TCGv_reg
, TCGv_reg
, TCGv_reg
))
1306 TCGv_reg dest
= dest_gpr(ctx
, rt
);
1308 /* Perform the operation, and writeback. */
1310 save_gpr(ctx
, rt
, dest
);
1312 /* Install the new nullification. */
1313 cond_free(&ctx
->null_cond
);
1315 ctx
->null_cond
= do_log_cond(cf
, dest
);
1320 static DisasJumpType
do_unit(DisasContext
*ctx
, unsigned rt
, TCGv_reg in1
,
1321 TCGv_reg in2
, unsigned cf
, bool is_tc
,
1322 void (*fn
)(TCGv_reg
, TCGv_reg
, TCGv_reg
))
1328 dest
= dest_gpr(ctx
, rt
);
1330 save_gpr(ctx
, rt
, dest
);
1331 cond_free(&ctx
->null_cond
);
1333 dest
= tcg_temp_new();
1336 cond
= do_unit_cond(cf
, dest
, in1
, in2
);
1339 TCGv_reg tmp
= tcg_temp_new();
1341 tcg_gen_setcond_reg(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1342 gen_helper_tcond(cpu_env
, tmp
);
1345 save_gpr(ctx
, rt
, dest
);
1347 cond_free(&ctx
->null_cond
);
1348 ctx
->null_cond
= cond
;
1353 #ifndef CONFIG_USER_ONLY
1354 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1355 from the top 2 bits of the base register. There are a few system
1356 instructions that have a 3-bit space specifier, for which SR0 is
1357 not special. To handle this, pass ~SP. */
1358 static TCGv_i64
space_select(DisasContext
*ctx
, int sp
, TCGv_reg base
)
1368 spc
= get_temp_tl(ctx
);
1369 load_spr(ctx
, spc
, sp
);
1372 if (ctx
->tb_flags
& TB_FLAG_SR_SAME
) {
1376 ptr
= tcg_temp_new_ptr();
1377 tmp
= tcg_temp_new();
1378 spc
= get_temp_tl(ctx
);
1380 tcg_gen_shri_reg(tmp
, base
, TARGET_REGISTER_BITS
- 5);
1381 tcg_gen_andi_reg(tmp
, tmp
, 030);
1382 tcg_gen_trunc_reg_ptr(ptr
, tmp
);
1385 tcg_gen_add_ptr(ptr
, ptr
, cpu_env
);
1386 tcg_gen_ld_i64(spc
, ptr
, offsetof(CPUHPPAState
, sr
[4]));
1387 tcg_temp_free_ptr(ptr
);
1393 static void form_gva(DisasContext
*ctx
, TCGv_tl
*pgva
, TCGv_reg
*pofs
,
1394 unsigned rb
, unsigned rx
, int scale
, target_sreg disp
,
1395 unsigned sp
, int modify
, bool is_phys
)
1397 TCGv_reg base
= load_gpr(ctx
, rb
);
1400 /* Note that RX is mutually exclusive with DISP. */
1402 ofs
= get_temp(ctx
);
1403 tcg_gen_shli_reg(ofs
, cpu_gr
[rx
], scale
);
1404 tcg_gen_add_reg(ofs
, ofs
, base
);
1405 } else if (disp
|| modify
) {
1406 ofs
= get_temp(ctx
);
1407 tcg_gen_addi_reg(ofs
, base
, disp
);
1413 #ifdef CONFIG_USER_ONLY
1414 *pgva
= (modify
<= 0 ? ofs
: base
);
1416 TCGv_tl addr
= get_temp_tl(ctx
);
1417 tcg_gen_extu_reg_tl(addr
, modify
<= 0 ? ofs
: base
);
1418 if (ctx
->tb_flags
& PSW_W
) {
1419 tcg_gen_andi_tl(addr
, addr
, 0x3fffffffffffffffull
);
1422 tcg_gen_or_tl(addr
, addr
, space_select(ctx
, sp
, base
));
1428 /* Emit a memory load. The modify parameter should be
1429 * < 0 for pre-modify,
1430 * > 0 for post-modify,
1431 * = 0 for no base register update.
1433 static void do_load_32(DisasContext
*ctx
, TCGv_i32 dest
, unsigned rb
,
1434 unsigned rx
, int scale
, target_sreg disp
,
1435 unsigned sp
, int modify
, TCGMemOp mop
)
1440 /* Caller uses nullify_over/nullify_end. */
1441 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1443 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1444 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1445 tcg_gen_qemu_ld_reg(dest
, addr
, ctx
->mmu_idx
, mop
);
1447 save_gpr(ctx
, rb
, ofs
);
1451 static void do_load_64(DisasContext
*ctx
, TCGv_i64 dest
, unsigned rb
,
1452 unsigned rx
, int scale
, target_sreg disp
,
1453 unsigned sp
, int modify
, TCGMemOp mop
)
1458 /* Caller uses nullify_over/nullify_end. */
1459 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1461 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1462 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1463 tcg_gen_qemu_ld_i64(dest
, addr
, ctx
->mmu_idx
, mop
);
1465 save_gpr(ctx
, rb
, ofs
);
1469 static void do_store_32(DisasContext
*ctx
, TCGv_i32 src
, unsigned rb
,
1470 unsigned rx
, int scale
, target_sreg disp
,
1471 unsigned sp
, int modify
, TCGMemOp mop
)
1476 /* Caller uses nullify_over/nullify_end. */
1477 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1479 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1480 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1481 tcg_gen_qemu_st_i32(src
, addr
, ctx
->mmu_idx
, mop
);
1483 save_gpr(ctx
, rb
, ofs
);
1487 static void do_store_64(DisasContext
*ctx
, TCGv_i64 src
, unsigned rb
,
1488 unsigned rx
, int scale
, target_sreg disp
,
1489 unsigned sp
, int modify
, TCGMemOp mop
)
1494 /* Caller uses nullify_over/nullify_end. */
1495 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1497 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1498 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1499 tcg_gen_qemu_st_i64(src
, addr
, ctx
->mmu_idx
, mop
);
1501 save_gpr(ctx
, rb
, ofs
);
/* Register-width load/store helpers for the native GPR size.  */
#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif
1513 static DisasJumpType
do_load(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1514 unsigned rx
, int scale
, target_sreg disp
,
1515 unsigned sp
, int modify
, TCGMemOp mop
)
1522 /* No base register update. */
1523 dest
= dest_gpr(ctx
, rt
);
1525 /* Make sure if RT == RB, we see the result of the load. */
1526 dest
= get_temp(ctx
);
1528 do_load_reg(ctx
, dest
, rb
, rx
, scale
, disp
, sp
, modify
, mop
);
1529 save_gpr(ctx
, rt
, dest
);
1531 return nullify_end(ctx
, DISAS_NEXT
);
1534 static DisasJumpType
do_floadw(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1535 unsigned rx
, int scale
, target_sreg disp
,
1536 unsigned sp
, int modify
)
1542 tmp
= tcg_temp_new_i32();
1543 do_load_32(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUL
);
1544 save_frw_i32(rt
, tmp
);
1545 tcg_temp_free_i32(tmp
);
1548 gen_helper_loaded_fr0(cpu_env
);
1551 return nullify_end(ctx
, DISAS_NEXT
);
1554 static DisasJumpType
do_floadd(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1555 unsigned rx
, int scale
, target_sreg disp
,
1556 unsigned sp
, int modify
)
1562 tmp
= tcg_temp_new_i64();
1563 do_load_64(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEQ
);
1565 tcg_temp_free_i64(tmp
);
1568 gen_helper_loaded_fr0(cpu_env
);
1571 return nullify_end(ctx
, DISAS_NEXT
);
1574 static DisasJumpType
do_store(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1575 target_sreg disp
, unsigned sp
,
1576 int modify
, TCGMemOp mop
)
1579 do_store_reg(ctx
, load_gpr(ctx
, rt
), rb
, 0, 0, disp
, sp
, modify
, mop
);
1580 return nullify_end(ctx
, DISAS_NEXT
);
1583 static DisasJumpType
do_fstorew(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1584 unsigned rx
, int scale
, target_sreg disp
,
1585 unsigned sp
, int modify
)
1591 tmp
= load_frw_i32(rt
);
1592 do_store_32(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUL
);
1593 tcg_temp_free_i32(tmp
);
1595 return nullify_end(ctx
, DISAS_NEXT
);
1598 static DisasJumpType
do_fstored(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1599 unsigned rx
, int scale
, target_sreg disp
,
1600 unsigned sp
, int modify
)
1607 do_store_64(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEQ
);
1608 tcg_temp_free_i64(tmp
);
1610 return nullify_end(ctx
, DISAS_NEXT
);
1613 static DisasJumpType
do_fop_wew(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1614 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
))
1619 tmp
= load_frw0_i32(ra
);
1621 func(tmp
, cpu_env
, tmp
);
1623 save_frw_i32(rt
, tmp
);
1624 tcg_temp_free_i32(tmp
);
1625 return nullify_end(ctx
, DISAS_NEXT
);
1628 static DisasJumpType
do_fop_wed(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1629 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i64
))
1636 dst
= tcg_temp_new_i32();
1638 func(dst
, cpu_env
, src
);
1640 tcg_temp_free_i64(src
);
1641 save_frw_i32(rt
, dst
);
1642 tcg_temp_free_i32(dst
);
1643 return nullify_end(ctx
, DISAS_NEXT
);
1646 static DisasJumpType
do_fop_ded(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1647 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
))
1652 tmp
= load_frd0(ra
);
1654 func(tmp
, cpu_env
, tmp
);
1657 tcg_temp_free_i64(tmp
);
1658 return nullify_end(ctx
, DISAS_NEXT
);
1661 static DisasJumpType
do_fop_dew(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1662 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i32
))
1668 src
= load_frw0_i32(ra
);
1669 dst
= tcg_temp_new_i64();
1671 func(dst
, cpu_env
, src
);
1673 tcg_temp_free_i32(src
);
1675 tcg_temp_free_i64(dst
);
1676 return nullify_end(ctx
, DISAS_NEXT
);
1679 static DisasJumpType
do_fop_weww(DisasContext
*ctx
, unsigned rt
,
1680 unsigned ra
, unsigned rb
,
1681 void (*func
)(TCGv_i32
, TCGv_env
,
1682 TCGv_i32
, TCGv_i32
))
1687 a
= load_frw0_i32(ra
);
1688 b
= load_frw0_i32(rb
);
1690 func(a
, cpu_env
, a
, b
);
1692 tcg_temp_free_i32(b
);
1693 save_frw_i32(rt
, a
);
1694 tcg_temp_free_i32(a
);
1695 return nullify_end(ctx
, DISAS_NEXT
);
1698 static DisasJumpType
do_fop_dedd(DisasContext
*ctx
, unsigned rt
,
1699 unsigned ra
, unsigned rb
,
1700 void (*func
)(TCGv_i64
, TCGv_env
,
1701 TCGv_i64
, TCGv_i64
))
1709 func(a
, cpu_env
, a
, b
);
1711 tcg_temp_free_i64(b
);
1713 tcg_temp_free_i64(a
);
1714 return nullify_end(ctx
, DISAS_NEXT
);
1717 /* Emit an unconditional branch to a direct target, which may or may not
1718 have already had nullification handled. */
1719 static DisasJumpType
do_dbranch(DisasContext
*ctx
, target_ureg dest
,
1720 unsigned link
, bool is_n
)
1722 if (ctx
->null_cond
.c
== TCG_COND_NEVER
&& ctx
->null_lab
== NULL
) {
1724 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1728 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
1735 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1738 if (is_n
&& use_nullify_skip(ctx
)) {
1739 nullify_set(ctx
, 0);
1740 gen_goto_tb(ctx
, 0, dest
, dest
+ 4);
1742 nullify_set(ctx
, is_n
);
1743 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, dest
);
1746 nullify_end(ctx
, DISAS_NEXT
);
1748 nullify_set(ctx
, 0);
1749 gen_goto_tb(ctx
, 1, ctx
->iaoq_b
, ctx
->iaoq_n
);
1750 return DISAS_NORETURN
;
1754 /* Emit a conditional branch to a direct target. If the branch itself
1755 is nullified, we should have already used nullify_over. */
1756 static DisasJumpType
do_cbranch(DisasContext
*ctx
, target_sreg disp
, bool is_n
,
1759 target_ureg dest
= iaoq_dest(ctx
, disp
);
1760 TCGLabel
*taken
= NULL
;
1761 TCGCond c
= cond
->c
;
1764 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1766 /* Handle TRUE and NEVER as direct branches. */
1767 if (c
== TCG_COND_ALWAYS
) {
1768 return do_dbranch(ctx
, dest
, 0, is_n
&& disp
>= 0);
1770 if (c
== TCG_COND_NEVER
) {
1771 return do_dbranch(ctx
, ctx
->iaoq_n
, 0, is_n
&& disp
< 0);
1774 taken
= gen_new_label();
1776 tcg_gen_brcond_reg(c
, cond
->a0
, cond
->a1
, taken
);
1779 /* Not taken: Condition not satisfied; nullify on backward branches. */
1780 n
= is_n
&& disp
< 0;
1781 if (n
&& use_nullify_skip(ctx
)) {
1782 nullify_set(ctx
, 0);
1783 gen_goto_tb(ctx
, 0, ctx
->iaoq_n
, ctx
->iaoq_n
+ 4);
1785 if (!n
&& ctx
->null_lab
) {
1786 gen_set_label(ctx
->null_lab
);
1787 ctx
->null_lab
= NULL
;
1789 nullify_set(ctx
, n
);
1790 if (ctx
->iaoq_n
== -1) {
1791 /* The temporary iaoq_n_var died at the branch above.
1792 Regenerate it here instead of saving it. */
1793 tcg_gen_addi_reg(ctx
->iaoq_n_var
, cpu_iaoq_b
, 4);
1795 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, ctx
->iaoq_n
);
1798 gen_set_label(taken
);
1800 /* Taken: Condition satisfied; nullify on forward branches. */
1801 n
= is_n
&& disp
>= 0;
1802 if (n
&& use_nullify_skip(ctx
)) {
1803 nullify_set(ctx
, 0);
1804 gen_goto_tb(ctx
, 1, dest
, dest
+ 4);
1806 nullify_set(ctx
, n
);
1807 gen_goto_tb(ctx
, 1, ctx
->iaoq_b
, dest
);
1810 /* Not taken: the branch itself was nullified. */
1811 if (ctx
->null_lab
) {
1812 gen_set_label(ctx
->null_lab
);
1813 ctx
->null_lab
= NULL
;
1814 return DISAS_IAQ_N_STALE
;
1816 return DISAS_NORETURN
;
1820 /* Emit an unconditional branch to an indirect target. This handles
1821 nullification of the branch itself. */
1822 static DisasJumpType
do_ibranch(DisasContext
*ctx
, TCGv_reg dest
,
1823 unsigned link
, bool is_n
)
1825 TCGv_reg a0
, a1
, next
, tmp
;
1828 assert(ctx
->null_lab
== NULL
);
1830 if (ctx
->null_cond
.c
== TCG_COND_NEVER
) {
1832 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1834 next
= get_temp(ctx
);
1835 tcg_gen_mov_reg(next
, dest
);
1837 if (use_nullify_skip(ctx
)) {
1838 tcg_gen_mov_reg(cpu_iaoq_f
, next
);
1839 tcg_gen_addi_reg(cpu_iaoq_b
, next
, 4);
1840 nullify_set(ctx
, 0);
1841 return DISAS_IAQ_N_UPDATED
;
1843 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
1846 ctx
->iaoq_n_var
= next
;
1847 } else if (is_n
&& use_nullify_skip(ctx
)) {
1848 /* The (conditional) branch, B, nullifies the next insn, N,
1849 and we're allowed to skip execution N (no single-step or
1850 tracepoint in effect). Since the goto_ptr that we must use
1851 for the indirect branch consumes no special resources, we
1852 can (conditionally) skip B and continue execution. */
1853 /* The use_nullify_skip test implies we have a known control path. */
1854 tcg_debug_assert(ctx
->iaoq_b
!= -1);
1855 tcg_debug_assert(ctx
->iaoq_n
!= -1);
1857 /* We do have to handle the non-local temporary, DEST, before
1858 branching. Since IOAQ_F is not really live at this point, we
1859 can simply store DEST optimistically. Similarly with IAOQ_B. */
1860 tcg_gen_mov_reg(cpu_iaoq_f
, dest
);
1861 tcg_gen_addi_reg(cpu_iaoq_b
, dest
, 4);
1865 tcg_gen_movi_reg(cpu_gr
[link
], ctx
->iaoq_n
);
1867 tcg_gen_lookup_and_goto_ptr();
1868 return nullify_end(ctx
, DISAS_NEXT
);
1870 cond_prep(&ctx
->null_cond
);
1871 c
= ctx
->null_cond
.c
;
1872 a0
= ctx
->null_cond
.a0
;
1873 a1
= ctx
->null_cond
.a1
;
1875 tmp
= tcg_temp_new();
1876 next
= get_temp(ctx
);
1878 copy_iaoq_entry(tmp
, ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1879 tcg_gen_movcond_reg(c
, next
, a0
, a1
, tmp
, dest
);
1881 ctx
->iaoq_n_var
= next
;
1884 tcg_gen_movcond_reg(c
, cpu_gr
[link
], a0
, a1
, cpu_gr
[link
], tmp
);
1888 /* The branch nullifies the next insn, which means the state of N
1889 after the branch is the inverse of the state of N that applied
1891 tcg_gen_setcond_reg(tcg_invert_cond(c
), cpu_psw_n
, a0
, a1
);
1892 cond_free(&ctx
->null_cond
);
1893 ctx
->null_cond
= cond_make_n();
1894 ctx
->psw_n_nonzero
= true;
1896 cond_free(&ctx
->null_cond
);
1904 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1905 * IAOQ_Next{30..31} ← GR[b]{30..31};
1907 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1908 * which keeps the privilege level from being increased.
1910 static TCGv_reg
do_ibranch_priv(DisasContext
*ctx
, TCGv_reg offset
)
1912 #ifdef CONFIG_USER_ONLY
1916 switch (ctx
->privilege
) {
1918 /* Privilege 0 is maximum and is allowed to decrease. */
1921 /* Privilege 3 is minimum and is never allowed increase. */
1922 dest
= get_temp(ctx
);
1923 tcg_gen_ori_reg(dest
, offset
, 3);
1926 dest
= tcg_temp_new();
1927 tcg_gen_andi_reg(dest
, offset
, -4);
1928 tcg_gen_ori_reg(dest
, dest
, ctx
->privilege
);
1929 tcg_gen_movcond_reg(TCG_COND_GTU
, dest
, dest
, offset
, dest
, offset
);
1930 tcg_temp_free(dest
);
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   in than the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static DisasJumpType do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privilaged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        return DISAS_NORETURN;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        return DISAS_NORETURN;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_mov_reg(cpu_iaoq_f, cpu_gr[31]);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        return DISAS_IAQ_N_UPDATED;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        return DISAS_NORETURN;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        return DISAS_NORETURN;
    }
}
#endif
1997 static DisasJumpType
trans_nop(DisasContext
*ctx
, uint32_t insn
,
1998 const DisasInsn
*di
)
2000 cond_free(&ctx
->null_cond
);
2004 static DisasJumpType
trans_break(DisasContext
*ctx
, uint32_t insn
,
2005 const DisasInsn
*di
)
2008 return nullify_end(ctx
, gen_excp_iir(ctx
, EXCP_BREAK
));
2011 static DisasJumpType
trans_sync(DisasContext
*ctx
, uint32_t insn
,
2012 const DisasInsn
*di
)
2014 /* No point in nullifying the memory barrier. */
2015 tcg_gen_mb(TCG_BAR_SC
| TCG_MO_ALL
);
2017 cond_free(&ctx
->null_cond
);
2021 static DisasJumpType
trans_mfia(DisasContext
*ctx
, uint32_t insn
,
2022 const DisasInsn
*di
)
2024 unsigned rt
= extract32(insn
, 0, 5);
2025 TCGv_reg tmp
= dest_gpr(ctx
, rt
);
2026 tcg_gen_movi_reg(tmp
, ctx
->iaoq_f
);
2027 save_gpr(ctx
, rt
, tmp
);
2029 cond_free(&ctx
->null_cond
);
2033 static DisasJumpType
trans_mfsp(DisasContext
*ctx
, uint32_t insn
,
2034 const DisasInsn
*di
)
2036 unsigned rt
= extract32(insn
, 0, 5);
2037 unsigned rs
= assemble_sr3(insn
);
2038 TCGv_i64 t0
= tcg_temp_new_i64();
2039 TCGv_reg t1
= tcg_temp_new();
2041 load_spr(ctx
, t0
, rs
);
2042 tcg_gen_shri_i64(t0
, t0
, 32);
2043 tcg_gen_trunc_i64_reg(t1
, t0
);
2045 save_gpr(ctx
, rt
, t1
);
2047 tcg_temp_free_i64(t0
);
2049 cond_free(&ctx
->null_cond
);
2053 static DisasJumpType
trans_mfctl(DisasContext
*ctx
, uint32_t insn
,
2054 const DisasInsn
*di
)
2056 unsigned rt
= extract32(insn
, 0, 5);
2057 unsigned ctl
= extract32(insn
, 21, 5);
2063 #ifdef TARGET_HPPA64
2064 if (extract32(insn
, 14, 1) == 0) {
2065 /* MFSAR without ,W masks low 5 bits. */
2066 tmp
= dest_gpr(ctx
, rt
);
2067 tcg_gen_andi_reg(tmp
, cpu_sar
, 31);
2068 save_gpr(ctx
, rt
, tmp
);
2072 save_gpr(ctx
, rt
, cpu_sar
);
2074 case CR_IT
: /* Interval Timer */
2075 /* FIXME: Respect PSW_S bit. */
2077 tmp
= dest_gpr(ctx
, rt
);
2078 if (ctx
->base
.tb
->cflags
& CF_USE_ICOUNT
) {
2080 gen_helper_read_interval_timer(tmp
);
2082 ret
= DISAS_IAQ_N_STALE
;
2084 gen_helper_read_interval_timer(tmp
);
2087 save_gpr(ctx
, rt
, tmp
);
2088 return nullify_end(ctx
, ret
);
2093 /* All other control registers are privileged. */
2094 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG
);
2098 tmp
= get_temp(ctx
);
2099 tcg_gen_ld_reg(tmp
, cpu_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2100 save_gpr(ctx
, rt
, tmp
);
2103 cond_free(&ctx
->null_cond
);
2107 static DisasJumpType
trans_mtsp(DisasContext
*ctx
, uint32_t insn
,
2108 const DisasInsn
*di
)
2110 unsigned rr
= extract32(insn
, 16, 5);
2111 unsigned rs
= assemble_sr3(insn
);
2115 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG
);
2119 t64
= tcg_temp_new_i64();
2120 tcg_gen_extu_reg_i64(t64
, load_gpr(ctx
, rr
));
2121 tcg_gen_shli_i64(t64
, t64
, 32);
2124 tcg_gen_st_i64(t64
, cpu_env
, offsetof(CPUHPPAState
, sr
[rs
]));
2125 ctx
->tb_flags
&= ~TB_FLAG_SR_SAME
;
2127 tcg_gen_mov_i64(cpu_sr
[rs
], t64
);
2129 tcg_temp_free_i64(t64
);
2131 return nullify_end(ctx
, DISAS_NEXT
);
2134 static DisasJumpType
trans_mtctl(DisasContext
*ctx
, uint32_t insn
,
2135 const DisasInsn
*di
)
2137 unsigned rin
= extract32(insn
, 16, 5);
2138 unsigned ctl
= extract32(insn
, 21, 5);
2139 TCGv_reg reg
= load_gpr(ctx
, rin
);
2142 if (ctl
== CR_SAR
) {
2143 tmp
= tcg_temp_new();
2144 tcg_gen_andi_reg(tmp
, reg
, TARGET_REGISTER_BITS
- 1);
2145 save_or_nullify(ctx
, cpu_sar
, tmp
);
2148 cond_free(&ctx
->null_cond
);
2152 /* All other control registers are privileged or read-only. */
2153 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG
);
2155 #ifdef CONFIG_USER_ONLY
2156 g_assert_not_reached();
2158 DisasJumpType ret
= DISAS_NEXT
;
2163 gen_helper_write_interval_timer(cpu_env
, reg
);
2166 gen_helper_write_eirr(cpu_env
, reg
);
2169 gen_helper_write_eiem(cpu_env
, reg
);
2170 ret
= DISAS_IAQ_N_STALE_EXIT
;
2175 /* FIXME: Respect PSW_Q bit */
2176 /* The write advances the queue and stores to the back element. */
2177 tmp
= get_temp(ctx
);
2178 tcg_gen_ld_reg(tmp
, cpu_env
,
2179 offsetof(CPUHPPAState
, cr_back
[ctl
- CR_IIASQ
]));
2180 tcg_gen_st_reg(tmp
, cpu_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2181 tcg_gen_st_reg(reg
, cpu_env
,
2182 offsetof(CPUHPPAState
, cr_back
[ctl
- CR_IIASQ
]));
2186 tcg_gen_st_reg(reg
, cpu_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2189 return nullify_end(ctx
, ret
);
2193 static DisasJumpType
trans_mtsarcm(DisasContext
*ctx
, uint32_t insn
,
2194 const DisasInsn
*di
)
2196 unsigned rin
= extract32(insn
, 16, 5);
2197 TCGv_reg tmp
= tcg_temp_new();
2199 tcg_gen_not_reg(tmp
, load_gpr(ctx
, rin
));
2200 tcg_gen_andi_reg(tmp
, tmp
, TARGET_REGISTER_BITS
- 1);
2201 save_or_nullify(ctx
, cpu_sar
, tmp
);
2204 cond_free(&ctx
->null_cond
);
2208 static DisasJumpType
trans_ldsid(DisasContext
*ctx
, uint32_t insn
,
2209 const DisasInsn
*di
)
2211 unsigned rt
= extract32(insn
, 0, 5);
2212 TCGv_reg dest
= dest_gpr(ctx
, rt
);
2214 /* Since we don't implement space registers, this returns zero. */
2215 tcg_gen_movi_reg(dest
, 0);
2216 save_gpr(ctx
, rt
, dest
);
2218 cond_free(&ctx
->null_cond
);
2222 #ifndef CONFIG_USER_ONLY
2223 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
2224 static target_ureg
extract_sm_imm(uint32_t insn
)
2226 target_ureg val
= extract32(insn
, 16, 10);
2228 if (val
& PSW_SM_E
) {
2229 val
= (val
& ~PSW_SM_E
) | PSW_E
;
2231 if (val
& PSW_SM_W
) {
2232 val
= (val
& ~PSW_SM_W
) | PSW_W
;
2237 static DisasJumpType
trans_rsm(DisasContext
*ctx
, uint32_t insn
,
2238 const DisasInsn
*di
)
2240 unsigned rt
= extract32(insn
, 0, 5);
2241 target_ureg sm
= extract_sm_imm(insn
);
2244 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2247 tmp
= get_temp(ctx
);
2248 tcg_gen_ld_reg(tmp
, cpu_env
, offsetof(CPUHPPAState
, psw
));
2249 tcg_gen_andi_reg(tmp
, tmp
, ~sm
);
2250 gen_helper_swap_system_mask(tmp
, cpu_env
, tmp
);
2251 save_gpr(ctx
, rt
, tmp
);
2253 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2254 return nullify_end(ctx
, DISAS_IAQ_N_STALE_EXIT
);
2257 static DisasJumpType
trans_ssm(DisasContext
*ctx
, uint32_t insn
,
2258 const DisasInsn
*di
)
2260 unsigned rt
= extract32(insn
, 0, 5);
2261 target_ureg sm
= extract_sm_imm(insn
);
2264 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2267 tmp
= get_temp(ctx
);
2268 tcg_gen_ld_reg(tmp
, cpu_env
, offsetof(CPUHPPAState
, psw
));
2269 tcg_gen_ori_reg(tmp
, tmp
, sm
);
2270 gen_helper_swap_system_mask(tmp
, cpu_env
, tmp
);
2271 save_gpr(ctx
, rt
, tmp
);
2273 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2274 return nullify_end(ctx
, DISAS_IAQ_N_STALE_EXIT
);
2277 static DisasJumpType
trans_mtsm(DisasContext
*ctx
, uint32_t insn
,
2278 const DisasInsn
*di
)
2280 unsigned rr
= extract32(insn
, 16, 5);
2283 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2286 reg
= load_gpr(ctx
, rr
);
2287 tmp
= get_temp(ctx
);
2288 gen_helper_swap_system_mask(tmp
, cpu_env
, reg
);
2290 /* Exit the TB to recognize new interrupts. */
2291 return nullify_end(ctx
, DISAS_IAQ_N_STALE_EXIT
);
2294 static DisasJumpType
trans_rfi(DisasContext
*ctx
, uint32_t insn
,
2295 const DisasInsn
*di
)
2297 unsigned comp
= extract32(insn
, 5, 4);
2299 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2303 gen_helper_rfi_r(cpu_env
);
2305 gen_helper_rfi(cpu_env
);
2307 if (ctx
->base
.singlestep_enabled
) {
2308 gen_excp_1(EXCP_DEBUG
);
2313 /* Exit the TB to recognize new interrupts. */
2314 return nullify_end(ctx
, DISAS_NORETURN
);
2317 static DisasJumpType
gen_hlt(DisasContext
*ctx
, int reset
)
2319 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2322 gen_helper_reset(cpu_env
);
2324 gen_helper_halt(cpu_env
);
2326 return nullify_end(ctx
, DISAS_NORETURN
);
2328 #endif /* !CONFIG_USER_ONLY */
2330 static const DisasInsn table_system
[] = {
2331 { 0x00000000u
, 0xfc001fe0u
, trans_break
},
2332 { 0x00001820u
, 0xffe01fffu
, trans_mtsp
},
2333 { 0x00001840u
, 0xfc00ffffu
, trans_mtctl
},
2334 { 0x016018c0u
, 0xffe0ffffu
, trans_mtsarcm
},
2335 { 0x000014a0u
, 0xffffffe0u
, trans_mfia
},
2336 { 0x000004a0u
, 0xffff1fe0u
, trans_mfsp
},
2337 { 0x000008a0u
, 0xfc1fbfe0u
, trans_mfctl
},
2338 { 0x00000400u
, 0xffffffffu
, trans_sync
}, /* sync */
2339 { 0x00100400u
, 0xffffffffu
, trans_sync
}, /* syncdma */
2340 { 0x000010a0u
, 0xfc1f3fe0u
, trans_ldsid
},
2341 #ifndef CONFIG_USER_ONLY
2342 { 0x00000e60u
, 0xfc00ffe0u
, trans_rsm
},
2343 { 0x00000d60u
, 0xfc00ffe0u
, trans_ssm
},
2344 { 0x00001860u
, 0xffe0ffffu
, trans_mtsm
},
2345 { 0x00000c00u
, 0xfffffe1fu
, trans_rfi
},
2349 static DisasJumpType
trans_base_idx_mod(DisasContext
*ctx
, uint32_t insn
,
2350 const DisasInsn
*di
)
2352 unsigned rb
= extract32(insn
, 21, 5);
2353 unsigned rx
= extract32(insn
, 16, 5);
2354 TCGv_reg dest
= dest_gpr(ctx
, rb
);
2355 TCGv_reg src1
= load_gpr(ctx
, rb
);
2356 TCGv_reg src2
= load_gpr(ctx
, rx
);
2358 /* The only thing we need to do is the base register modification. */
2359 tcg_gen_add_reg(dest
, src1
, src2
);
2360 save_gpr(ctx
, rb
, dest
);
2362 cond_free(&ctx
->null_cond
);
2366 static DisasJumpType
trans_probe(DisasContext
*ctx
, uint32_t insn
,
2367 const DisasInsn
*di
)
2369 unsigned rt
= extract32(insn
, 0, 5);
2370 unsigned sp
= extract32(insn
, 14, 2);
2371 unsigned rb
= extract32(insn
, 21, 5);
2372 unsigned is_write
= extract32(insn
, 6, 1);
2378 /* ??? Do something with priv level operand. */
2379 dest
= dest_gpr(ctx
, rt
);
2380 form_gva(ctx
, &addr
, &ofs
, rb
, 0, 0, 0, sp
, 0, false);
2382 gen_helper_probe_w(dest
, addr
);
2384 gen_helper_probe_r(dest
, addr
);
2386 save_gpr(ctx
, rt
, dest
);
2387 return nullify_end(ctx
, DISAS_NEXT
);
2390 #ifndef CONFIG_USER_ONLY
2391 static DisasJumpType
trans_ixtlbx(DisasContext
*ctx
, uint32_t insn
,
2392 const DisasInsn
*di
)
2395 unsigned rr
= extract32(insn
, 16, 5);
2396 unsigned rb
= extract32(insn
, 21, 5);
2397 unsigned is_data
= insn
& 0x1000;
2398 unsigned is_addr
= insn
& 0x40;
2403 sp
= extract32(insn
, 14, 2);
2405 sp
= ~assemble_sr3(insn
);
2408 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2411 form_gva(ctx
, &addr
, &ofs
, rb
, 0, 0, 0, sp
, 0, false);
2412 reg
= load_gpr(ctx
, rr
);
2414 gen_helper_itlba(cpu_env
, addr
, reg
);
2416 gen_helper_itlbp(cpu_env
, addr
, reg
);
2419 /* Exit TB for ITLB change if mmu is enabled. This *should* not be
2420 the case, since the OS TLB fill handler runs with mmu disabled. */
2421 return nullify_end(ctx
, !is_data
&& (ctx
->tb_flags
& PSW_C
)
2422 ? DISAS_IAQ_N_STALE
: DISAS_NEXT
);
2425 static DisasJumpType
trans_pxtlbx(DisasContext
*ctx
, uint32_t insn
,
2426 const DisasInsn
*di
)
2428 unsigned m
= extract32(insn
, 5, 1);
2430 unsigned rx
= extract32(insn
, 16, 5);
2431 unsigned rb
= extract32(insn
, 21, 5);
2432 unsigned is_data
= insn
& 0x1000;
2433 unsigned is_local
= insn
& 0x40;
2438 sp
= extract32(insn
, 14, 2);
2440 sp
= ~assemble_sr3(insn
);
2443 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2446 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, 0, 0, sp
, m
, false);
2448 save_gpr(ctx
, rb
, ofs
);
2451 gen_helper_ptlbe(cpu_env
);
2453 gen_helper_ptlb(cpu_env
, addr
);
2456 /* Exit TB for TLB change if mmu is enabled. */
2457 return nullify_end(ctx
, !is_data
&& (ctx
->tb_flags
& PSW_C
)
2458 ? DISAS_IAQ_N_STALE
: DISAS_NEXT
);
2461 static DisasJumpType
trans_lpa(DisasContext
*ctx
, uint32_t insn
,
2462 const DisasInsn
*di
)
2464 unsigned rt
= extract32(insn
, 0, 5);
2465 unsigned m
= extract32(insn
, 5, 1);
2466 unsigned sp
= extract32(insn
, 14, 2);
2467 unsigned rx
= extract32(insn
, 16, 5);
2468 unsigned rb
= extract32(insn
, 21, 5);
2470 TCGv_reg ofs
, paddr
;
2472 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2475 form_gva(ctx
, &vaddr
, &ofs
, rb
, rx
, 0, 0, sp
, m
, false);
2477 paddr
= tcg_temp_new();
2478 gen_helper_lpa(paddr
, cpu_env
, vaddr
);
2480 /* Note that physical address result overrides base modification. */
2482 save_gpr(ctx
, rb
, ofs
);
2484 save_gpr(ctx
, rt
, paddr
);
2485 tcg_temp_free(paddr
);
2487 return nullify_end(ctx
, DISAS_NEXT
);
2490 static DisasJumpType
trans_lci(DisasContext
*ctx
, uint32_t insn
,
2491 const DisasInsn
*di
)
2493 unsigned rt
= extract32(insn
, 0, 5);
2496 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2498 /* The Coherence Index is an implementation-defined function of the
2499 physical address. Two addresses with the same CI have a coherent
2500 view of the cache. Our implementation is to return 0 for all,
2501 since the entire address space is coherent. */
2502 ci
= tcg_const_reg(0);
2503 save_gpr(ctx
, rt
, ci
);
2508 #endif /* !CONFIG_USER_ONLY */
2510 static const DisasInsn table_mem_mgmt
[] = {
2511 { 0x04003280u
, 0xfc003fffu
, trans_nop
}, /* fdc, disp */
2512 { 0x04001280u
, 0xfc003fffu
, trans_nop
}, /* fdc, index */
2513 { 0x040012a0u
, 0xfc003fffu
, trans_base_idx_mod
}, /* fdc, index, base mod */
2514 { 0x040012c0u
, 0xfc003fffu
, trans_nop
}, /* fdce */
2515 { 0x040012e0u
, 0xfc003fffu
, trans_base_idx_mod
}, /* fdce, base mod */
2516 { 0x04000280u
, 0xfc001fffu
, trans_nop
}, /* fic 0a */
2517 { 0x040002a0u
, 0xfc001fffu
, trans_base_idx_mod
}, /* fic 0a, base mod */
2518 { 0x040013c0u
, 0xfc003fffu
, trans_nop
}, /* fic 4f */
2519 { 0x040013e0u
, 0xfc003fffu
, trans_base_idx_mod
}, /* fic 4f, base mod */
2520 { 0x040002c0u
, 0xfc001fffu
, trans_nop
}, /* fice */
2521 { 0x040002e0u
, 0xfc001fffu
, trans_base_idx_mod
}, /* fice, base mod */
2522 { 0x04002700u
, 0xfc003fffu
, trans_nop
}, /* pdc */
2523 { 0x04002720u
, 0xfc003fffu
, trans_base_idx_mod
}, /* pdc, base mod */
2524 { 0x04001180u
, 0xfc003fa0u
, trans_probe
}, /* probe */
2525 { 0x04003180u
, 0xfc003fa0u
, trans_probe
}, /* probei */
2526 #ifndef CONFIG_USER_ONLY
2527 { 0x04000000u
, 0xfc001fffu
, trans_ixtlbx
}, /* iitlbp */
2528 { 0x04000040u
, 0xfc001fffu
, trans_ixtlbx
}, /* iitlba */
2529 { 0x04001000u
, 0xfc001fffu
, trans_ixtlbx
}, /* idtlbp */
2530 { 0x04001040u
, 0xfc001fffu
, trans_ixtlbx
}, /* idtlba */
2531 { 0x04000200u
, 0xfc001fdfu
, trans_pxtlbx
}, /* pitlb */
2532 { 0x04000240u
, 0xfc001fdfu
, trans_pxtlbx
}, /* pitlbe */
2533 { 0x04001200u
, 0xfc001fdfu
, trans_pxtlbx
}, /* pdtlb */
2534 { 0x04001240u
, 0xfc001fdfu
, trans_pxtlbx
}, /* pdtlbe */
2535 { 0x04001340u
, 0xfc003fc0u
, trans_lpa
},
2536 { 0x04001300u
, 0xfc003fe0u
, trans_lci
},
2540 static DisasJumpType
trans_add(DisasContext
*ctx
, uint32_t insn
,
2541 const DisasInsn
*di
)
2543 unsigned r2
= extract32(insn
, 21, 5);
2544 unsigned r1
= extract32(insn
, 16, 5);
2545 unsigned cf
= extract32(insn
, 12, 4);
2546 unsigned ext
= extract32(insn
, 8, 4);
2547 unsigned shift
= extract32(insn
, 6, 2);
2548 unsigned rt
= extract32(insn
, 0, 5);
2549 TCGv_reg tcg_r1
, tcg_r2
;
2553 bool is_tsv
= false;
2557 case 0x6: /* ADD, SHLADD */
2559 case 0xa: /* ADD,L, SHLADD,L */
2562 case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
2565 case 0x7: /* ADD,C */
2568 case 0xf: /* ADD,C,TSV */
2569 is_c
= is_tsv
= true;
2572 return gen_illegal(ctx
);
2578 tcg_r1
= load_gpr(ctx
, r1
);
2579 tcg_r2
= load_gpr(ctx
, r2
);
2580 ret
= do_add(ctx
, rt
, tcg_r1
, tcg_r2
, shift
, is_l
, is_tsv
, is_tc
, is_c
, cf
);
2581 return nullify_end(ctx
, ret
);
2584 static DisasJumpType
trans_sub(DisasContext
*ctx
, uint32_t insn
,
2585 const DisasInsn
*di
)
2587 unsigned r2
= extract32(insn
, 21, 5);
2588 unsigned r1
= extract32(insn
, 16, 5);
2589 unsigned cf
= extract32(insn
, 12, 4);
2590 unsigned ext
= extract32(insn
, 6, 6);
2591 unsigned rt
= extract32(insn
, 0, 5);
2592 TCGv_reg tcg_r1
, tcg_r2
;
2595 bool is_tsv
= false;
2599 case 0x10: /* SUB */
2601 case 0x30: /* SUB,TSV */
2604 case 0x14: /* SUB,B */
2607 case 0x34: /* SUB,B,TSV */
2608 is_b
= is_tsv
= true;
2610 case 0x13: /* SUB,TC */
2613 case 0x33: /* SUB,TSV,TC */
2614 is_tc
= is_tsv
= true;
2617 return gen_illegal(ctx
);
2623 tcg_r1
= load_gpr(ctx
, r1
);
2624 tcg_r2
= load_gpr(ctx
, r2
);
2625 ret
= do_sub(ctx
, rt
, tcg_r1
, tcg_r2
, is_tsv
, is_b
, is_tc
, cf
);
2626 return nullify_end(ctx
, ret
);
2629 static DisasJumpType
trans_log(DisasContext
*ctx
, uint32_t insn
,
2630 const DisasInsn
*di
)
2632 unsigned r2
= extract32(insn
, 21, 5);
2633 unsigned r1
= extract32(insn
, 16, 5);
2634 unsigned cf
= extract32(insn
, 12, 4);
2635 unsigned rt
= extract32(insn
, 0, 5);
2636 TCGv_reg tcg_r1
, tcg_r2
;
2642 tcg_r1
= load_gpr(ctx
, r1
);
2643 tcg_r2
= load_gpr(ctx
, r2
);
2644 ret
= do_log(ctx
, rt
, tcg_r1
, tcg_r2
, cf
, di
->f
.ttt
);
2645 return nullify_end(ctx
, ret
);
2648 /* OR r,0,t -> COPY (according to gas) */
2649 static DisasJumpType
trans_copy(DisasContext
*ctx
, uint32_t insn
,
2650 const DisasInsn
*di
)
2652 unsigned r1
= extract32(insn
, 16, 5);
2653 unsigned rt
= extract32(insn
, 0, 5);
2656 TCGv_reg dest
= dest_gpr(ctx
, rt
);
2657 tcg_gen_movi_reg(dest
, 0);
2658 save_gpr(ctx
, rt
, dest
);
2660 save_gpr(ctx
, rt
, cpu_gr
[r1
]);
2662 cond_free(&ctx
->null_cond
);
2666 static DisasJumpType
trans_cmpclr(DisasContext
*ctx
, uint32_t insn
,
2667 const DisasInsn
*di
)
2669 unsigned r2
= extract32(insn
, 21, 5);
2670 unsigned r1
= extract32(insn
, 16, 5);
2671 unsigned cf
= extract32(insn
, 12, 4);
2672 unsigned rt
= extract32(insn
, 0, 5);
2673 TCGv_reg tcg_r1
, tcg_r2
;
2679 tcg_r1
= load_gpr(ctx
, r1
);
2680 tcg_r2
= load_gpr(ctx
, r2
);
2681 ret
= do_cmpclr(ctx
, rt
, tcg_r1
, tcg_r2
, cf
);
2682 return nullify_end(ctx
, ret
);
2685 static DisasJumpType
trans_uxor(DisasContext
*ctx
, uint32_t insn
,
2686 const DisasInsn
*di
)
2688 unsigned r2
= extract32(insn
, 21, 5);
2689 unsigned r1
= extract32(insn
, 16, 5);
2690 unsigned cf
= extract32(insn
, 12, 4);
2691 unsigned rt
= extract32(insn
, 0, 5);
2692 TCGv_reg tcg_r1
, tcg_r2
;
2698 tcg_r1
= load_gpr(ctx
, r1
);
2699 tcg_r2
= load_gpr(ctx
, r2
);
2700 ret
= do_unit(ctx
, rt
, tcg_r1
, tcg_r2
, cf
, false, tcg_gen_xor_reg
);
2701 return nullify_end(ctx
, ret
);
2704 static DisasJumpType
trans_uaddcm(DisasContext
*ctx
, uint32_t insn
,
2705 const DisasInsn
*di
)
2707 unsigned r2
= extract32(insn
, 21, 5);
2708 unsigned r1
= extract32(insn
, 16, 5);
2709 unsigned cf
= extract32(insn
, 12, 4);
2710 unsigned is_tc
= extract32(insn
, 6, 1);
2711 unsigned rt
= extract32(insn
, 0, 5);
2712 TCGv_reg tcg_r1
, tcg_r2
, tmp
;
2718 tcg_r1
= load_gpr(ctx
, r1
);
2719 tcg_r2
= load_gpr(ctx
, r2
);
2720 tmp
= get_temp(ctx
);
2721 tcg_gen_not_reg(tmp
, tcg_r2
);
2722 ret
= do_unit(ctx
, rt
, tcg_r1
, tmp
, cf
, is_tc
, tcg_gen_add_reg
);
2723 return nullify_end(ctx
, ret
);
2726 static DisasJumpType
trans_dcor(DisasContext
*ctx
, uint32_t insn
,
2727 const DisasInsn
*di
)
2729 unsigned r2
= extract32(insn
, 21, 5);
2730 unsigned cf
= extract32(insn
, 12, 4);
2731 unsigned is_i
= extract32(insn
, 6, 1);
2732 unsigned rt
= extract32(insn
, 0, 5);
2738 tmp
= get_temp(ctx
);
2739 tcg_gen_shri_reg(tmp
, cpu_psw_cb
, 3);
2741 tcg_gen_not_reg(tmp
, tmp
);
2743 tcg_gen_andi_reg(tmp
, tmp
, 0x11111111);
2744 tcg_gen_muli_reg(tmp
, tmp
, 6);
2745 ret
= do_unit(ctx
, rt
, tmp
, load_gpr(ctx
, r2
), cf
, false,
2746 is_i
? tcg_gen_add_reg
: tcg_gen_sub_reg
);
2748 return nullify_end(ctx
, ret
);
2751 static DisasJumpType
trans_ds(DisasContext
*ctx
, uint32_t insn
,
2752 const DisasInsn
*di
)
2754 unsigned r2
= extract32(insn
, 21, 5);
2755 unsigned r1
= extract32(insn
, 16, 5);
2756 unsigned cf
= extract32(insn
, 12, 4);
2757 unsigned rt
= extract32(insn
, 0, 5);
2758 TCGv_reg dest
, add1
, add2
, addc
, zero
, in1
, in2
;
2762 in1
= load_gpr(ctx
, r1
);
2763 in2
= load_gpr(ctx
, r2
);
2765 add1
= tcg_temp_new();
2766 add2
= tcg_temp_new();
2767 addc
= tcg_temp_new();
2768 dest
= tcg_temp_new();
2769 zero
= tcg_const_reg(0);
2771 /* Form R1 << 1 | PSW[CB]{8}. */
2772 tcg_gen_add_reg(add1
, in1
, in1
);
2773 tcg_gen_add_reg(add1
, add1
, cpu_psw_cb_msb
);
2775 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2776 carry{8} requires that we subtract via + ~R2 + 1, as described in
2777 the manual. By extracting and masking V, we can produce the
2778 proper inputs to the addition without movcond. */
2779 tcg_gen_sari_reg(addc
, cpu_psw_v
, TARGET_REGISTER_BITS
- 1);
2780 tcg_gen_xor_reg(add2
, in2
, addc
);
2781 tcg_gen_andi_reg(addc
, addc
, 1);
2782 /* ??? This is only correct for 32-bit. */
2783 tcg_gen_add2_i32(dest
, cpu_psw_cb_msb
, add1
, zero
, add2
, zero
);
2784 tcg_gen_add2_i32(dest
, cpu_psw_cb_msb
, dest
, cpu_psw_cb_msb
, addc
, zero
);
2786 tcg_temp_free(addc
);
2787 tcg_temp_free(zero
);
2789 /* Write back the result register. */
2790 save_gpr(ctx
, rt
, dest
);
2792 /* Write back PSW[CB]. */
2793 tcg_gen_xor_reg(cpu_psw_cb
, add1
, add2
);
2794 tcg_gen_xor_reg(cpu_psw_cb
, cpu_psw_cb
, dest
);
2796 /* Write back PSW[V] for the division step. */
2797 tcg_gen_neg_reg(cpu_psw_v
, cpu_psw_cb_msb
);
2798 tcg_gen_xor_reg(cpu_psw_v
, cpu_psw_v
, in2
);
2800 /* Install the new nullification. */
2804 /* ??? The lshift is supposed to contribute to overflow. */
2805 sv
= do_add_sv(ctx
, dest
, add1
, add2
);
2807 ctx
->null_cond
= do_cond(cf
, dest
, cpu_psw_cb_msb
, sv
);
2810 tcg_temp_free(add1
);
2811 tcg_temp_free(add2
);
2812 tcg_temp_free(dest
);
2814 return nullify_end(ctx
, DISAS_NEXT
);
2817 static const DisasInsn table_arith_log
[] = {
2818 { 0x08000240u
, 0xfc00ffffu
, trans_nop
}, /* or x,y,0 */
2819 { 0x08000240u
, 0xffe0ffe0u
, trans_copy
}, /* or x,0,t */
2820 { 0x08000000u
, 0xfc000fe0u
, trans_log
, .f
.ttt
= tcg_gen_andc_reg
},
2821 { 0x08000200u
, 0xfc000fe0u
, trans_log
, .f
.ttt
= tcg_gen_and_reg
},
2822 { 0x08000240u
, 0xfc000fe0u
, trans_log
, .f
.ttt
= tcg_gen_or_reg
},
2823 { 0x08000280u
, 0xfc000fe0u
, trans_log
, .f
.ttt
= tcg_gen_xor_reg
},
2824 { 0x08000880u
, 0xfc000fe0u
, trans_cmpclr
},
2825 { 0x08000380u
, 0xfc000fe0u
, trans_uxor
},
2826 { 0x08000980u
, 0xfc000fa0u
, trans_uaddcm
},
2827 { 0x08000b80u
, 0xfc1f0fa0u
, trans_dcor
},
2828 { 0x08000440u
, 0xfc000fe0u
, trans_ds
},
2829 { 0x08000700u
, 0xfc0007e0u
, trans_add
}, /* add */
2830 { 0x08000400u
, 0xfc0006e0u
, trans_sub
}, /* sub; sub,b; sub,tsv */
2831 { 0x080004c0u
, 0xfc0007e0u
, trans_sub
}, /* sub,tc; sub,tsv,tc */
2832 { 0x08000200u
, 0xfc000320u
, trans_add
}, /* shladd */
2835 static DisasJumpType
trans_addi(DisasContext
*ctx
, uint32_t insn
)
2837 target_sreg im
= low_sextract(insn
, 0, 11);
2838 unsigned e1
= extract32(insn
, 11, 1);
2839 unsigned cf
= extract32(insn
, 12, 4);
2840 unsigned rt
= extract32(insn
, 16, 5);
2841 unsigned r2
= extract32(insn
, 21, 5);
2842 unsigned o1
= extract32(insn
, 26, 1);
2843 TCGv_reg tcg_im
, tcg_r2
;
2850 tcg_im
= load_const(ctx
, im
);
2851 tcg_r2
= load_gpr(ctx
, r2
);
2852 ret
= do_add(ctx
, rt
, tcg_im
, tcg_r2
, 0, false, e1
, !o1
, false, cf
);
2854 return nullify_end(ctx
, ret
);
2857 static DisasJumpType
trans_subi(DisasContext
*ctx
, uint32_t insn
)
2859 target_sreg im
= low_sextract(insn
, 0, 11);
2860 unsigned e1
= extract32(insn
, 11, 1);
2861 unsigned cf
= extract32(insn
, 12, 4);
2862 unsigned rt
= extract32(insn
, 16, 5);
2863 unsigned r2
= extract32(insn
, 21, 5);
2864 TCGv_reg tcg_im
, tcg_r2
;
2871 tcg_im
= load_const(ctx
, im
);
2872 tcg_r2
= load_gpr(ctx
, r2
);
2873 ret
= do_sub(ctx
, rt
, tcg_im
, tcg_r2
, e1
, false, false, cf
);
2875 return nullify_end(ctx
, ret
);
2878 static DisasJumpType
trans_cmpiclr(DisasContext
*ctx
, uint32_t insn
)
2880 target_sreg im
= low_sextract(insn
, 0, 11);
2881 unsigned cf
= extract32(insn
, 12, 4);
2882 unsigned rt
= extract32(insn
, 16, 5);
2883 unsigned r2
= extract32(insn
, 21, 5);
2884 TCGv_reg tcg_im
, tcg_r2
;
2891 tcg_im
= load_const(ctx
, im
);
2892 tcg_r2
= load_gpr(ctx
, r2
);
2893 ret
= do_cmpclr(ctx
, rt
, tcg_im
, tcg_r2
, cf
);
2895 return nullify_end(ctx
, ret
);
2898 static DisasJumpType
trans_ld_idx_i(DisasContext
*ctx
, uint32_t insn
,
2899 const DisasInsn
*di
)
2901 unsigned rt
= extract32(insn
, 0, 5);
2902 unsigned m
= extract32(insn
, 5, 1);
2903 unsigned sz
= extract32(insn
, 6, 2);
2904 unsigned a
= extract32(insn
, 13, 1);
2905 unsigned sp
= extract32(insn
, 14, 2);
2906 int disp
= low_sextract(insn
, 16, 5);
2907 unsigned rb
= extract32(insn
, 21, 5);
2908 int modify
= (m
? (a
? -1 : 1) : 0);
2909 TCGMemOp mop
= MO_TE
| sz
;
2911 return do_load(ctx
, rt
, rb
, 0, 0, disp
, sp
, modify
, mop
);
2914 static DisasJumpType
trans_ld_idx_x(DisasContext
*ctx
, uint32_t insn
,
2915 const DisasInsn
*di
)
2917 unsigned rt
= extract32(insn
, 0, 5);
2918 unsigned m
= extract32(insn
, 5, 1);
2919 unsigned sz
= extract32(insn
, 6, 2);
2920 unsigned u
= extract32(insn
, 13, 1);
2921 unsigned sp
= extract32(insn
, 14, 2);
2922 unsigned rx
= extract32(insn
, 16, 5);
2923 unsigned rb
= extract32(insn
, 21, 5);
2924 TCGMemOp mop
= MO_TE
| sz
;
2926 return do_load(ctx
, rt
, rb
, rx
, u
? sz
: 0, 0, sp
, m
, mop
);
2929 static DisasJumpType
trans_st_idx_i(DisasContext
*ctx
, uint32_t insn
,
2930 const DisasInsn
*di
)
2932 int disp
= low_sextract(insn
, 0, 5);
2933 unsigned m
= extract32(insn
, 5, 1);
2934 unsigned sz
= extract32(insn
, 6, 2);
2935 unsigned a
= extract32(insn
, 13, 1);
2936 unsigned sp
= extract32(insn
, 14, 2);
2937 unsigned rr
= extract32(insn
, 16, 5);
2938 unsigned rb
= extract32(insn
, 21, 5);
2939 int modify
= (m
? (a
? -1 : 1) : 0);
2940 TCGMemOp mop
= MO_TE
| sz
;
2942 return do_store(ctx
, rr
, rb
, disp
, sp
, modify
, mop
);
2945 static DisasJumpType
trans_ldcw(DisasContext
*ctx
, uint32_t insn
,
2946 const DisasInsn
*di
)
2948 unsigned rt
= extract32(insn
, 0, 5);
2949 unsigned m
= extract32(insn
, 5, 1);
2950 unsigned i
= extract32(insn
, 12, 1);
2951 unsigned au
= extract32(insn
, 13, 1);
2952 unsigned sp
= extract32(insn
, 14, 2);
2953 unsigned rx
= extract32(insn
, 16, 5);
2954 unsigned rb
= extract32(insn
, 21, 5);
2955 TCGMemOp mop
= MO_TEUL
| MO_ALIGN_16
;
2956 TCGv_reg zero
, dest
, ofs
;
2958 int modify
, disp
= 0, scale
= 0;
2963 modify
= (m
? (au
? -1 : 1) : 0);
2964 disp
= low_sextract(rx
, 0, 5);
2969 scale
= mop
& MO_SIZE
;
2973 /* Base register modification. Make sure if RT == RB,
2974 we see the result of the load. */
2975 dest
= get_temp(ctx
);
2977 dest
= dest_gpr(ctx
, rt
);
2980 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
2981 ctx
->mmu_idx
== MMU_PHYS_IDX
);
2982 zero
= tcg_const_reg(0);
2983 tcg_gen_atomic_xchg_reg(dest
, addr
, zero
, ctx
->mmu_idx
, mop
);
2985 save_gpr(ctx
, rb
, ofs
);
2987 save_gpr(ctx
, rt
, dest
);
2989 return nullify_end(ctx
, DISAS_NEXT
);
2992 static DisasJumpType
trans_stby(DisasContext
*ctx
, uint32_t insn
,
2993 const DisasInsn
*di
)
2995 target_sreg disp
= low_sextract(insn
, 0, 5);
2996 unsigned m
= extract32(insn
, 5, 1);
2997 unsigned a
= extract32(insn
, 13, 1);
2998 unsigned sp
= extract32(insn
, 14, 2);
2999 unsigned rt
= extract32(insn
, 16, 5);
3000 unsigned rb
= extract32(insn
, 21, 5);
3006 form_gva(ctx
, &addr
, &ofs
, rb
, 0, 0, disp
, sp
, m
,
3007 ctx
->mmu_idx
== MMU_PHYS_IDX
);
3008 val
= load_gpr(ctx
, rt
);
3010 if (tb_cflags(ctx
->base
.tb
) & CF_PARALLEL
) {
3011 gen_helper_stby_e_parallel(cpu_env
, addr
, val
);
3013 gen_helper_stby_e(cpu_env
, addr
, val
);
3016 if (tb_cflags(ctx
->base
.tb
) & CF_PARALLEL
) {
3017 gen_helper_stby_b_parallel(cpu_env
, addr
, val
);
3019 gen_helper_stby_b(cpu_env
, addr
, val
);
3024 tcg_gen_andi_reg(ofs
, ofs
, ~3);
3025 save_gpr(ctx
, rb
, ofs
);
3028 return nullify_end(ctx
, DISAS_NEXT
);
3031 #ifndef CONFIG_USER_ONLY
3032 static DisasJumpType
trans_ldwa_idx_i(DisasContext
*ctx
, uint32_t insn
,
3033 const DisasInsn
*di
)
3035 int hold_mmu_idx
= ctx
->mmu_idx
;
3038 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
3040 /* ??? needs fixing for hppa64 -- ldda does not follow the same
3041 format wrt the sub-opcode in bits 6:9. */
3042 ctx
->mmu_idx
= MMU_PHYS_IDX
;
3043 ret
= trans_ld_idx_i(ctx
, insn
, di
);
3044 ctx
->mmu_idx
= hold_mmu_idx
;
3048 static DisasJumpType
trans_ldwa_idx_x(DisasContext
*ctx
, uint32_t insn
,
3049 const DisasInsn
*di
)
3051 int hold_mmu_idx
= ctx
->mmu_idx
;
3054 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
3056 /* ??? needs fixing for hppa64 -- ldda does not follow the same
3057 format wrt the sub-opcode in bits 6:9. */
3058 ctx
->mmu_idx
= MMU_PHYS_IDX
;
3059 ret
= trans_ld_idx_x(ctx
, insn
, di
);
3060 ctx
->mmu_idx
= hold_mmu_idx
;
3065 static const DisasInsn table_index_mem
[] = {
3066 { 0x0c001000u
, 0xfc001300, trans_ld_idx_i
}, /* LD[BHWD], im */
3067 { 0x0c000000u
, 0xfc001300, trans_ld_idx_x
}, /* LD[BHWD], rx */
3068 { 0x0c001200u
, 0xfc001300, trans_st_idx_i
}, /* ST[BHWD] */
3069 { 0x0c0001c0u
, 0xfc0003c0, trans_ldcw
},
3070 { 0x0c001300u
, 0xfc0013c0, trans_stby
},
3071 #ifndef CONFIG_USER_ONLY
3072 { 0x0c001180u
, 0xfc00d3c0, trans_ldwa_idx_i
}, /* LDWA, im */
3073 { 0x0c000180u
, 0xfc00d3c0, trans_ldwa_idx_x
}, /* LDWA, rx */
3077 static DisasJumpType
trans_ldil(DisasContext
*ctx
, uint32_t insn
)
3079 unsigned rt
= extract32(insn
, 21, 5);
3080 target_sreg i
= assemble_21(insn
);
3081 TCGv_reg tcg_rt
= dest_gpr(ctx
, rt
);
3083 tcg_gen_movi_reg(tcg_rt
, i
);
3084 save_gpr(ctx
, rt
, tcg_rt
);
3085 cond_free(&ctx
->null_cond
);
3090 static DisasJumpType
trans_addil(DisasContext
*ctx
, uint32_t insn
)
3092 unsigned rt
= extract32(insn
, 21, 5);
3093 target_sreg i
= assemble_21(insn
);
3094 TCGv_reg tcg_rt
= load_gpr(ctx
, rt
);
3095 TCGv_reg tcg_r1
= dest_gpr(ctx
, 1);
3097 tcg_gen_addi_reg(tcg_r1
, tcg_rt
, i
);
3098 save_gpr(ctx
, 1, tcg_r1
);
3099 cond_free(&ctx
->null_cond
);
3104 static DisasJumpType
trans_ldo(DisasContext
*ctx
, uint32_t insn
)
3106 unsigned rb
= extract32(insn
, 21, 5);
3107 unsigned rt
= extract32(insn
, 16, 5);
3108 target_sreg i
= assemble_16(insn
);
3109 TCGv_reg tcg_rt
= dest_gpr(ctx
, rt
);
3111 /* Special case rb == 0, for the LDI pseudo-op.
3112 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
3114 tcg_gen_movi_reg(tcg_rt
, i
);
3116 tcg_gen_addi_reg(tcg_rt
, cpu_gr
[rb
], i
);
3118 save_gpr(ctx
, rt
, tcg_rt
);
3119 cond_free(&ctx
->null_cond
);
3124 static DisasJumpType
trans_load(DisasContext
*ctx
, uint32_t insn
,
3125 bool is_mod
, TCGMemOp mop
)
3127 unsigned rb
= extract32(insn
, 21, 5);
3128 unsigned rt
= extract32(insn
, 16, 5);
3129 unsigned sp
= extract32(insn
, 14, 2);
3130 target_sreg i
= assemble_16(insn
);
3132 return do_load(ctx
, rt
, rb
, 0, 0, i
, sp
,
3133 is_mod
? (i
< 0 ? -1 : 1) : 0, mop
);
3136 static DisasJumpType
trans_load_w(DisasContext
*ctx
, uint32_t insn
)
3138 unsigned rb
= extract32(insn
, 21, 5);
3139 unsigned rt
= extract32(insn
, 16, 5);
3140 unsigned sp
= extract32(insn
, 14, 2);
3141 target_sreg i
= assemble_16a(insn
);
3142 unsigned ext2
= extract32(insn
, 1, 2);
3147 /* FLDW without modification. */
3148 return do_floadw(ctx
, ext2
* 32 + rt
, rb
, 0, 0, i
, sp
, 0);
3150 /* LDW with modification. Note that the sign of I selects
3151 post-dec vs pre-inc. */
3152 return do_load(ctx
, rt
, rb
, 0, 0, i
, sp
, (i
< 0 ? 1 : -1), MO_TEUL
);
3154 return gen_illegal(ctx
);
3158 static DisasJumpType
trans_fload_mod(DisasContext
*ctx
, uint32_t insn
)
3160 target_sreg i
= assemble_16a(insn
);
3161 unsigned t1
= extract32(insn
, 1, 1);
3162 unsigned a
= extract32(insn
, 2, 1);
3163 unsigned sp
= extract32(insn
, 14, 2);
3164 unsigned t0
= extract32(insn
, 16, 5);
3165 unsigned rb
= extract32(insn
, 21, 5);
3167 /* FLDW with modification. */
3168 return do_floadw(ctx
, t1
* 32 + t0
, rb
, 0, 0, i
, sp
, (a
? -1 : 1));
3171 static DisasJumpType
trans_store(DisasContext
*ctx
, uint32_t insn
,
3172 bool is_mod
, TCGMemOp mop
)
3174 unsigned rb
= extract32(insn
, 21, 5);
3175 unsigned rt
= extract32(insn
, 16, 5);
3176 unsigned sp
= extract32(insn
, 14, 2);
3177 target_sreg i
= assemble_16(insn
);
3179 return do_store(ctx
, rt
, rb
, i
, sp
, is_mod
? (i
< 0 ? -1 : 1) : 0, mop
);
3182 static DisasJumpType
trans_store_w(DisasContext
*ctx
, uint32_t insn
)
3184 unsigned rb
= extract32(insn
, 21, 5);
3185 unsigned rt
= extract32(insn
, 16, 5);
3186 unsigned sp
= extract32(insn
, 14, 2);
3187 target_sreg i
= assemble_16a(insn
);
3188 unsigned ext2
= extract32(insn
, 1, 2);
3193 /* FSTW without modification. */
3194 return do_fstorew(ctx
, ext2
* 32 + rt
, rb
, 0, 0, i
, sp
, 0);
3196 /* LDW with modification. */
3197 return do_store(ctx
, rt
, rb
, i
, sp
, (i
< 0 ? 1 : -1), MO_TEUL
);
3199 return gen_illegal(ctx
);
3203 static DisasJumpType
trans_fstore_mod(DisasContext
*ctx
, uint32_t insn
)
3205 target_sreg i
= assemble_16a(insn
);
3206 unsigned t1
= extract32(insn
, 1, 1);
3207 unsigned a
= extract32(insn
, 2, 1);
3208 unsigned sp
= extract32(insn
, 14, 2);
3209 unsigned t0
= extract32(insn
, 16, 5);
3210 unsigned rb
= extract32(insn
, 21, 5);
3212 /* FSTW with modification. */
3213 return do_fstorew(ctx
, t1
* 32 + t0
, rb
, 0, 0, i
, sp
, (a
? -1 : 1));
3216 static DisasJumpType
trans_copr_w(DisasContext
*ctx
, uint32_t insn
)
3218 unsigned t0
= extract32(insn
, 0, 5);
3219 unsigned m
= extract32(insn
, 5, 1);
3220 unsigned t1
= extract32(insn
, 6, 1);
3221 unsigned ext3
= extract32(insn
, 7, 3);
3222 /* unsigned cc = extract32(insn, 10, 2); */
3223 unsigned i
= extract32(insn
, 12, 1);
3224 unsigned ua
= extract32(insn
, 13, 1);
3225 unsigned sp
= extract32(insn
, 14, 2);
3226 unsigned rx
= extract32(insn
, 16, 5);
3227 unsigned rb
= extract32(insn
, 21, 5);
3228 unsigned rt
= t1
* 32 + t0
;
3229 int modify
= (m
? (ua
? -1 : 1) : 0);
3233 scale
= (ua
? 2 : 0);
3237 disp
= low_sextract(rx
, 0, 5);
3240 modify
= (m
? (ua
? -1 : 1) : 0);
3245 return do_floadw(ctx
, rt
, rb
, rx
, scale
, disp
, sp
, modify
);
3247 return do_fstorew(ctx
, rt
, rb
, rx
, scale
, disp
, sp
, modify
);
3249 return gen_illegal(ctx
);
3252 static DisasJumpType
trans_copr_dw(DisasContext
*ctx
, uint32_t insn
)
3254 unsigned rt
= extract32(insn
, 0, 5);
3255 unsigned m
= extract32(insn
, 5, 1);
3256 unsigned ext4
= extract32(insn
, 6, 4);
3257 /* unsigned cc = extract32(insn, 10, 2); */
3258 unsigned i
= extract32(insn
, 12, 1);
3259 unsigned ua
= extract32(insn
, 13, 1);
3260 unsigned sp
= extract32(insn
, 14, 2);
3261 unsigned rx
= extract32(insn
, 16, 5);
3262 unsigned rb
= extract32(insn
, 21, 5);
3263 int modify
= (m
? (ua
? -1 : 1) : 0);
3267 scale
= (ua
? 3 : 0);
3271 disp
= low_sextract(rx
, 0, 5);
3274 modify
= (m
? (ua
? -1 : 1) : 0);
3279 return do_floadd(ctx
, rt
, rb
, rx
, scale
, disp
, sp
, modify
);
3281 return do_fstored(ctx
, rt
, rb
, rx
, scale
, disp
, sp
, modify
);
3283 return gen_illegal(ctx
);
3287 static DisasJumpType
trans_cmpb(DisasContext
*ctx
, uint32_t insn
,
3288 bool is_true
, bool is_imm
, bool is_dw
)
3290 target_sreg disp
= assemble_12(insn
) * 4;
3291 unsigned n
= extract32(insn
, 1, 1);
3292 unsigned c
= extract32(insn
, 13, 3);
3293 unsigned r
= extract32(insn
, 21, 5);
3294 unsigned cf
= c
* 2 + !is_true
;
3295 TCGv_reg dest
, in1
, in2
, sv
;
3301 in1
= load_const(ctx
, low_sextract(insn
, 16, 5));
3303 in1
= load_gpr(ctx
, extract32(insn
, 16, 5));
3305 in2
= load_gpr(ctx
, r
);
3306 dest
= get_temp(ctx
);
3308 tcg_gen_sub_reg(dest
, in1
, in2
);
3312 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
3315 cond
= do_sub_cond(cf
, dest
, in1
, in2
, sv
);
3316 return do_cbranch(ctx
, disp
, n
, &cond
);
3319 static DisasJumpType
trans_addb(DisasContext
*ctx
, uint32_t insn
,
3320 bool is_true
, bool is_imm
)
3322 target_sreg disp
= assemble_12(insn
) * 4;
3323 unsigned n
= extract32(insn
, 1, 1);
3324 unsigned c
= extract32(insn
, 13, 3);
3325 unsigned r
= extract32(insn
, 21, 5);
3326 unsigned cf
= c
* 2 + !is_true
;
3327 TCGv_reg dest
, in1
, in2
, sv
, cb_msb
;
3333 in1
= load_const(ctx
, low_sextract(insn
, 16, 5));
3335 in1
= load_gpr(ctx
, extract32(insn
, 16, 5));
3337 in2
= load_gpr(ctx
, r
);
3338 dest
= dest_gpr(ctx
, r
);
3344 tcg_gen_add_reg(dest
, in1
, in2
);
3347 cb_msb
= get_temp(ctx
);
3348 tcg_gen_movi_reg(cb_msb
, 0);
3349 tcg_gen_add2_reg(dest
, cb_msb
, in1
, cb_msb
, in2
, cb_msb
);
3352 tcg_gen_add_reg(dest
, in1
, in2
);
3353 sv
= do_add_sv(ctx
, dest
, in1
, in2
);
3357 cond
= do_cond(cf
, dest
, cb_msb
, sv
);
3358 return do_cbranch(ctx
, disp
, n
, &cond
);
3361 static DisasJumpType
trans_bb(DisasContext
*ctx
, uint32_t insn
)
3363 target_sreg disp
= assemble_12(insn
) * 4;
3364 unsigned n
= extract32(insn
, 1, 1);
3365 unsigned c
= extract32(insn
, 15, 1);
3366 unsigned r
= extract32(insn
, 16, 5);
3367 unsigned p
= extract32(insn
, 21, 5);
3368 unsigned i
= extract32(insn
, 26, 1);
3369 TCGv_reg tmp
, tcg_r
;
3374 tmp
= tcg_temp_new();
3375 tcg_r
= load_gpr(ctx
, r
);
3377 tcg_gen_shli_reg(tmp
, tcg_r
, p
);
3379 tcg_gen_shl_reg(tmp
, tcg_r
, cpu_sar
);
3382 cond
= cond_make_0(c
? TCG_COND_GE
: TCG_COND_LT
, tmp
);
3384 return do_cbranch(ctx
, disp
, n
, &cond
);
3387 static DisasJumpType
trans_movb(DisasContext
*ctx
, uint32_t insn
, bool is_imm
)
3389 target_sreg disp
= assemble_12(insn
) * 4;
3390 unsigned n
= extract32(insn
, 1, 1);
3391 unsigned c
= extract32(insn
, 13, 3);
3392 unsigned t
= extract32(insn
, 16, 5);
3393 unsigned r
= extract32(insn
, 21, 5);
3399 dest
= dest_gpr(ctx
, r
);
3401 tcg_gen_movi_reg(dest
, low_sextract(t
, 0, 5));
3402 } else if (t
== 0) {
3403 tcg_gen_movi_reg(dest
, 0);
3405 tcg_gen_mov_reg(dest
, cpu_gr
[t
]);
3408 cond
= do_sed_cond(c
, dest
);
3409 return do_cbranch(ctx
, disp
, n
, &cond
);
3412 static DisasJumpType
trans_shrpw_sar(DisasContext
*ctx
, uint32_t insn
,
3413 const DisasInsn
*di
)
3415 unsigned rt
= extract32(insn
, 0, 5);
3416 unsigned c
= extract32(insn
, 13, 3);
3417 unsigned r1
= extract32(insn
, 16, 5);
3418 unsigned r2
= extract32(insn
, 21, 5);
3425 dest
= dest_gpr(ctx
, rt
);
3427 tcg_gen_ext32u_reg(dest
, load_gpr(ctx
, r2
));
3428 tcg_gen_shr_reg(dest
, dest
, cpu_sar
);
3429 } else if (r1
== r2
) {
3430 TCGv_i32 t32
= tcg_temp_new_i32();
3431 tcg_gen_trunc_reg_i32(t32
, load_gpr(ctx
, r2
));
3432 tcg_gen_rotr_i32(t32
, t32
, cpu_sar
);
3433 tcg_gen_extu_i32_reg(dest
, t32
);
3434 tcg_temp_free_i32(t32
);
3436 TCGv_i64 t
= tcg_temp_new_i64();
3437 TCGv_i64 s
= tcg_temp_new_i64();
3439 tcg_gen_concat_reg_i64(t
, load_gpr(ctx
, r2
), load_gpr(ctx
, r1
));
3440 tcg_gen_extu_reg_i64(s
, cpu_sar
);
3441 tcg_gen_shr_i64(t
, t
, s
);
3442 tcg_gen_trunc_i64_reg(dest
, t
);
3444 tcg_temp_free_i64(t
);
3445 tcg_temp_free_i64(s
);
3447 save_gpr(ctx
, rt
, dest
);
3449 /* Install the new nullification. */
3450 cond_free(&ctx
->null_cond
);
3452 ctx
->null_cond
= do_sed_cond(c
, dest
);
3454 return nullify_end(ctx
, DISAS_NEXT
);
3457 static DisasJumpType
trans_shrpw_imm(DisasContext
*ctx
, uint32_t insn
,
3458 const DisasInsn
*di
)
3460 unsigned rt
= extract32(insn
, 0, 5);
3461 unsigned cpos
= extract32(insn
, 5, 5);
3462 unsigned c
= extract32(insn
, 13, 3);
3463 unsigned r1
= extract32(insn
, 16, 5);
3464 unsigned r2
= extract32(insn
, 21, 5);
3465 unsigned sa
= 31 - cpos
;
3472 dest
= dest_gpr(ctx
, rt
);
3473 t2
= load_gpr(ctx
, r2
);
3475 TCGv_i32 t32
= tcg_temp_new_i32();
3476 tcg_gen_trunc_reg_i32(t32
, t2
);
3477 tcg_gen_rotri_i32(t32
, t32
, sa
);
3478 tcg_gen_extu_i32_reg(dest
, t32
);
3479 tcg_temp_free_i32(t32
);
3480 } else if (r1
== 0) {
3481 tcg_gen_extract_reg(dest
, t2
, sa
, 32 - sa
);
3483 TCGv_reg t0
= tcg_temp_new();
3484 tcg_gen_extract_reg(t0
, t2
, sa
, 32 - sa
);
3485 tcg_gen_deposit_reg(dest
, t0
, cpu_gr
[r1
], 32 - sa
, sa
);
3488 save_gpr(ctx
, rt
, dest
);
3490 /* Install the new nullification. */
3491 cond_free(&ctx
->null_cond
);
3493 ctx
->null_cond
= do_sed_cond(c
, dest
);
3495 return nullify_end(ctx
, DISAS_NEXT
);
3498 static DisasJumpType
trans_extrw_sar(DisasContext
*ctx
, uint32_t insn
,
3499 const DisasInsn
*di
)
3501 unsigned clen
= extract32(insn
, 0, 5);
3502 unsigned is_se
= extract32(insn
, 10, 1);
3503 unsigned c
= extract32(insn
, 13, 3);
3504 unsigned rt
= extract32(insn
, 16, 5);
3505 unsigned rr
= extract32(insn
, 21, 5);
3506 unsigned len
= 32 - clen
;
3507 TCGv_reg dest
, src
, tmp
;
3513 dest
= dest_gpr(ctx
, rt
);
3514 src
= load_gpr(ctx
, rr
);
3515 tmp
= tcg_temp_new();
3517 /* Recall that SAR is using big-endian bit numbering. */
3518 tcg_gen_xori_reg(tmp
, cpu_sar
, TARGET_REGISTER_BITS
- 1);
3520 tcg_gen_sar_reg(dest
, src
, tmp
);
3521 tcg_gen_sextract_reg(dest
, dest
, 0, len
);
3523 tcg_gen_shr_reg(dest
, src
, tmp
);
3524 tcg_gen_extract_reg(dest
, dest
, 0, len
);
3527 save_gpr(ctx
, rt
, dest
);
3529 /* Install the new nullification. */
3530 cond_free(&ctx
->null_cond
);
3532 ctx
->null_cond
= do_sed_cond(c
, dest
);
3534 return nullify_end(ctx
, DISAS_NEXT
);
3537 static DisasJumpType
trans_extrw_imm(DisasContext
*ctx
, uint32_t insn
,
3538 const DisasInsn
*di
)
3540 unsigned clen
= extract32(insn
, 0, 5);
3541 unsigned pos
= extract32(insn
, 5, 5);
3542 unsigned is_se
= extract32(insn
, 10, 1);
3543 unsigned c
= extract32(insn
, 13, 3);
3544 unsigned rt
= extract32(insn
, 16, 5);
3545 unsigned rr
= extract32(insn
, 21, 5);
3546 unsigned len
= 32 - clen
;
3547 unsigned cpos
= 31 - pos
;
3554 dest
= dest_gpr(ctx
, rt
);
3555 src
= load_gpr(ctx
, rr
);
3557 tcg_gen_sextract_reg(dest
, src
, cpos
, len
);
3559 tcg_gen_extract_reg(dest
, src
, cpos
, len
);
3561 save_gpr(ctx
, rt
, dest
);
3563 /* Install the new nullification. */
3564 cond_free(&ctx
->null_cond
);
3566 ctx
->null_cond
= do_sed_cond(c
, dest
);
3568 return nullify_end(ctx
, DISAS_NEXT
);
3571 static const DisasInsn table_sh_ex
[] = {
3572 { 0xd0000000u
, 0xfc001fe0u
, trans_shrpw_sar
},
3573 { 0xd0000800u
, 0xfc001c00u
, trans_shrpw_imm
},
3574 { 0xd0001000u
, 0xfc001be0u
, trans_extrw_sar
},
3575 { 0xd0001800u
, 0xfc001800u
, trans_extrw_imm
},
3578 static DisasJumpType
trans_depw_imm_c(DisasContext
*ctx
, uint32_t insn
,
3579 const DisasInsn
*di
)
3581 unsigned clen
= extract32(insn
, 0, 5);
3582 unsigned cpos
= extract32(insn
, 5, 5);
3583 unsigned nz
= extract32(insn
, 10, 1);
3584 unsigned c
= extract32(insn
, 13, 3);
3585 target_sreg val
= low_sextract(insn
, 16, 5);
3586 unsigned rt
= extract32(insn
, 21, 5);
3587 unsigned len
= 32 - clen
;
3588 target_sreg mask0
, mask1
;
3594 if (cpos
+ len
> 32) {
3598 dest
= dest_gpr(ctx
, rt
);
3599 mask0
= deposit64(0, cpos
, len
, val
);
3600 mask1
= deposit64(-1, cpos
, len
, val
);
3603 TCGv_reg src
= load_gpr(ctx
, rt
);
3605 tcg_gen_andi_reg(dest
, src
, mask1
);
3608 tcg_gen_ori_reg(dest
, src
, mask0
);
3610 tcg_gen_movi_reg(dest
, mask0
);
3612 save_gpr(ctx
, rt
, dest
);
3614 /* Install the new nullification. */
3615 cond_free(&ctx
->null_cond
);
3617 ctx
->null_cond
= do_sed_cond(c
, dest
);
3619 return nullify_end(ctx
, DISAS_NEXT
);
3622 static DisasJumpType
trans_depw_imm(DisasContext
*ctx
, uint32_t insn
,
3623 const DisasInsn
*di
)
3625 unsigned clen
= extract32(insn
, 0, 5);
3626 unsigned cpos
= extract32(insn
, 5, 5);
3627 unsigned nz
= extract32(insn
, 10, 1);
3628 unsigned c
= extract32(insn
, 13, 3);
3629 unsigned rr
= extract32(insn
, 16, 5);
3630 unsigned rt
= extract32(insn
, 21, 5);
3631 unsigned rs
= nz
? rt
: 0;
3632 unsigned len
= 32 - clen
;
3638 if (cpos
+ len
> 32) {
3642 dest
= dest_gpr(ctx
, rt
);
3643 val
= load_gpr(ctx
, rr
);
3645 tcg_gen_deposit_z_reg(dest
, val
, cpos
, len
);
3647 tcg_gen_deposit_reg(dest
, cpu_gr
[rs
], val
, cpos
, len
);
3649 save_gpr(ctx
, rt
, dest
);
3651 /* Install the new nullification. */
3652 cond_free(&ctx
->null_cond
);
3654 ctx
->null_cond
= do_sed_cond(c
, dest
);
3656 return nullify_end(ctx
, DISAS_NEXT
);
3659 static DisasJumpType
trans_depw_sar(DisasContext
*ctx
, uint32_t insn
,
3660 const DisasInsn
*di
)
3662 unsigned clen
= extract32(insn
, 0, 5);
3663 unsigned nz
= extract32(insn
, 10, 1);
3664 unsigned i
= extract32(insn
, 12, 1);
3665 unsigned c
= extract32(insn
, 13, 3);
3666 unsigned rt
= extract32(insn
, 21, 5);
3667 unsigned rs
= nz
? rt
: 0;
3668 unsigned len
= 32 - clen
;
3669 TCGv_reg val
, mask
, tmp
, shift
, dest
;
3670 unsigned msb
= 1U << (len
- 1);
3677 val
= load_const(ctx
, low_sextract(insn
, 16, 5));
3679 val
= load_gpr(ctx
, extract32(insn
, 16, 5));
3681 dest
= dest_gpr(ctx
, rt
);
3682 shift
= tcg_temp_new();
3683 tmp
= tcg_temp_new();
3685 /* Convert big-endian bit numbering in SAR to left-shift. */
3686 tcg_gen_xori_reg(shift
, cpu_sar
, TARGET_REGISTER_BITS
- 1);
3688 mask
= tcg_const_reg(msb
+ (msb
- 1));
3689 tcg_gen_and_reg(tmp
, val
, mask
);
3691 tcg_gen_shl_reg(mask
, mask
, shift
);
3692 tcg_gen_shl_reg(tmp
, tmp
, shift
);
3693 tcg_gen_andc_reg(dest
, cpu_gr
[rs
], mask
);
3694 tcg_gen_or_reg(dest
, dest
, tmp
);
3696 tcg_gen_shl_reg(dest
, tmp
, shift
);
3698 tcg_temp_free(shift
);
3699 tcg_temp_free(mask
);
3701 save_gpr(ctx
, rt
, dest
);
3703 /* Install the new nullification. */
3704 cond_free(&ctx
->null_cond
);
3706 ctx
->null_cond
= do_sed_cond(c
, dest
);
3708 return nullify_end(ctx
, DISAS_NEXT
);
3711 static const DisasInsn table_depw
[] = {
3712 { 0xd4000000u
, 0xfc000be0u
, trans_depw_sar
},
3713 { 0xd4000800u
, 0xfc001800u
, trans_depw_imm
},
3714 { 0xd4001800u
, 0xfc001800u
, trans_depw_imm_c
},
3717 static DisasJumpType
trans_be(DisasContext
*ctx
, uint32_t insn
, bool is_l
)
3719 unsigned n
= extract32(insn
, 1, 1);
3720 unsigned b
= extract32(insn
, 21, 5);
3721 target_sreg disp
= assemble_17(insn
);
3724 #ifdef CONFIG_USER_ONLY
3725 /* ??? It seems like there should be a good way of using
3726 "be disp(sr2, r0)", the canonical gateway entry mechanism
3727 to our advantage. But that appears to be inconvenient to
3728 manage along side branch delay slots. Therefore we handle
3729 entry into the gateway page via absolute address. */
3730 /* Since we don't implement spaces, just branch. Do notice the special
3731 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3732 goto_tb to the TB containing the syscall. */
3734 return do_dbranch(ctx
, disp
, is_l
? 31 : 0, n
);
3737 int sp
= assemble_sr3(insn
);
3741 tmp
= get_temp(ctx
);
3742 tcg_gen_addi_reg(tmp
, load_gpr(ctx
, b
), disp
);
3743 tmp
= do_ibranch_priv(ctx
, tmp
);
3745 #ifdef CONFIG_USER_ONLY
3746 return do_ibranch(ctx
, tmp
, is_l
? 31 : 0, n
);
3748 TCGv_i64 new_spc
= tcg_temp_new_i64();
3750 load_spr(ctx
, new_spc
, sp
);
3752 copy_iaoq_entry(cpu_gr
[31], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
3753 tcg_gen_mov_i64(cpu_sr
[0], cpu_iasq_f
);
3755 if (n
&& use_nullify_skip(ctx
)) {
3756 tcg_gen_mov_reg(cpu_iaoq_f
, tmp
);
3757 tcg_gen_addi_reg(cpu_iaoq_b
, cpu_iaoq_f
, 4);
3758 tcg_gen_mov_i64(cpu_iasq_f
, new_spc
);
3759 tcg_gen_mov_i64(cpu_iasq_b
, cpu_iasq_f
);
3761 copy_iaoq_entry(cpu_iaoq_f
, ctx
->iaoq_b
, cpu_iaoq_b
);
3762 if (ctx
->iaoq_b
== -1) {
3763 tcg_gen_mov_i64(cpu_iasq_f
, cpu_iasq_b
);
3765 tcg_gen_mov_reg(cpu_iaoq_b
, tmp
);
3766 tcg_gen_mov_i64(cpu_iasq_b
, new_spc
);
3767 nullify_set(ctx
, n
);
3769 tcg_temp_free_i64(new_spc
);
3770 tcg_gen_lookup_and_goto_ptr();
3771 return nullify_end(ctx
, DISAS_NORETURN
);
3775 static DisasJumpType
trans_bl(DisasContext
*ctx
, uint32_t insn
,
3776 const DisasInsn
*di
)
3778 unsigned n
= extract32(insn
, 1, 1);
3779 unsigned link
= extract32(insn
, 21, 5);
3780 target_sreg disp
= assemble_17(insn
);
3782 return do_dbranch(ctx
, iaoq_dest(ctx
, disp
), link
, n
);
3785 static DisasJumpType
trans_b_gate(DisasContext
*ctx
, uint32_t insn
,
3786 const DisasInsn
*di
)
3788 unsigned n
= extract32(insn
, 1, 1);
3789 unsigned link
= extract32(insn
, 21, 5);
3790 target_sreg disp
= assemble_17(insn
);
3791 target_ureg dest
= iaoq_dest(ctx
, disp
);
3793 /* Make sure the caller hasn't done something weird with the queue.
3794 * ??? This is not quite the same as the PSW[B] bit, which would be
3795 * expensive to track. Real hardware will trap for
3797 * b gateway+4 (in delay slot of first branch)
3798 * However, checking for a non-sequential instruction queue *will*
3799 * diagnose the security hole
3802 * in which instructions at evil would run with increased privs.
3804 if (ctx
->iaoq_b
== -1 || ctx
->iaoq_b
!= ctx
->iaoq_f
+ 4) {
3805 return gen_illegal(ctx
);
3808 #ifndef CONFIG_USER_ONLY
3809 if (ctx
->tb_flags
& PSW_C
) {
3810 CPUHPPAState
*env
= ctx
->cs
->env_ptr
;
3811 int type
= hppa_artype_for_page(env
, ctx
->base
.pc_next
);
3812 /* If we could not find a TLB entry, then we need to generate an
3813 ITLB miss exception so the kernel will provide it.
3814 The resulting TLB fill operation will invalidate this TB and
3815 we will re-translate, at which point we *will* be able to find
3816 the TLB entry and determine if this is in fact a gateway page. */
3818 return gen_excp(ctx
, EXCP_ITLB_MISS
);
3820 /* No change for non-gateway pages or for priv decrease. */
3821 if (type
>= 4 && type
- 4 < ctx
->privilege
) {
3822 dest
= deposit32(dest
, 0, 2, type
- 4);
3825 dest
&= -4; /* priv = 0 */
3829 return do_dbranch(ctx
, dest
, link
, n
);
3832 static DisasJumpType
trans_bl_long(DisasContext
*ctx
, uint32_t insn
,
3833 const DisasInsn
*di
)
3835 unsigned n
= extract32(insn
, 1, 1);
3836 target_sreg disp
= assemble_22(insn
);
3838 return do_dbranch(ctx
, iaoq_dest(ctx
, disp
), 2, n
);
3841 static DisasJumpType
trans_blr(DisasContext
*ctx
, uint32_t insn
,
3842 const DisasInsn
*di
)
3844 unsigned n
= extract32(insn
, 1, 1);
3845 unsigned rx
= extract32(insn
, 16, 5);
3846 unsigned link
= extract32(insn
, 21, 5);
3847 TCGv_reg tmp
= get_temp(ctx
);
3849 tcg_gen_shli_reg(tmp
, load_gpr(ctx
, rx
), 3);
3850 tcg_gen_addi_reg(tmp
, tmp
, ctx
->iaoq_f
+ 8);
3851 /* The computation here never changes privilege level. */
3852 return do_ibranch(ctx
, tmp
, link
, n
);
3855 static DisasJumpType
trans_bv(DisasContext
*ctx
, uint32_t insn
,
3856 const DisasInsn
*di
)
3858 unsigned n
= extract32(insn
, 1, 1);
3859 unsigned rx
= extract32(insn
, 16, 5);
3860 unsigned rb
= extract32(insn
, 21, 5);
3864 dest
= load_gpr(ctx
, rb
);
3866 dest
= get_temp(ctx
);
3867 tcg_gen_shli_reg(dest
, load_gpr(ctx
, rx
), 3);
3868 tcg_gen_add_reg(dest
, dest
, load_gpr(ctx
, rb
));
3870 dest
= do_ibranch_priv(ctx
, dest
);
3871 return do_ibranch(ctx
, dest
, 0, n
);
3874 static DisasJumpType
trans_bve(DisasContext
*ctx
, uint32_t insn
,
3875 const DisasInsn
*di
)
3877 unsigned n
= extract32(insn
, 1, 1);
3878 unsigned rb
= extract32(insn
, 21, 5);
3879 unsigned link
= extract32(insn
, 13, 1) ? 2 : 0;
3882 #ifdef CONFIG_USER_ONLY
3883 dest
= do_ibranch_priv(ctx
, load_gpr(ctx
, rb
));
3884 return do_ibranch(ctx
, dest
, link
, n
);
3887 dest
= do_ibranch_priv(ctx
, load_gpr(ctx
, rb
));
3889 copy_iaoq_entry(cpu_iaoq_f
, ctx
->iaoq_b
, cpu_iaoq_b
);
3890 if (ctx
->iaoq_b
== -1) {
3891 tcg_gen_mov_i64(cpu_iasq_f
, cpu_iasq_b
);
3893 copy_iaoq_entry(cpu_iaoq_b
, -1, dest
);
3894 tcg_gen_mov_i64(cpu_iasq_b
, space_select(ctx
, 0, dest
));
3896 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
3898 nullify_set(ctx
, n
);
3899 tcg_gen_lookup_and_goto_ptr();
3900 return nullify_end(ctx
, DISAS_NORETURN
);
3904 static const DisasInsn table_branch
[] = {
3905 { 0xe8000000u
, 0xfc006000u
, trans_bl
}, /* B,L and B,L,PUSH */
3906 { 0xe800a000u
, 0xfc00e000u
, trans_bl_long
},
3907 { 0xe8004000u
, 0xfc00fffdu
, trans_blr
},
3908 { 0xe800c000u
, 0xfc00fffdu
, trans_bv
},
3909 { 0xe800d000u
, 0xfc00dffcu
, trans_bve
},
3910 { 0xe8002000u
, 0xfc00e000u
, trans_b_gate
},
3913 static DisasJumpType
trans_fop_wew_0c(DisasContext
*ctx
, uint32_t insn
,
3914 const DisasInsn
*di
)
3916 unsigned rt
= extract32(insn
, 0, 5);
3917 unsigned ra
= extract32(insn
, 21, 5);
3918 return do_fop_wew(ctx
, rt
, ra
, di
->f
.wew
);
3921 static DisasJumpType
trans_fop_wew_0e(DisasContext
*ctx
, uint32_t insn
,
3922 const DisasInsn
*di
)
3924 unsigned rt
= assemble_rt64(insn
);
3925 unsigned ra
= assemble_ra64(insn
);
3926 return do_fop_wew(ctx
, rt
, ra
, di
->f
.wew
);
3929 static DisasJumpType
trans_fop_ded(DisasContext
*ctx
, uint32_t insn
,
3930 const DisasInsn
*di
)
3932 unsigned rt
= extract32(insn
, 0, 5);
3933 unsigned ra
= extract32(insn
, 21, 5);
3934 return do_fop_ded(ctx
, rt
, ra
, di
->f
.ded
);
3937 static DisasJumpType
trans_fop_wed_0c(DisasContext
*ctx
, uint32_t insn
,
3938 const DisasInsn
*di
)
3940 unsigned rt
= extract32(insn
, 0, 5);
3941 unsigned ra
= extract32(insn
, 21, 5);
3942 return do_fop_wed(ctx
, rt
, ra
, di
->f
.wed
);
3945 static DisasJumpType
trans_fop_wed_0e(DisasContext
*ctx
, uint32_t insn
,
3946 const DisasInsn
*di
)
3948 unsigned rt
= assemble_rt64(insn
);
3949 unsigned ra
= extract32(insn
, 21, 5);
3950 return do_fop_wed(ctx
, rt
, ra
, di
->f
.wed
);
3953 static DisasJumpType
trans_fop_dew_0c(DisasContext
*ctx
, uint32_t insn
,
3954 const DisasInsn
*di
)
3956 unsigned rt
= extract32(insn
, 0, 5);
3957 unsigned ra
= extract32(insn
, 21, 5);
3958 return do_fop_dew(ctx
, rt
, ra
, di
->f
.dew
);
3961 static DisasJumpType
trans_fop_dew_0e(DisasContext
*ctx
, uint32_t insn
,
3962 const DisasInsn
*di
)
3964 unsigned rt
= extract32(insn
, 0, 5);
3965 unsigned ra
= assemble_ra64(insn
);
3966 return do_fop_dew(ctx
, rt
, ra
, di
->f
.dew
);
3969 static DisasJumpType
trans_fop_weww_0c(DisasContext
*ctx
, uint32_t insn
,
3970 const DisasInsn
*di
)
3972 unsigned rt
= extract32(insn
, 0, 5);
3973 unsigned rb
= extract32(insn
, 16, 5);
3974 unsigned ra
= extract32(insn
, 21, 5);
3975 return do_fop_weww(ctx
, rt
, ra
, rb
, di
->f
.weww
);
3978 static DisasJumpType
trans_fop_weww_0e(DisasContext
*ctx
, uint32_t insn
,
3979 const DisasInsn
*di
)
3981 unsigned rt
= assemble_rt64(insn
);
3982 unsigned rb
= assemble_rb64(insn
);
3983 unsigned ra
= assemble_ra64(insn
);
3984 return do_fop_weww(ctx
, rt
, ra
, rb
, di
->f
.weww
);
3987 static DisasJumpType
trans_fop_dedd(DisasContext
*ctx
, uint32_t insn
,
3988 const DisasInsn
*di
)
3990 unsigned rt
= extract32(insn
, 0, 5);
3991 unsigned rb
= extract32(insn
, 16, 5);
3992 unsigned ra
= extract32(insn
, 21, 5);
3993 return do_fop_dedd(ctx
, rt
, ra
, rb
, di
->f
.dedd
);
3996 static void gen_fcpy_s(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3998 tcg_gen_mov_i32(dst
, src
);
4001 static void gen_fcpy_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
4003 tcg_gen_mov_i64(dst
, src
);
4006 static void gen_fabs_s(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
4008 tcg_gen_andi_i32(dst
, src
, INT32_MAX
);
4011 static void gen_fabs_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
4013 tcg_gen_andi_i64(dst
, src
, INT64_MAX
);
4016 static void gen_fneg_s(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
4018 tcg_gen_xori_i32(dst
, src
, INT32_MIN
);
4021 static void gen_fneg_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
4023 tcg_gen_xori_i64(dst
, src
, INT64_MIN
);
4026 static void gen_fnegabs_s(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
4028 tcg_gen_ori_i32(dst
, src
, INT32_MIN
);
4031 static void gen_fnegabs_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
4033 tcg_gen_ori_i64(dst
, src
, INT64_MIN
);
4036 static DisasJumpType
do_fcmp_s(DisasContext
*ctx
, unsigned ra
, unsigned rb
,
4037 unsigned y
, unsigned c
)
4039 TCGv_i32 ta
, tb
, tc
, ty
;
4043 ta
= load_frw0_i32(ra
);
4044 tb
= load_frw0_i32(rb
);
4045 ty
= tcg_const_i32(y
);
4046 tc
= tcg_const_i32(c
);
4048 gen_helper_fcmp_s(cpu_env
, ta
, tb
, ty
, tc
);
4050 tcg_temp_free_i32(ta
);
4051 tcg_temp_free_i32(tb
);
4052 tcg_temp_free_i32(ty
);
4053 tcg_temp_free_i32(tc
);
4055 return nullify_end(ctx
, DISAS_NEXT
);
4058 static DisasJumpType
trans_fcmp_s_0c(DisasContext
*ctx
, uint32_t insn
,
4059 const DisasInsn
*di
)
4061 unsigned c
= extract32(insn
, 0, 5);
4062 unsigned y
= extract32(insn
, 13, 3);
4063 unsigned rb
= extract32(insn
, 16, 5);
4064 unsigned ra
= extract32(insn
, 21, 5);
4065 return do_fcmp_s(ctx
, ra
, rb
, y
, c
);
4068 static DisasJumpType
trans_fcmp_s_0e(DisasContext
*ctx
, uint32_t insn
,
4069 const DisasInsn
*di
)
4071 unsigned c
= extract32(insn
, 0, 5);
4072 unsigned y
= extract32(insn
, 13, 3);
4073 unsigned rb
= assemble_rb64(insn
);
4074 unsigned ra
= assemble_ra64(insn
);
4075 return do_fcmp_s(ctx
, ra
, rb
, y
, c
);
4078 static DisasJumpType
trans_fcmp_d(DisasContext
*ctx
, uint32_t insn
,
4079 const DisasInsn
*di
)
4081 unsigned c
= extract32(insn
, 0, 5);
4082 unsigned y
= extract32(insn
, 13, 3);
4083 unsigned rb
= extract32(insn
, 16, 5);
4084 unsigned ra
= extract32(insn
, 21, 5);
4092 ty
= tcg_const_i32(y
);
4093 tc
= tcg_const_i32(c
);
4095 gen_helper_fcmp_d(cpu_env
, ta
, tb
, ty
, tc
);
4097 tcg_temp_free_i64(ta
);
4098 tcg_temp_free_i64(tb
);
4099 tcg_temp_free_i32(ty
);
4100 tcg_temp_free_i32(tc
);
4102 return nullify_end(ctx
, DISAS_NEXT
);
4105 static DisasJumpType
trans_ftest_t(DisasContext
*ctx
, uint32_t insn
,
4106 const DisasInsn
*di
)
4108 unsigned y
= extract32(insn
, 13, 3);
4109 unsigned cbit
= (y
^ 1) - 1;
4115 tcg_gen_ld32u_reg(t
, cpu_env
, offsetof(CPUHPPAState
, fr0_shadow
));
4116 tcg_gen_extract_reg(t
, t
, 21 - cbit
, 1);
4117 ctx
->null_cond
= cond_make_0(TCG_COND_NE
, t
);
4120 return nullify_end(ctx
, DISAS_NEXT
);
4123 static DisasJumpType
trans_ftest_q(DisasContext
*ctx
, uint32_t insn
,
4124 const DisasInsn
*di
)
4126 unsigned c
= extract32(insn
, 0, 5);
4134 tcg_gen_ld32u_reg(t
, cpu_env
, offsetof(CPUHPPAState
, fr0_shadow
));
4137 case 0: /* simple */
4138 tcg_gen_andi_reg(t
, t
, 0x4000000);
4139 ctx
->null_cond
= cond_make_0(TCG_COND_NE
, t
);
4163 return gen_illegal(ctx
);
4166 TCGv_reg c
= load_const(ctx
, mask
);
4167 tcg_gen_or_reg(t
, t
, c
);
4168 ctx
->null_cond
= cond_make(TCG_COND_EQ
, t
, c
);
4170 tcg_gen_andi_reg(t
, t
, mask
);
4171 ctx
->null_cond
= cond_make_0(TCG_COND_EQ
, t
);
4174 return nullify_end(ctx
, DISAS_NEXT
);
4177 static DisasJumpType
trans_xmpyu(DisasContext
*ctx
, uint32_t insn
,
4178 const DisasInsn
*di
)
4180 unsigned rt
= extract32(insn
, 0, 5);
4181 unsigned rb
= assemble_rb64(insn
);
4182 unsigned ra
= assemble_ra64(insn
);
4187 a
= load_frw0_i64(ra
);
4188 b
= load_frw0_i64(rb
);
4189 tcg_gen_mul_i64(a
, a
, b
);
4191 tcg_temp_free_i64(a
);
4192 tcg_temp_free_i64(b
);
4194 return nullify_end(ctx
, DISAS_NEXT
);
4197 #define FOP_DED trans_fop_ded, .f.ded
4198 #define FOP_DEDD trans_fop_dedd, .f.dedd
4200 #define FOP_WEW trans_fop_wew_0c, .f.wew
4201 #define FOP_DEW trans_fop_dew_0c, .f.dew
4202 #define FOP_WED trans_fop_wed_0c, .f.wed
4203 #define FOP_WEWW trans_fop_weww_0c, .f.weww
4205 static const DisasInsn table_float_0c
[] = {
4206 /* floating point class zero */
4207 { 0x30004000, 0xfc1fffe0, FOP_WEW
= gen_fcpy_s
},
4208 { 0x30006000, 0xfc1fffe0, FOP_WEW
= gen_fabs_s
},
4209 { 0x30008000, 0xfc1fffe0, FOP_WEW
= gen_helper_fsqrt_s
},
4210 { 0x3000a000, 0xfc1fffe0, FOP_WEW
= gen_helper_frnd_s
},
4211 { 0x3000c000, 0xfc1fffe0, FOP_WEW
= gen_fneg_s
},
4212 { 0x3000e000, 0xfc1fffe0, FOP_WEW
= gen_fnegabs_s
},
4214 { 0x30004800, 0xfc1fffe0, FOP_DED
= gen_fcpy_d
},
4215 { 0x30006800, 0xfc1fffe0, FOP_DED
= gen_fabs_d
},
4216 { 0x30008800, 0xfc1fffe0, FOP_DED
= gen_helper_fsqrt_d
},
4217 { 0x3000a800, 0xfc1fffe0, FOP_DED
= gen_helper_frnd_d
},
4218 { 0x3000c800, 0xfc1fffe0, FOP_DED
= gen_fneg_d
},
4219 { 0x3000e800, 0xfc1fffe0, FOP_DED
= gen_fnegabs_d
},
4221 /* floating point class three */
4222 { 0x30000600, 0xfc00ffe0, FOP_WEWW
= gen_helper_fadd_s
},
4223 { 0x30002600, 0xfc00ffe0, FOP_WEWW
= gen_helper_fsub_s
},
4224 { 0x30004600, 0xfc00ffe0, FOP_WEWW
= gen_helper_fmpy_s
},
4225 { 0x30006600, 0xfc00ffe0, FOP_WEWW
= gen_helper_fdiv_s
},
4227 { 0x30000e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fadd_d
},
4228 { 0x30002e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fsub_d
},
4229 { 0x30004e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fmpy_d
},
4230 { 0x30006e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fdiv_d
},
4232 /* floating point class one */
4234 { 0x30000a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_d_s
},
4235 { 0x30002200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_s_d
},
4237 { 0x30008200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_w_s
},
4238 { 0x30008a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_dw_s
},
4239 { 0x3000a200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_w_d
},
4240 { 0x3000aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_dw_d
},
4242 { 0x30010200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_s_w
},
4243 { 0x30010a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_d_w
},
4244 { 0x30012200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_s_dw
},
4245 { 0x30012a00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_d_dw
},
4246 /* float/int truncate */
4247 { 0x30018200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_t_s_w
},
4248 { 0x30018a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_t_d_w
},
4249 { 0x3001a200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_t_s_dw
},
4250 { 0x3001aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_t_d_dw
},
4252 { 0x30028200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_uw_s
},
4253 { 0x30028a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_udw_s
},
4254 { 0x3002a200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_uw_d
},
4255 { 0x3002aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_udw_d
},
4257 { 0x30030200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_s_uw
},
4258 { 0x30030a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_d_uw
},
4259 { 0x30032200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_s_udw
},
4260 { 0x30032a00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_d_udw
},
4261 /* float/uint truncate */
4262 { 0x30038200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_t_s_uw
},
4263 { 0x30038a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_t_d_uw
},
4264 { 0x3003a200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_t_s_udw
},
4265 { 0x3003aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_t_d_udw
},
4267 /* floating point class two */
4268 { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c
},
4269 { 0x30000c00, 0xfc001fe0, trans_fcmp_d
},
4270 { 0x30002420, 0xffffffe0, trans_ftest_q
},
4271 { 0x30000420, 0xffff1fff, trans_ftest_t
},
4273 /* FID. Note that ra == rt == 0, which via fcpy puts 0 into fr0.
4274 This is machine/revision == 0, which is reserved for simulator. */
4275 { 0x30000000, 0xffffffff, FOP_WEW
= gen_fcpy_s
},
4282 #define FOP_WEW trans_fop_wew_0e, .f.wew
4283 #define FOP_DEW trans_fop_dew_0e, .f.dew
4284 #define FOP_WED trans_fop_wed_0e, .f.wed
4285 #define FOP_WEWW trans_fop_weww_0e, .f.weww
4287 static const DisasInsn table_float_0e
[] = {
4288 /* floating point class zero */
4289 { 0x38004000, 0xfc1fff20, FOP_WEW
= gen_fcpy_s
},
4290 { 0x38006000, 0xfc1fff20, FOP_WEW
= gen_fabs_s
},
4291 { 0x38008000, 0xfc1fff20, FOP_WEW
= gen_helper_fsqrt_s
},
4292 { 0x3800a000, 0xfc1fff20, FOP_WEW
= gen_helper_frnd_s
},
4293 { 0x3800c000, 0xfc1fff20, FOP_WEW
= gen_fneg_s
},
4294 { 0x3800e000, 0xfc1fff20, FOP_WEW
= gen_fnegabs_s
},
4296 { 0x38004800, 0xfc1fffe0, FOP_DED
= gen_fcpy_d
},
4297 { 0x38006800, 0xfc1fffe0, FOP_DED
= gen_fabs_d
},
4298 { 0x38008800, 0xfc1fffe0, FOP_DED
= gen_helper_fsqrt_d
},
4299 { 0x3800a800, 0xfc1fffe0, FOP_DED
= gen_helper_frnd_d
},
4300 { 0x3800c800, 0xfc1fffe0, FOP_DED
= gen_fneg_d
},
4301 { 0x3800e800, 0xfc1fffe0, FOP_DED
= gen_fnegabs_d
},
4303 /* floating point class three */
4304 { 0x38000600, 0xfc00ef20, FOP_WEWW
= gen_helper_fadd_s
},
4305 { 0x38002600, 0xfc00ef20, FOP_WEWW
= gen_helper_fsub_s
},
4306 { 0x38004600, 0xfc00ef20, FOP_WEWW
= gen_helper_fmpy_s
},
4307 { 0x38006600, 0xfc00ef20, FOP_WEWW
= gen_helper_fdiv_s
},
4309 { 0x38000e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fadd_d
},
4310 { 0x38002e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fsub_d
},
4311 { 0x38004e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fmpy_d
},
4312 { 0x38006e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fdiv_d
},
4314 { 0x38004700, 0xfc00ef60, trans_xmpyu
},
4316 /* floating point class one */
4318 { 0x38000a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_d_s
},
4319 { 0x38002200, 0xfc1fffc0, FOP_DEW
= gen_helper_fcnv_s_d
},
4321 { 0x38008200, 0xfc1ffe60, FOP_WEW
= gen_helper_fcnv_w_s
},
4322 { 0x38008a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_dw_s
},
4323 { 0x3800a200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_w_d
},
4324 { 0x3800aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_dw_d
},
4326 { 0x38010200, 0xfc1ffe60, FOP_WEW
= gen_helper_fcnv_s_w
},
4327 { 0x38010a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_d_w
},
4328 { 0x38012200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_s_dw
},
4329 { 0x38012a00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_d_dw
},
4330 /* float/int truncate */
4331 { 0x38018200, 0xfc1ffe60, FOP_WEW
= gen_helper_fcnv_t_s_w
},
4332 { 0x38018a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_t_d_w
},
4333 { 0x3801a200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_t_s_dw
},
4334 { 0x3801aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_t_d_dw
},
4336 { 0x38028200, 0xfc1ffe60, FOP_WEW
= gen_helper_fcnv_uw_s
},
4337 { 0x38028a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_udw_s
},
4338 { 0x3802a200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_uw_d
},
4339 { 0x3802aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_udw_d
},
4341 { 0x38030200, 0xfc1ffe60, FOP_WEW
= gen_helper_fcnv_s_uw
},
4342 { 0x38030a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_d_uw
},
4343 { 0x38032200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_s_udw
},
4344 { 0x38032a00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_d_udw
},
4345 /* float/uint truncate */
4346 { 0x38038200, 0xfc1ffe60, FOP_WEW
= gen_helper_fcnv_t_s_uw
},
4347 { 0x38038a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_t_d_uw
},
4348 { 0x3803a200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_t_s_udw
},
4349 { 0x3803aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_t_d_udw
},
4351 /* floating point class two */
4352 { 0x38000400, 0xfc000f60, trans_fcmp_s_0e
},
4353 { 0x38000c00, 0xfc001fe0, trans_fcmp_d
},
/* Convert the fmpyadd single-precision register encodings to standard.
 * Bit 4 of the 5-bit field selects the upper half of the FP register
 * file; the low 4 bits index within the half. */
static inline int fmpyadd_s_reg(unsigned r)
{
    unsigned half = (r & 16) ? 32 : 0;
    return 16 + half + (r & 15);
}
4369 static DisasJumpType
trans_fmpyadd(DisasContext
*ctx
,
4370 uint32_t insn
, bool is_sub
)
4372 unsigned tm
= extract32(insn
, 0, 5);
4373 unsigned f
= extract32(insn
, 5, 1);
4374 unsigned ra
= extract32(insn
, 6, 5);
4375 unsigned ta
= extract32(insn
, 11, 5);
4376 unsigned rm2
= extract32(insn
, 16, 5);
4377 unsigned rm1
= extract32(insn
, 21, 5);
4381 /* Independent multiply & add/sub, with undefined behaviour
4382 if outputs overlap inputs. */
4384 tm
= fmpyadd_s_reg(tm
);
4385 ra
= fmpyadd_s_reg(ra
);
4386 ta
= fmpyadd_s_reg(ta
);
4387 rm2
= fmpyadd_s_reg(rm2
);
4388 rm1
= fmpyadd_s_reg(rm1
);
4389 do_fop_weww(ctx
, tm
, rm1
, rm2
, gen_helper_fmpy_s
);
4390 do_fop_weww(ctx
, ta
, ta
, ra
,
4391 is_sub
? gen_helper_fsub_s
: gen_helper_fadd_s
);
4393 do_fop_dedd(ctx
, tm
, rm1
, rm2
, gen_helper_fmpy_d
);
4394 do_fop_dedd(ctx
, ta
, ta
, ra
,
4395 is_sub
? gen_helper_fsub_d
: gen_helper_fadd_d
);
4398 return nullify_end(ctx
, DISAS_NEXT
);
4401 static DisasJumpType
trans_fmpyfadd_s(DisasContext
*ctx
, uint32_t insn
,
4402 const DisasInsn
*di
)
4404 unsigned rt
= assemble_rt64(insn
);
4405 unsigned neg
= extract32(insn
, 5, 1);
4406 unsigned rm1
= assemble_ra64(insn
);
4407 unsigned rm2
= assemble_rb64(insn
);
4408 unsigned ra3
= assemble_rc64(insn
);
4412 a
= load_frw0_i32(rm1
);
4413 b
= load_frw0_i32(rm2
);
4414 c
= load_frw0_i32(ra3
);
4417 gen_helper_fmpynfadd_s(a
, cpu_env
, a
, b
, c
);
4419 gen_helper_fmpyfadd_s(a
, cpu_env
, a
, b
, c
);
4422 tcg_temp_free_i32(b
);
4423 tcg_temp_free_i32(c
);
4424 save_frw_i32(rt
, a
);
4425 tcg_temp_free_i32(a
);
4426 return nullify_end(ctx
, DISAS_NEXT
);
4429 static DisasJumpType
trans_fmpyfadd_d(DisasContext
*ctx
, uint32_t insn
,
4430 const DisasInsn
*di
)
4432 unsigned rt
= extract32(insn
, 0, 5);
4433 unsigned neg
= extract32(insn
, 5, 1);
4434 unsigned rm1
= extract32(insn
, 21, 5);
4435 unsigned rm2
= extract32(insn
, 16, 5);
4436 unsigned ra3
= assemble_rc64(insn
);
4445 gen_helper_fmpynfadd_d(a
, cpu_env
, a
, b
, c
);
4447 gen_helper_fmpyfadd_d(a
, cpu_env
, a
, b
, c
);
4450 tcg_temp_free_i64(b
);
4451 tcg_temp_free_i64(c
);
4453 tcg_temp_free_i64(a
);
4454 return nullify_end(ctx
, DISAS_NEXT
);
4457 static const DisasInsn table_fp_fused
[] = {
4458 { 0xb8000000u
, 0xfc000800u
, trans_fmpyfadd_s
},
4459 { 0xb8000800u
, 0xfc0019c0u
, trans_fmpyfadd_d
}
4462 static DisasJumpType
translate_table_int(DisasContext
*ctx
, uint32_t insn
,
4463 const DisasInsn table
[], size_t n
)
4466 for (i
= 0; i
< n
; ++i
) {
4467 if ((insn
& table
[i
].mask
) == table
[i
].insn
) {
4468 return table
[i
].trans(ctx
, insn
, &table
[i
]);
4471 qemu_log_mask(LOG_UNIMP
, "UNIMP insn %08x @ " TARGET_FMT_lx
"\n",
4472 insn
, ctx
->base
.pc_next
);
4473 return gen_illegal(ctx
);
4476 #define translate_table(ctx, insn, table) \
4477 translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
4479 static DisasJumpType
translate_one(DisasContext
*ctx
, uint32_t insn
)
4481 uint32_t opc
= extract32(insn
, 26, 6);
4484 case 0x00: /* system op */
4485 return translate_table(ctx
, insn
, table_system
);
4487 return translate_table(ctx
, insn
, table_mem_mgmt
);
4489 return translate_table(ctx
, insn
, table_arith_log
);
4491 return translate_table(ctx
, insn
, table_index_mem
);
4493 return trans_fmpyadd(ctx
, insn
, false);
4495 return trans_ldil(ctx
, insn
);
4497 return trans_copr_w(ctx
, insn
);
4499 return trans_addil(ctx
, insn
);
4501 return trans_copr_dw(ctx
, insn
);
4503 return translate_table(ctx
, insn
, table_float_0c
);
4505 return trans_ldo(ctx
, insn
);
4507 return translate_table(ctx
, insn
, table_float_0e
);
4510 return trans_load(ctx
, insn
, false, MO_UB
);
4512 return trans_load(ctx
, insn
, false, MO_TEUW
);
4514 return trans_load(ctx
, insn
, false, MO_TEUL
);
4516 return trans_load(ctx
, insn
, true, MO_TEUL
);
4518 return trans_fload_mod(ctx
, insn
);
4520 return trans_load_w(ctx
, insn
);
4522 return trans_store(ctx
, insn
, false, MO_UB
);
4524 return trans_store(ctx
, insn
, false, MO_TEUW
);
4526 return trans_store(ctx
, insn
, false, MO_TEUL
);
4528 return trans_store(ctx
, insn
, true, MO_TEUL
);
4530 return trans_fstore_mod(ctx
, insn
);
4532 return trans_store_w(ctx
, insn
);
4535 return trans_cmpb(ctx
, insn
, true, false, false);
4537 return trans_cmpb(ctx
, insn
, true, true, false);
4539 return trans_cmpb(ctx
, insn
, false, false, false);
4541 return trans_cmpb(ctx
, insn
, false, true, false);
4543 return trans_cmpiclr(ctx
, insn
);
4545 return trans_subi(ctx
, insn
);
4547 return trans_fmpyadd(ctx
, insn
, true);
4549 return trans_cmpb(ctx
, insn
, true, false, true);
4551 return trans_addb(ctx
, insn
, true, false);
4553 return trans_addb(ctx
, insn
, true, true);
4555 return trans_addb(ctx
, insn
, false, false);
4557 return trans_addb(ctx
, insn
, false, true);
4560 return trans_addi(ctx
, insn
);
4562 return translate_table(ctx
, insn
, table_fp_fused
);
4564 return trans_cmpb(ctx
, insn
, false, false, true);
4568 return trans_bb(ctx
, insn
);
4570 return trans_movb(ctx
, insn
, false);
4572 return trans_movb(ctx
, insn
, true);
4574 return translate_table(ctx
, insn
, table_sh_ex
);
4576 return translate_table(ctx
, insn
, table_depw
);
4578 return trans_be(ctx
, insn
, false);
4580 return trans_be(ctx
, insn
, true);
4582 return translate_table(ctx
, insn
, table_branch
);
4584 case 0x04: /* spopn */
4585 case 0x05: /* diag */
4586 case 0x0F: /* product specific */
4589 case 0x07: /* unassigned */
4590 case 0x15: /* unassigned */
4591 case 0x1D: /* unassigned */
4592 case 0x37: /* unassigned */
4595 #ifndef CONFIG_USER_ONLY
4596 /* Unassigned, but use as system-halt. */
4597 if (insn
== 0xfffdead0) {
4598 return gen_hlt(ctx
, 0); /* halt system */
4600 if (insn
== 0xfffdead1) {
4601 return gen_hlt(ctx
, 1); /* reset system */
4608 return gen_illegal(ctx
);
4611 static int hppa_tr_init_disas_context(DisasContextBase
*dcbase
,
4612 CPUState
*cs
, int max_insns
)
4614 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4618 ctx
->tb_flags
= ctx
->base
.tb
->flags
;
4620 #ifdef CONFIG_USER_ONLY
4621 ctx
->privilege
= MMU_USER_IDX
;
4622 ctx
->mmu_idx
= MMU_USER_IDX
;
4623 ctx
->iaoq_f
= ctx
->base
.pc_first
;
4624 ctx
->iaoq_b
= ctx
->base
.tb
->cs_base
;
4626 ctx
->privilege
= (ctx
->tb_flags
>> TB_FLAG_PRIV_SHIFT
) & 3;
4627 ctx
->mmu_idx
= (ctx
->tb_flags
& PSW_D
? ctx
->privilege
: MMU_PHYS_IDX
);
4629 /* Recover the IAOQ values from the GVA + PRIV. */
4630 uint64_t cs_base
= ctx
->base
.tb
->cs_base
;
4631 uint64_t iasq_f
= cs_base
& ~0xffffffffull
;
4632 int32_t diff
= cs_base
;
4634 ctx
->iaoq_f
= (ctx
->base
.pc_first
& ~iasq_f
) + ctx
->privilege
;
4635 ctx
->iaoq_b
= (diff
? ctx
->iaoq_f
+ diff
: -1);
4638 ctx
->iaoq_n_var
= NULL
;
4640 /* Bound the number of instructions by those left on the page. */
4641 bound
= -(ctx
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
4642 bound
= MIN(max_insns
, bound
);
4646 memset(ctx
->tempr
, 0, sizeof(ctx
->tempr
));
4647 memset(ctx
->templ
, 0, sizeof(ctx
->templ
));
4652 static void hppa_tr_tb_start(DisasContextBase
*dcbase
, CPUState
*cs
)
4654 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4656 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4657 ctx
->null_cond
= cond_make_f();
4658 ctx
->psw_n_nonzero
= false;
4659 if (ctx
->tb_flags
& PSW_N
) {
4660 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
4661 ctx
->psw_n_nonzero
= true;
4663 ctx
->null_lab
= NULL
;
4666 static void hppa_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
4668 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4670 tcg_gen_insn_start(ctx
->iaoq_f
, ctx
->iaoq_b
);
4673 static bool hppa_tr_breakpoint_check(DisasContextBase
*dcbase
, CPUState
*cs
,
4674 const CPUBreakpoint
*bp
)
4676 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4678 ctx
->base
.is_jmp
= gen_excp(ctx
, EXCP_DEBUG
);
4679 ctx
->base
.pc_next
+= 4;
4683 static void hppa_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
4685 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4686 CPUHPPAState
*env
= cs
->env_ptr
;
4690 /* Execute one insn. */
4691 #ifdef CONFIG_USER_ONLY
4692 if (ctx
->base
.pc_next
< TARGET_PAGE_SIZE
) {
4693 ret
= do_page_zero(ctx
);
4694 assert(ret
!= DISAS_NEXT
);
4698 /* Always fetch the insn, even if nullified, so that we check
4699 the page permissions for execute. */
4700 uint32_t insn
= cpu_ldl_code(env
, ctx
->base
.pc_next
);
4702 /* Set up the IA queue for the next insn.
4703 This will be overwritten by a branch. */
4704 if (ctx
->iaoq_b
== -1) {
4706 ctx
->iaoq_n_var
= get_temp(ctx
);
4707 tcg_gen_addi_reg(ctx
->iaoq_n_var
, cpu_iaoq_b
, 4);
4709 ctx
->iaoq_n
= ctx
->iaoq_b
+ 4;
4710 ctx
->iaoq_n_var
= NULL
;
4713 if (unlikely(ctx
->null_cond
.c
== TCG_COND_ALWAYS
)) {
4714 ctx
->null_cond
.c
= TCG_COND_NEVER
;
4718 ret
= translate_one(ctx
, insn
);
4719 assert(ctx
->null_lab
== NULL
);
4723 /* Free any temporaries allocated. */
4724 for (i
= 0, n
= ctx
->ntempr
; i
< n
; ++i
) {
4725 tcg_temp_free(ctx
->tempr
[i
]);
4726 ctx
->tempr
[i
] = NULL
;
4728 for (i
= 0, n
= ctx
->ntempl
; i
< n
; ++i
) {
4729 tcg_temp_free_tl(ctx
->templ
[i
]);
4730 ctx
->templ
[i
] = NULL
;
4735 /* Advance the insn queue. Note that this check also detects
4736 a priority change within the instruction queue. */
4737 if (ret
== DISAS_NEXT
&& ctx
->iaoq_b
!= ctx
->iaoq_f
+ 4) {
4738 if (ctx
->iaoq_b
!= -1 && ctx
->iaoq_n
!= -1
4739 && use_goto_tb(ctx
, ctx
->iaoq_b
)
4740 && (ctx
->null_cond
.c
== TCG_COND_NEVER
4741 || ctx
->null_cond
.c
== TCG_COND_ALWAYS
)) {
4742 nullify_set(ctx
, ctx
->null_cond
.c
== TCG_COND_ALWAYS
);
4743 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, ctx
->iaoq_n
);
4744 ret
= DISAS_NORETURN
;
4746 ret
= DISAS_IAQ_N_STALE
;
4749 ctx
->iaoq_f
= ctx
->iaoq_b
;
4750 ctx
->iaoq_b
= ctx
->iaoq_n
;
4751 ctx
->base
.is_jmp
= ret
;
4752 ctx
->base
.pc_next
+= 4;
4754 if (ret
== DISAS_NORETURN
|| ret
== DISAS_IAQ_N_UPDATED
) {
4757 if (ctx
->iaoq_f
== -1) {
4758 tcg_gen_mov_reg(cpu_iaoq_f
, cpu_iaoq_b
);
4759 copy_iaoq_entry(cpu_iaoq_b
, ctx
->iaoq_n
, ctx
->iaoq_n_var
);
4760 #ifndef CONFIG_USER_ONLY
4761 tcg_gen_mov_i64(cpu_iasq_f
, cpu_iasq_b
);
4764 ctx
->base
.is_jmp
= DISAS_IAQ_N_UPDATED
;
4765 } else if (ctx
->iaoq_b
== -1) {
4766 tcg_gen_mov_reg(cpu_iaoq_b
, ctx
->iaoq_n_var
);
4770 static void hppa_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
4772 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4773 DisasJumpType is_jmp
= ctx
->base
.is_jmp
;
4776 case DISAS_NORETURN
:
4778 case DISAS_TOO_MANY
:
4779 case DISAS_IAQ_N_STALE
:
4780 case DISAS_IAQ_N_STALE_EXIT
:
4781 copy_iaoq_entry(cpu_iaoq_f
, ctx
->iaoq_f
, cpu_iaoq_f
);
4782 copy_iaoq_entry(cpu_iaoq_b
, ctx
->iaoq_b
, cpu_iaoq_b
);
4785 case DISAS_IAQ_N_UPDATED
:
4786 if (ctx
->base
.singlestep_enabled
) {
4787 gen_excp_1(EXCP_DEBUG
);
4788 } else if (is_jmp
== DISAS_IAQ_N_STALE_EXIT
) {
4791 tcg_gen_lookup_and_goto_ptr();
4795 g_assert_not_reached();
4799 static void hppa_tr_disas_log(const DisasContextBase
*dcbase
, CPUState
*cs
)
4801 target_ulong pc
= dcbase
->pc_first
;
4803 #ifdef CONFIG_USER_ONLY
4806 qemu_log("IN:\n0x00000000: (null)\n");
4809 qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
4812 qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
4815 qemu_log("IN:\n0x00000100: syscall\n");
4820 qemu_log("IN: %s\n", lookup_symbol(pc
));
4821 log_target_disas(cs
, pc
, dcbase
->tb
->size
);
4824 static const TranslatorOps hppa_tr_ops
= {
4825 .init_disas_context
= hppa_tr_init_disas_context
,
4826 .tb_start
= hppa_tr_tb_start
,
4827 .insn_start
= hppa_tr_insn_start
,
4828 .breakpoint_check
= hppa_tr_breakpoint_check
,
4829 .translate_insn
= hppa_tr_translate_insn
,
4830 .tb_stop
= hppa_tr_tb_stop
,
4831 .disas_log
= hppa_tr_disas_log
,
4834 void gen_intermediate_code(CPUState
*cs
, struct TranslationBlock
*tb
)
4838 translator_loop(&hppa_tr_ops
, &ctx
.base
, cs
, tb
);
4841 void restore_state_to_opc(CPUHPPAState
*env
, TranslationBlock
*tb
,
4844 env
->iaoq_f
= data
[0];
4845 if (data
[1] != (target_ureg
)-1) {
4846 env
->iaoq_b
= data
[1];
4848 /* Since we were executing the instruction at IAOQ_F, and took some
4849 sort of action that provoked the cpu_restore_state, we can infer
4850 that the instruction was not nullified. */