target/arm: Deliver BKPT/BRK exceptions to correct exception level
[qemu/ar7.git] / target / ppc / translate / fp-impl.inc.c
blob9dcff947c0a615c911bcdfc886fdf45f188aad0c
1 /*
2 * translate-fp.c
4 * Standard FPU translation
5 */
7 static inline void gen_reset_fpstatus(void)
9 gen_helper_reset_fpstatus(cpu_env);
12 static inline void gen_compute_fprf_float64(TCGv_i64 arg)
14 gen_helper_compute_fprf_float64(cpu_env, arg);
15 gen_helper_float_check_status(cpu_env);
18 #if defined(TARGET_PPC64)
19 static void gen_set_cr1_from_fpscr(DisasContext *ctx)
21 TCGv_i32 tmp = tcg_temp_new_i32();
22 tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
23 tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
24 tcg_temp_free_i32(tmp);
26 #else
27 static void gen_set_cr1_from_fpscr(DisasContext *ctx)
29 tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
31 #endif
/*** Floating-Point arithmetic ***/
/*
 * Three-operand (A, C, B) FP op: frD = op(frA, frC, frB).
 * isfloat rounds the result to single precision; set_fprf updates
 * FPSCR[FPRF]; Rc=1 copies the FPSCR summary bits into CR1.
 */
#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    TCGv_i64 t2; \
    TCGv_i64 t3; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    t2 = tcg_temp_new_i64(); \
    t3 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rA(ctx->opcode)); \
    get_fpr(t1, rC(ctx->opcode)); \
    get_fpr(t2, rB(ctx->opcode)); \
    gen_helper_f##op(t3, cpu_env, t0, t1, t2); \
    if (isfloat) { \
        gen_helper_frsp(t3, cpu_env, t3); \
    } \
    set_fpr(rD(ctx->opcode), t3); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t3); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
    tcg_temp_free_i64(t0); \
    tcg_temp_free_i64(t1); \
    tcg_temp_free_i64(t2); \
    tcg_temp_free_i64(t3); \
}

/* Emit both the double (opcode 0x3F) and single (0x3B) variants. */
#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \
_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
/* Two-operand (A, B) FP op: frD = op(frA, frB). */
#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    TCGv_i64 t2; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    t2 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rA(ctx->opcode)); \
    get_fpr(t1, rB(ctx->opcode)); \
    gen_helper_f##op(t2, cpu_env, t0, t1); \
    if (isfloat) { \
        gen_helper_frsp(t2, cpu_env, t2); \
    } \
    set_fpr(rD(ctx->opcode), t2); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t2); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
    tcg_temp_free_i64(t0); \
    tcg_temp_free_i64(t1); \
    tcg_temp_free_i64(t2); \
}

/* Emit both the double (opcode 0x3F) and single (0x3B) variants. */
#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
/* Two-operand (A, C) FP op: frD = op(frA, frC) — used by fmul/fmuls. */
#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    TCGv_i64 t2; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    t2 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rA(ctx->opcode)); \
    get_fpr(t1, rC(ctx->opcode)); \
    gen_helper_f##op(t2, cpu_env, t0, t1); \
    if (isfloat) { \
        gen_helper_frsp(t2, cpu_env, t2); \
    } \
    set_fpr(rD(ctx->opcode), t2); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t2); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
    tcg_temp_free_i64(t0); \
    tcg_temp_free_i64(t1); \
    tcg_temp_free_i64(t2); \
}

/* Emit both the double (opcode 0x3F) and single (0x3B) variants. */
#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
/* One-operand (B) FP op: frD = op(frB) — rounds/converts. */
#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rB(ctx->opcode)); \
    gen_helper_f##name(t1, cpu_env, t0); \
    set_fpr(rD(ctx->opcode), t1); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t1); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
    tcg_temp_free_i64(t0); \
    tcg_temp_free_i64(t1); \
}
/* One-operand (B) FP op with its own opcode slots: frD = op(frB). */
#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
    TCGv_i64 t0; \
    TCGv_i64 t1; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    t0 = tcg_temp_new_i64(); \
    t1 = tcg_temp_new_i64(); \
    gen_reset_fpstatus(); \
    get_fpr(t0, rB(ctx->opcode)); \
    gen_helper_f##name(t1, cpu_env, t0); \
    set_fpr(rD(ctx->opcode), t1); \
    if (set_fprf) { \
        gen_compute_fprf_float64(t1); \
    } \
    if (unlikely(Rc(ctx->opcode) != 0)) { \
        gen_set_cr1_from_fpscr(ctx); \
    } \
    tcg_temp_free_i64(t0); \
    tcg_temp_free_i64(t1); \
}
194 /* fadd - fadds */
195 GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
196 /* fdiv - fdivs */
197 GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
198 /* fmul - fmuls */
199 GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);
201 /* fre */
202 GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);
204 /* fres */
205 GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);
207 /* frsqrte */
208 GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
210 /* frsqrtes */
211 static void gen_frsqrtes(DisasContext *ctx)
213 TCGv_i64 t0;
214 TCGv_i64 t1;
215 if (unlikely(!ctx->fpu_enabled)) {
216 gen_exception(ctx, POWERPC_EXCP_FPU);
217 return;
219 t0 = tcg_temp_new_i64();
220 t1 = tcg_temp_new_i64();
221 gen_reset_fpstatus();
222 get_fpr(t0, rB(ctx->opcode));
223 gen_helper_frsqrte(t1, cpu_env, t0);
224 gen_helper_frsp(t1, cpu_env, t1);
225 set_fpr(rD(ctx->opcode), t1);
226 gen_compute_fprf_float64(t1);
227 if (unlikely(Rc(ctx->opcode) != 0)) {
228 gen_set_cr1_from_fpscr(ctx);
230 tcg_temp_free_i64(t0);
231 tcg_temp_free_i64(t1);
234 /* fsel */
235 _GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
236 /* fsub - fsubs */
237 GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
238 /* Optional: */
240 /* fsqrt */
241 static void gen_fsqrt(DisasContext *ctx)
243 TCGv_i64 t0;
244 TCGv_i64 t1;
245 if (unlikely(!ctx->fpu_enabled)) {
246 gen_exception(ctx, POWERPC_EXCP_FPU);
247 return;
249 t0 = tcg_temp_new_i64();
250 t1 = tcg_temp_new_i64();
251 gen_reset_fpstatus();
252 get_fpr(t0, rB(ctx->opcode));
253 gen_helper_fsqrt(t1, cpu_env, t0);
254 set_fpr(rD(ctx->opcode), t1);
255 gen_compute_fprf_float64(t1);
256 if (unlikely(Rc(ctx->opcode) != 0)) {
257 gen_set_cr1_from_fpscr(ctx);
259 tcg_temp_free_i64(t0);
260 tcg_temp_free_i64(t1);
263 static void gen_fsqrts(DisasContext *ctx)
265 TCGv_i64 t0;
266 TCGv_i64 t1;
267 if (unlikely(!ctx->fpu_enabled)) {
268 gen_exception(ctx, POWERPC_EXCP_FPU);
269 return;
271 t0 = tcg_temp_new_i64();
272 t1 = tcg_temp_new_i64();
273 gen_reset_fpstatus();
274 get_fpr(t0, rB(ctx->opcode));
275 gen_helper_fsqrt(t1, cpu_env, t0);
276 gen_helper_frsp(t1, cpu_env, t1);
277 set_fpr(rD(ctx->opcode), t1);
278 gen_compute_fprf_float64(t1);
279 if (unlikely(Rc(ctx->opcode) != 0)) {
280 gen_set_cr1_from_fpscr(ctx);
282 tcg_temp_free_i64(t0);
283 tcg_temp_free_i64(t1);
286 /*** Floating-Point multiply-and-add ***/
287 /* fmadd - fmadds */
288 GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
289 /* fmsub - fmsubs */
290 GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
291 /* fnmadd - fnmadds */
292 GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
293 /* fnmsub - fnmsubs */
294 GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);
296 /*** Floating-Point round & convert ***/
297 /* fctiw */
298 GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
299 /* fctiwu */
300 GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
301 /* fctiwz */
302 GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
303 /* fctiwuz */
304 GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
305 /* frsp */
306 GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
307 /* fcfid */
308 GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
309 /* fcfids */
310 GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
311 /* fcfidu */
312 GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
313 /* fcfidus */
314 GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
315 /* fctid */
316 GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
317 /* fctidu */
318 GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
319 /* fctidz */
320 GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
321 /* fctidu */
322 GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);
324 /* frin */
325 GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
326 /* friz */
327 GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
328 /* frip */
329 GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
330 /* frim */
331 GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
333 static void gen_ftdiv(DisasContext *ctx)
335 TCGv_i64 t0;
336 TCGv_i64 t1;
337 if (unlikely(!ctx->fpu_enabled)) {
338 gen_exception(ctx, POWERPC_EXCP_FPU);
339 return;
341 t0 = tcg_temp_new_i64();
342 t1 = tcg_temp_new_i64();
343 get_fpr(t0, rA(ctx->opcode));
344 get_fpr(t1, rB(ctx->opcode));
345 gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], t0, t1);
346 tcg_temp_free_i64(t0);
347 tcg_temp_free_i64(t1);
350 static void gen_ftsqrt(DisasContext *ctx)
352 TCGv_i64 t0;
353 if (unlikely(!ctx->fpu_enabled)) {
354 gen_exception(ctx, POWERPC_EXCP_FPU);
355 return;
357 t0 = tcg_temp_new_i64();
358 get_fpr(t0, rB(ctx->opcode));
359 gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], t0);
360 tcg_temp_free_i64(t0);
365 /*** Floating-Point compare ***/
367 /* fcmpo */
368 static void gen_fcmpo(DisasContext *ctx)
370 TCGv_i32 crf;
371 TCGv_i64 t0;
372 TCGv_i64 t1;
373 if (unlikely(!ctx->fpu_enabled)) {
374 gen_exception(ctx, POWERPC_EXCP_FPU);
375 return;
377 t0 = tcg_temp_new_i64();
378 t1 = tcg_temp_new_i64();
379 gen_reset_fpstatus();
380 crf = tcg_const_i32(crfD(ctx->opcode));
381 get_fpr(t0, rA(ctx->opcode));
382 get_fpr(t1, rB(ctx->opcode));
383 gen_helper_fcmpo(cpu_env, t0, t1, crf);
384 tcg_temp_free_i32(crf);
385 gen_helper_float_check_status(cpu_env);
386 tcg_temp_free_i64(t0);
387 tcg_temp_free_i64(t1);
390 /* fcmpu */
391 static void gen_fcmpu(DisasContext *ctx)
393 TCGv_i32 crf;
394 TCGv_i64 t0;
395 TCGv_i64 t1;
396 if (unlikely(!ctx->fpu_enabled)) {
397 gen_exception(ctx, POWERPC_EXCP_FPU);
398 return;
400 t0 = tcg_temp_new_i64();
401 t1 = tcg_temp_new_i64();
402 gen_reset_fpstatus();
403 crf = tcg_const_i32(crfD(ctx->opcode));
404 get_fpr(t0, rA(ctx->opcode));
405 get_fpr(t1, rB(ctx->opcode));
406 gen_helper_fcmpu(cpu_env, t0, t1, crf);
407 tcg_temp_free_i32(crf);
408 gen_helper_float_check_status(cpu_env);
409 tcg_temp_free_i64(t0);
410 tcg_temp_free_i64(t1);
413 /*** Floating-point move ***/
414 /* fabs */
415 /* XXX: beware that fabs never checks for NaNs nor update FPSCR */
416 static void gen_fabs(DisasContext *ctx)
418 TCGv_i64 t0;
419 TCGv_i64 t1;
420 if (unlikely(!ctx->fpu_enabled)) {
421 gen_exception(ctx, POWERPC_EXCP_FPU);
422 return;
424 t0 = tcg_temp_new_i64();
425 t1 = tcg_temp_new_i64();
426 get_fpr(t0, rB(ctx->opcode));
427 tcg_gen_andi_i64(t1, t0, ~(1ULL << 63));
428 set_fpr(rD(ctx->opcode), t1);
429 if (unlikely(Rc(ctx->opcode))) {
430 gen_set_cr1_from_fpscr(ctx);
432 tcg_temp_free_i64(t0);
433 tcg_temp_free_i64(t1);
436 /* fmr - fmr. */
437 /* XXX: beware that fmr never checks for NaNs nor update FPSCR */
438 static void gen_fmr(DisasContext *ctx)
440 TCGv_i64 t0;
441 if (unlikely(!ctx->fpu_enabled)) {
442 gen_exception(ctx, POWERPC_EXCP_FPU);
443 return;
445 t0 = tcg_temp_new_i64();
446 get_fpr(t0, rB(ctx->opcode));
447 set_fpr(rD(ctx->opcode), t0);
448 if (unlikely(Rc(ctx->opcode))) {
449 gen_set_cr1_from_fpscr(ctx);
451 tcg_temp_free_i64(t0);
454 /* fnabs */
455 /* XXX: beware that fnabs never checks for NaNs nor update FPSCR */
456 static void gen_fnabs(DisasContext *ctx)
458 TCGv_i64 t0;
459 TCGv_i64 t1;
460 if (unlikely(!ctx->fpu_enabled)) {
461 gen_exception(ctx, POWERPC_EXCP_FPU);
462 return;
464 t0 = tcg_temp_new_i64();
465 t1 = tcg_temp_new_i64();
466 get_fpr(t0, rB(ctx->opcode));
467 tcg_gen_ori_i64(t1, t0, 1ULL << 63);
468 set_fpr(rD(ctx->opcode), t1);
469 if (unlikely(Rc(ctx->opcode))) {
470 gen_set_cr1_from_fpscr(ctx);
472 tcg_temp_free_i64(t0);
473 tcg_temp_free_i64(t1);
476 /* fneg */
477 /* XXX: beware that fneg never checks for NaNs nor update FPSCR */
478 static void gen_fneg(DisasContext *ctx)
480 TCGv_i64 t0;
481 TCGv_i64 t1;
482 if (unlikely(!ctx->fpu_enabled)) {
483 gen_exception(ctx, POWERPC_EXCP_FPU);
484 return;
486 t0 = tcg_temp_new_i64();
487 t1 = tcg_temp_new_i64();
488 get_fpr(t0, rB(ctx->opcode));
489 tcg_gen_xori_i64(t1, t0, 1ULL << 63);
490 set_fpr(rD(ctx->opcode), t1);
491 if (unlikely(Rc(ctx->opcode))) {
492 gen_set_cr1_from_fpscr(ctx);
494 tcg_temp_free_i64(t0);
495 tcg_temp_free_i64(t1);
498 /* fcpsgn: PowerPC 2.05 specification */
499 /* XXX: beware that fcpsgn never checks for NaNs nor update FPSCR */
500 static void gen_fcpsgn(DisasContext *ctx)
502 TCGv_i64 t0;
503 TCGv_i64 t1;
504 TCGv_i64 t2;
505 if (unlikely(!ctx->fpu_enabled)) {
506 gen_exception(ctx, POWERPC_EXCP_FPU);
507 return;
509 t0 = tcg_temp_new_i64();
510 t1 = tcg_temp_new_i64();
511 t2 = tcg_temp_new_i64();
512 get_fpr(t0, rA(ctx->opcode));
513 get_fpr(t1, rB(ctx->opcode));
514 tcg_gen_deposit_i64(t2, t0, t1, 0, 63);
515 set_fpr(rD(ctx->opcode), t2);
516 if (unlikely(Rc(ctx->opcode))) {
517 gen_set_cr1_from_fpscr(ctx);
519 tcg_temp_free_i64(t0);
520 tcg_temp_free_i64(t1);
521 tcg_temp_free_i64(t2);
524 static void gen_fmrgew(DisasContext *ctx)
526 TCGv_i64 b0;
527 TCGv_i64 t0;
528 TCGv_i64 t1;
529 if (unlikely(!ctx->fpu_enabled)) {
530 gen_exception(ctx, POWERPC_EXCP_FPU);
531 return;
533 b0 = tcg_temp_new_i64();
534 t0 = tcg_temp_new_i64();
535 t1 = tcg_temp_new_i64();
536 get_fpr(t0, rB(ctx->opcode));
537 tcg_gen_shri_i64(b0, t0, 32);
538 get_fpr(t0, rA(ctx->opcode));
539 tcg_gen_deposit_i64(t1, t0, b0, 0, 32);
540 set_fpr(rD(ctx->opcode), t1);
541 tcg_temp_free_i64(b0);
542 tcg_temp_free_i64(t0);
543 tcg_temp_free_i64(t1);
546 static void gen_fmrgow(DisasContext *ctx)
548 TCGv_i64 t0;
549 TCGv_i64 t1;
550 TCGv_i64 t2;
551 if (unlikely(!ctx->fpu_enabled)) {
552 gen_exception(ctx, POWERPC_EXCP_FPU);
553 return;
555 t0 = tcg_temp_new_i64();
556 t1 = tcg_temp_new_i64();
557 t2 = tcg_temp_new_i64();
558 get_fpr(t0, rB(ctx->opcode));
559 get_fpr(t1, rA(ctx->opcode));
560 tcg_gen_deposit_i64(t2, t0, t1, 32, 32);
561 set_fpr(rD(ctx->opcode), t2);
562 tcg_temp_free_i64(t0);
563 tcg_temp_free_i64(t1);
564 tcg_temp_free_i64(t2);
567 /*** Floating-Point status & ctrl register ***/
569 /* mcrfs */
570 static void gen_mcrfs(DisasContext *ctx)
572 TCGv tmp = tcg_temp_new();
573 TCGv_i32 tmask;
574 TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
575 int bfa;
576 int nibble;
577 int shift;
579 if (unlikely(!ctx->fpu_enabled)) {
580 gen_exception(ctx, POWERPC_EXCP_FPU);
581 return;
583 bfa = crfS(ctx->opcode);
584 nibble = 7 - bfa;
585 shift = 4 * nibble;
586 tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
587 tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
588 tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)],
589 0xf);
590 tcg_temp_free(tmp);
591 tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
592 /* Only the exception bits (including FX) should be cleared if read */
593 tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr,
594 ~((0xF << shift) & FP_EX_CLEAR_BITS));
595 /* FEX and VX need to be updated, so don't set fpscr directly */
596 tmask = tcg_const_i32(1 << nibble);
597 gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
598 tcg_temp_free_i32(tmask);
599 tcg_temp_free_i64(tnew_fpscr);
602 /* mffs */
603 static void gen_mffs(DisasContext *ctx)
605 TCGv_i64 t0;
606 if (unlikely(!ctx->fpu_enabled)) {
607 gen_exception(ctx, POWERPC_EXCP_FPU);
608 return;
610 t0 = tcg_temp_new_i64();
611 gen_reset_fpstatus();
612 tcg_gen_extu_tl_i64(t0, cpu_fpscr);
613 set_fpr(rD(ctx->opcode), t0);
614 if (unlikely(Rc(ctx->opcode))) {
615 gen_set_cr1_from_fpscr(ctx);
617 tcg_temp_free_i64(t0);
620 /* mtfsb0 */
621 static void gen_mtfsb0(DisasContext *ctx)
623 uint8_t crb;
625 if (unlikely(!ctx->fpu_enabled)) {
626 gen_exception(ctx, POWERPC_EXCP_FPU);
627 return;
629 crb = 31 - crbD(ctx->opcode);
630 gen_reset_fpstatus();
631 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
632 TCGv_i32 t0;
633 t0 = tcg_const_i32(crb);
634 gen_helper_fpscr_clrbit(cpu_env, t0);
635 tcg_temp_free_i32(t0);
637 if (unlikely(Rc(ctx->opcode) != 0)) {
638 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
639 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
643 /* mtfsb1 */
644 static void gen_mtfsb1(DisasContext *ctx)
646 uint8_t crb;
648 if (unlikely(!ctx->fpu_enabled)) {
649 gen_exception(ctx, POWERPC_EXCP_FPU);
650 return;
652 crb = 31 - crbD(ctx->opcode);
653 gen_reset_fpstatus();
654 /* XXX: we pretend we can only do IEEE floating-point computations */
655 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
656 TCGv_i32 t0;
657 t0 = tcg_const_i32(crb);
658 gen_helper_fpscr_setbit(cpu_env, t0);
659 tcg_temp_free_i32(t0);
661 if (unlikely(Rc(ctx->opcode) != 0)) {
662 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
663 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
665 /* We can raise a differed exception */
666 gen_helper_float_check_status(cpu_env);
669 /* mtfsf */
670 static void gen_mtfsf(DisasContext *ctx)
672 TCGv_i32 t0;
673 TCGv_i64 t1;
674 int flm, l, w;
676 if (unlikely(!ctx->fpu_enabled)) {
677 gen_exception(ctx, POWERPC_EXCP_FPU);
678 return;
680 flm = FPFLM(ctx->opcode);
681 l = FPL(ctx->opcode);
682 w = FPW(ctx->opcode);
683 if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
684 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
685 return;
687 gen_reset_fpstatus();
688 if (l) {
689 t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
690 } else {
691 t0 = tcg_const_i32(flm << (w * 8));
693 t1 = tcg_temp_new_i64();
694 get_fpr(t1, rB(ctx->opcode));
695 gen_helper_store_fpscr(cpu_env, t1, t0);
696 tcg_temp_free_i32(t0);
697 if (unlikely(Rc(ctx->opcode) != 0)) {
698 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
699 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
701 /* We can raise a differed exception */
702 gen_helper_float_check_status(cpu_env);
703 tcg_temp_free_i64(t1);
706 /* mtfsfi */
707 static void gen_mtfsfi(DisasContext *ctx)
709 int bf, sh, w;
710 TCGv_i64 t0;
711 TCGv_i32 t1;
713 if (unlikely(!ctx->fpu_enabled)) {
714 gen_exception(ctx, POWERPC_EXCP_FPU);
715 return;
717 w = FPW(ctx->opcode);
718 bf = FPBF(ctx->opcode);
719 if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
720 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
721 return;
723 sh = (8 * w) + 7 - bf;
724 gen_reset_fpstatus();
725 t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
726 t1 = tcg_const_i32(1 << sh);
727 gen_helper_store_fpscr(cpu_env, t0, t1);
728 tcg_temp_free_i64(t0);
729 tcg_temp_free_i32(t1);
730 if (unlikely(Rc(ctx->opcode) != 0)) {
731 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
732 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
734 /* We can raise a differed exception */
735 gen_helper_float_check_status(cpu_env);
/*** Floating-point load ***/
/* D-form FP load: frD = mem[rA|0 + d] */
#define GEN_LDF(name, ldop, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv EA; \
    TCGv_i64 t0; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_FLOAT); \
    EA = tcg_temp_new(); \
    t0 = tcg_temp_new_i64(); \
    gen_addr_imm_index(ctx, EA, 0); \
    gen_qemu_##ldop(ctx, t0, EA); \
    set_fpr(rD(ctx->opcode), t0); \
    tcg_temp_free(EA); \
    tcg_temp_free_i64(t0); \
}
/* D-form FP load with update: rA must be non-zero; rA := EA afterwards. */
#define GEN_LDUF(name, ldop, opc, type) \
static void glue(gen_, name##u)(DisasContext *ctx) \
{ \
    TCGv EA; \
    TCGv_i64 t0; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    if (unlikely(rA(ctx->opcode) == 0)) { \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_FLOAT); \
    EA = tcg_temp_new(); \
    t0 = tcg_temp_new_i64(); \
    gen_addr_imm_index(ctx, EA, 0); \
    gen_qemu_##ldop(ctx, t0, EA); \
    set_fpr(rD(ctx->opcode), t0); \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
    tcg_temp_free(EA); \
    tcg_temp_free_i64(t0); \
}
/*
 * X-form FP load with update: rA must be non-zero; rA := EA afterwards.
 *
 * Fix: t0 was allocated before the rA==0 invalid-form check, leaking a
 * TCG temporary on the early-return path; allocate after the checks,
 * matching GEN_LDUF.
 */
#define GEN_LDUXF(name, ldop, opc, type) \
static void glue(gen_, name##ux)(DisasContext *ctx) \
{ \
    TCGv EA; \
    TCGv_i64 t0; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    if (unlikely(rA(ctx->opcode) == 0)) { \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_FLOAT); \
    EA = tcg_temp_new(); \
    t0 = tcg_temp_new_i64(); \
    gen_addr_reg_index(ctx, EA); \
    gen_qemu_##ldop(ctx, t0, EA); \
    set_fpr(rD(ctx->opcode), t0); \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
    tcg_temp_free(EA); \
    tcg_temp_free_i64(t0); \
}
/* X-form FP load: frD = mem[rA|0 + rB] */
#define GEN_LDXF(name, ldop, opc2, opc3, type) \
static void glue(gen_, name##x)(DisasContext *ctx) \
{ \
    TCGv EA; \
    TCGv_i64 t0; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_FLOAT); \
    EA = tcg_temp_new(); \
    t0 = tcg_temp_new_i64(); \
    gen_addr_reg_index(ctx, EA); \
    gen_qemu_##ldop(ctx, t0, EA); \
    set_fpr(rD(ctx->opcode), t0); \
    tcg_temp_free(EA); \
    tcg_temp_free_i64(t0); \
}
/* Emit all four addressing variants of an FP load: base, update, ux, x. */
#define GEN_LDFS(name, ldop, op, type) \
GEN_LDF(name, ldop, op | 0x20, type); \
GEN_LDUF(name, ldop, op | 0x21, type); \
GEN_LDUXF(name, ldop, op | 0x01, type); \
GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
831 static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr)
833 TCGv_i32 tmp = tcg_temp_new_i32();
834 tcg_gen_qemu_ld_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
835 gen_helper_todouble(dest, tmp);
836 tcg_temp_free_i32(tmp);
839 /* lfd lfdu lfdux lfdx */
840 GEN_LDFS(lfd, ld64_i64, 0x12, PPC_FLOAT);
841 /* lfs lfsu lfsux lfsx */
842 GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT);
844 /* lfdepx (external PID lfdx) */
845 static void gen_lfdepx(DisasContext *ctx)
847 TCGv EA;
848 TCGv_i64 t0;
849 CHK_SV;
850 if (unlikely(!ctx->fpu_enabled)) {
851 gen_exception(ctx, POWERPC_EXCP_FPU);
852 return;
854 gen_set_access_type(ctx, ACCESS_FLOAT);
855 EA = tcg_temp_new();
856 t0 = tcg_temp_new_i64();
857 gen_addr_reg_index(ctx, EA);
858 tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_Q));
859 set_fpr(rD(ctx->opcode), t0);
860 tcg_temp_free(EA);
861 tcg_temp_free_i64(t0);
864 /* lfdp */
865 static void gen_lfdp(DisasContext *ctx)
867 TCGv EA;
868 TCGv_i64 t0;
869 if (unlikely(!ctx->fpu_enabled)) {
870 gen_exception(ctx, POWERPC_EXCP_FPU);
871 return;
873 gen_set_access_type(ctx, ACCESS_FLOAT);
874 EA = tcg_temp_new();
875 gen_addr_imm_index(ctx, EA, 0);
876 t0 = tcg_temp_new_i64();
878 * We only need to swap high and low halves. gen_qemu_ld64_i64
879 * does necessary 64-bit byteswap already.
881 if (unlikely(ctx->le_mode)) {
882 gen_qemu_ld64_i64(ctx, t0, EA);
883 set_fpr(rD(ctx->opcode) + 1, t0);
884 tcg_gen_addi_tl(EA, EA, 8);
885 gen_qemu_ld64_i64(ctx, t0, EA);
886 set_fpr(rD(ctx->opcode), t0);
887 } else {
888 gen_qemu_ld64_i64(ctx, t0, EA);
889 set_fpr(rD(ctx->opcode), t0);
890 tcg_gen_addi_tl(EA, EA, 8);
891 gen_qemu_ld64_i64(ctx, t0, EA);
892 set_fpr(rD(ctx->opcode) + 1, t0);
894 tcg_temp_free(EA);
895 tcg_temp_free_i64(t0);
898 /* lfdpx */
899 static void gen_lfdpx(DisasContext *ctx)
901 TCGv EA;
902 TCGv_i64 t0;
903 if (unlikely(!ctx->fpu_enabled)) {
904 gen_exception(ctx, POWERPC_EXCP_FPU);
905 return;
907 gen_set_access_type(ctx, ACCESS_FLOAT);
908 EA = tcg_temp_new();
909 gen_addr_reg_index(ctx, EA);
910 t0 = tcg_temp_new_i64();
912 * We only need to swap high and low halves. gen_qemu_ld64_i64
913 * does necessary 64-bit byteswap already.
915 if (unlikely(ctx->le_mode)) {
916 gen_qemu_ld64_i64(ctx, t0, EA);
917 set_fpr(rD(ctx->opcode) + 1, t0);
918 tcg_gen_addi_tl(EA, EA, 8);
919 gen_qemu_ld64_i64(ctx, t0, EA);
920 set_fpr(rD(ctx->opcode), t0);
921 } else {
922 gen_qemu_ld64_i64(ctx, t0, EA);
923 set_fpr(rD(ctx->opcode), t0);
924 tcg_gen_addi_tl(EA, EA, 8);
925 gen_qemu_ld64_i64(ctx, t0, EA);
926 set_fpr(rD(ctx->opcode) + 1, t0);
928 tcg_temp_free(EA);
929 tcg_temp_free_i64(t0);
932 /* lfiwax */
933 static void gen_lfiwax(DisasContext *ctx)
935 TCGv EA;
936 TCGv t0;
937 TCGv_i64 t1;
938 if (unlikely(!ctx->fpu_enabled)) {
939 gen_exception(ctx, POWERPC_EXCP_FPU);
940 return;
942 gen_set_access_type(ctx, ACCESS_FLOAT);
943 EA = tcg_temp_new();
944 t0 = tcg_temp_new();
945 t1 = tcg_temp_new_i64();
946 gen_addr_reg_index(ctx, EA);
947 gen_qemu_ld32s(ctx, t0, EA);
948 tcg_gen_ext_tl_i64(t1, t0);
949 set_fpr(rD(ctx->opcode), t1);
950 tcg_temp_free(EA);
951 tcg_temp_free(t0);
952 tcg_temp_free_i64(t1);
955 /* lfiwzx */
956 static void gen_lfiwzx(DisasContext *ctx)
958 TCGv EA;
959 TCGv_i64 t0;
960 if (unlikely(!ctx->fpu_enabled)) {
961 gen_exception(ctx, POWERPC_EXCP_FPU);
962 return;
964 gen_set_access_type(ctx, ACCESS_FLOAT);
965 EA = tcg_temp_new();
966 t0 = tcg_temp_new_i64();
967 gen_addr_reg_index(ctx, EA);
968 gen_qemu_ld32u_i64(ctx, t0, EA);
969 set_fpr(rD(ctx->opcode), t0);
970 tcg_temp_free(EA);
971 tcg_temp_free_i64(t0);
/*** Floating-point store ***/
/* D-form FP store: mem[rA|0 + d] = frS */
#define GEN_STF(name, stop, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
    TCGv EA; \
    TCGv_i64 t0; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_FLOAT); \
    EA = tcg_temp_new(); \
    t0 = tcg_temp_new_i64(); \
    gen_addr_imm_index(ctx, EA, 0); \
    get_fpr(t0, rS(ctx->opcode)); \
    gen_qemu_##stop(ctx, t0, EA); \
    tcg_temp_free(EA); \
    tcg_temp_free_i64(t0); \
}
/* D-form FP store with update: rA must be non-zero; rA := EA afterwards. */
#define GEN_STUF(name, stop, opc, type) \
static void glue(gen_, name##u)(DisasContext *ctx) \
{ \
    TCGv EA; \
    TCGv_i64 t0; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    if (unlikely(rA(ctx->opcode) == 0)) { \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_FLOAT); \
    EA = tcg_temp_new(); \
    t0 = tcg_temp_new_i64(); \
    gen_addr_imm_index(ctx, EA, 0); \
    get_fpr(t0, rS(ctx->opcode)); \
    gen_qemu_##stop(ctx, t0, EA); \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
    tcg_temp_free(EA); \
    tcg_temp_free_i64(t0); \
}
/* X-form FP store with update: rA must be non-zero; rA := EA afterwards. */
#define GEN_STUXF(name, stop, opc, type) \
static void glue(gen_, name##ux)(DisasContext *ctx) \
{ \
    TCGv EA; \
    TCGv_i64 t0; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    if (unlikely(rA(ctx->opcode) == 0)) { \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_FLOAT); \
    EA = tcg_temp_new(); \
    t0 = tcg_temp_new_i64(); \
    gen_addr_reg_index(ctx, EA); \
    get_fpr(t0, rS(ctx->opcode)); \
    gen_qemu_##stop(ctx, t0, EA); \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
    tcg_temp_free(EA); \
    tcg_temp_free_i64(t0); \
}
/* X-form FP store: mem[rA|0 + rB] = frS */
#define GEN_STXF(name, stop, opc2, opc3, type) \
static void glue(gen_, name##x)(DisasContext *ctx) \
{ \
    TCGv EA; \
    TCGv_i64 t0; \
    if (unlikely(!ctx->fpu_enabled)) { \
        gen_exception(ctx, POWERPC_EXCP_FPU); \
        return; \
    } \
    gen_set_access_type(ctx, ACCESS_FLOAT); \
    EA = tcg_temp_new(); \
    t0 = tcg_temp_new_i64(); \
    gen_addr_reg_index(ctx, EA); \
    get_fpr(t0, rS(ctx->opcode)); \
    gen_qemu_##stop(ctx, t0, EA); \
    tcg_temp_free(EA); \
    tcg_temp_free_i64(t0); \
}
/* Emit all four addressing variants of an FP store: base, update, ux, x. */
#define GEN_STFS(name, stop, op, type) \
GEN_STF(name, stop, op | 0x20, type); \
GEN_STUF(name, stop, op | 0x21, type); \
GEN_STUXF(name, stop, op | 0x01, type); \
GEN_STXF(name, stop, 0x17, op | 0x00, type)
1066 static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr)
1068 TCGv_i32 tmp = tcg_temp_new_i32();
1069 gen_helper_tosingle(tmp, src);
1070 tcg_gen_qemu_st_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
1071 tcg_temp_free_i32(tmp);
1074 /* stfd stfdu stfdux stfdx */
1075 GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT);
1076 /* stfs stfsu stfsux stfsx */
1077 GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
1079 /* stfdepx (external PID lfdx) */
1080 static void gen_stfdepx(DisasContext *ctx)
1082 TCGv EA;
1083 TCGv_i64 t0;
1084 CHK_SV;
1085 if (unlikely(!ctx->fpu_enabled)) {
1086 gen_exception(ctx, POWERPC_EXCP_FPU);
1087 return;
1089 gen_set_access_type(ctx, ACCESS_FLOAT);
1090 EA = tcg_temp_new();
1091 t0 = tcg_temp_new_i64();
1092 gen_addr_reg_index(ctx, EA);
1093 get_fpr(t0, rD(ctx->opcode));
1094 tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_Q));
1095 tcg_temp_free(EA);
1096 tcg_temp_free_i64(t0);
1099 /* stfdp */
1100 static void gen_stfdp(DisasContext *ctx)
1102 TCGv EA;
1103 TCGv_i64 t0;
1104 if (unlikely(!ctx->fpu_enabled)) {
1105 gen_exception(ctx, POWERPC_EXCP_FPU);
1106 return;
1108 gen_set_access_type(ctx, ACCESS_FLOAT);
1109 EA = tcg_temp_new();
1110 t0 = tcg_temp_new_i64();
1111 gen_addr_imm_index(ctx, EA, 0);
1113 * We only need to swap high and low halves. gen_qemu_st64_i64
1114 * does necessary 64-bit byteswap already.
1116 if (unlikely(ctx->le_mode)) {
1117 get_fpr(t0, rD(ctx->opcode) + 1);
1118 gen_qemu_st64_i64(ctx, t0, EA);
1119 tcg_gen_addi_tl(EA, EA, 8);
1120 get_fpr(t0, rD(ctx->opcode));
1121 gen_qemu_st64_i64(ctx, t0, EA);
1122 } else {
1123 get_fpr(t0, rD(ctx->opcode));
1124 gen_qemu_st64_i64(ctx, t0, EA);
1125 tcg_gen_addi_tl(EA, EA, 8);
1126 get_fpr(t0, rD(ctx->opcode) + 1);
1127 gen_qemu_st64_i64(ctx, t0, EA);
1129 tcg_temp_free(EA);
1130 tcg_temp_free_i64(t0);
1133 /* stfdpx */
1134 static void gen_stfdpx(DisasContext *ctx)
1136 TCGv EA;
1137 TCGv_i64 t0;
1138 if (unlikely(!ctx->fpu_enabled)) {
1139 gen_exception(ctx, POWERPC_EXCP_FPU);
1140 return;
1142 gen_set_access_type(ctx, ACCESS_FLOAT);
1143 EA = tcg_temp_new();
1144 t0 = tcg_temp_new_i64();
1145 gen_addr_reg_index(ctx, EA);
1147 * We only need to swap high and low halves. gen_qemu_st64_i64
1148 * does necessary 64-bit byteswap already.
1150 if (unlikely(ctx->le_mode)) {
1151 get_fpr(t0, rD(ctx->opcode) + 1);
1152 gen_qemu_st64_i64(ctx, t0, EA);
1153 tcg_gen_addi_tl(EA, EA, 8);
1154 get_fpr(t0, rD(ctx->opcode));
1155 gen_qemu_st64_i64(ctx, t0, EA);
1156 } else {
1157 get_fpr(t0, rD(ctx->opcode));
1158 gen_qemu_st64_i64(ctx, t0, EA);
1159 tcg_gen_addi_tl(EA, EA, 8);
1160 get_fpr(t0, rD(ctx->opcode) + 1);
1161 gen_qemu_st64_i64(ctx, t0, EA);
1163 tcg_temp_free(EA);
1164 tcg_temp_free_i64(t0);
1167 /* Optional: */
1168 static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
1170 TCGv t0 = tcg_temp_new();
1171 tcg_gen_trunc_i64_tl(t0, arg1),
1172 gen_qemu_st32(ctx, t0, arg2);
1173 tcg_temp_free(t0);
1175 /* stfiwx */
1176 GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);
1178 /* POWER2 specific instructions */
1179 /* Quad manipulation (load/store two floats at a time) */
1181 /* lfq */
1182 static void gen_lfq(DisasContext *ctx)
1184 int rd = rD(ctx->opcode);
1185 TCGv t0;
1186 TCGv_i64 t1;
1187 gen_set_access_type(ctx, ACCESS_FLOAT);
1188 t0 = tcg_temp_new();
1189 t1 = tcg_temp_new_i64();
1190 gen_addr_imm_index(ctx, t0, 0);
1191 gen_qemu_ld64_i64(ctx, t1, t0);
1192 set_fpr(rd, t1);
1193 gen_addr_add(ctx, t0, t0, 8);
1194 gen_qemu_ld64_i64(ctx, t1, t0);
1195 set_fpr((rd + 1) % 32, t1);
1196 tcg_temp_free(t0);
1197 tcg_temp_free_i64(t1);
1200 /* lfqu */
1201 static void gen_lfqu(DisasContext *ctx)
1203 int ra = rA(ctx->opcode);
1204 int rd = rD(ctx->opcode);
1205 TCGv t0, t1;
1206 TCGv_i64 t2;
1207 gen_set_access_type(ctx, ACCESS_FLOAT);
1208 t0 = tcg_temp_new();
1209 t1 = tcg_temp_new();
1210 t2 = tcg_temp_new_i64();
1211 gen_addr_imm_index(ctx, t0, 0);
1212 gen_qemu_ld64_i64(ctx, t2, t0);
1213 set_fpr(rd, t2);
1214 gen_addr_add(ctx, t1, t0, 8);
1215 gen_qemu_ld64_i64(ctx, t2, t1);
1216 set_fpr((rd + 1) % 32, t2);
1217 if (ra != 0) {
1218 tcg_gen_mov_tl(cpu_gpr[ra], t0);
1220 tcg_temp_free(t0);
1221 tcg_temp_free(t1);
1222 tcg_temp_free_i64(t2);
1225 /* lfqux */
1226 static void gen_lfqux(DisasContext *ctx)
1228 int ra = rA(ctx->opcode);
1229 int rd = rD(ctx->opcode);
1230 gen_set_access_type(ctx, ACCESS_FLOAT);
1231 TCGv t0, t1;
1232 TCGv_i64 t2;
1233 t2 = tcg_temp_new_i64();
1234 t0 = tcg_temp_new();
1235 gen_addr_reg_index(ctx, t0);
1236 gen_qemu_ld64_i64(ctx, t2, t0);
1237 set_fpr(rd, t2);
1238 t1 = tcg_temp_new();
1239 gen_addr_add(ctx, t1, t0, 8);
1240 gen_qemu_ld64_i64(ctx, t2, t1);
1241 set_fpr((rd + 1) % 32, t2);
1242 tcg_temp_free(t1);
1243 if (ra != 0) {
1244 tcg_gen_mov_tl(cpu_gpr[ra], t0);
1246 tcg_temp_free(t0);
1247 tcg_temp_free_i64(t2);
1250 /* lfqx */
1251 static void gen_lfqx(DisasContext *ctx)
1253 int rd = rD(ctx->opcode);
1254 TCGv t0;
1255 TCGv_i64 t1;
1256 gen_set_access_type(ctx, ACCESS_FLOAT);
1257 t0 = tcg_temp_new();
1258 t1 = tcg_temp_new_i64();
1259 gen_addr_reg_index(ctx, t0);
1260 gen_qemu_ld64_i64(ctx, t1, t0);
1261 set_fpr(rd, t1);
1262 gen_addr_add(ctx, t0, t0, 8);
1263 gen_qemu_ld64_i64(ctx, t1, t0);
1264 set_fpr((rd + 1) % 32, t1);
1265 tcg_temp_free(t0);
1266 tcg_temp_free_i64(t1);
1269 /* stfq */
1270 static void gen_stfq(DisasContext *ctx)
1272 int rd = rD(ctx->opcode);
1273 TCGv t0;
1274 TCGv_i64 t1;
1275 gen_set_access_type(ctx, ACCESS_FLOAT);
1276 t0 = tcg_temp_new();
1277 t1 = tcg_temp_new_i64();
1278 gen_addr_imm_index(ctx, t0, 0);
1279 get_fpr(t1, rd);
1280 gen_qemu_st64_i64(ctx, t1, t0);
1281 gen_addr_add(ctx, t0, t0, 8);
1282 get_fpr(t1, (rd + 1) % 32);
1283 gen_qemu_st64_i64(ctx, t1, t0);
1284 tcg_temp_free(t0);
1285 tcg_temp_free_i64(t1);
1288 /* stfqu */
1289 static void gen_stfqu(DisasContext *ctx)
1291 int ra = rA(ctx->opcode);
1292 int rd = rD(ctx->opcode);
1293 TCGv t0, t1;
1294 TCGv_i64 t2;
1295 gen_set_access_type(ctx, ACCESS_FLOAT);
1296 t2 = tcg_temp_new_i64();
1297 t0 = tcg_temp_new();
1298 gen_addr_imm_index(ctx, t0, 0);
1299 get_fpr(t2, rd);
1300 gen_qemu_st64_i64(ctx, t2, t0);
1301 t1 = tcg_temp_new();
1302 gen_addr_add(ctx, t1, t0, 8);
1303 get_fpr(t2, (rd + 1) % 32);
1304 gen_qemu_st64_i64(ctx, t2, t1);
1305 tcg_temp_free(t1);
1306 if (ra != 0) {
1307 tcg_gen_mov_tl(cpu_gpr[ra], t0);
1309 tcg_temp_free(t0);
1310 tcg_temp_free_i64(t2);
1313 /* stfqux */
1314 static void gen_stfqux(DisasContext *ctx)
1316 int ra = rA(ctx->opcode);
1317 int rd = rD(ctx->opcode);
1318 TCGv t0, t1;
1319 TCGv_i64 t2;
1320 gen_set_access_type(ctx, ACCESS_FLOAT);
1321 t2 = tcg_temp_new_i64();
1322 t0 = tcg_temp_new();
1323 gen_addr_reg_index(ctx, t0);
1324 get_fpr(t2, rd);
1325 gen_qemu_st64_i64(ctx, t2, t0);
1326 t1 = tcg_temp_new();
1327 gen_addr_add(ctx, t1, t0, 8);
1328 get_fpr(t2, (rd + 1) % 32);
1329 gen_qemu_st64_i64(ctx, t2, t1);
1330 tcg_temp_free(t1);
1331 if (ra != 0) {
1332 tcg_gen_mov_tl(cpu_gpr[ra], t0);
1334 tcg_temp_free(t0);
1335 tcg_temp_free_i64(t2);
1338 /* stfqx */
1339 static void gen_stfqx(DisasContext *ctx)
1341 int rd = rD(ctx->opcode);
1342 TCGv t0;
1343 TCGv_i64 t1;
1344 gen_set_access_type(ctx, ACCESS_FLOAT);
1345 t1 = tcg_temp_new_i64();
1346 t0 = tcg_temp_new();
1347 gen_addr_reg_index(ctx, t0);
1348 get_fpr(t1, rd);
1349 gen_qemu_st64_i64(ctx, t1, t0);
1350 gen_addr_add(ctx, t0, t0, 8);
1351 get_fpr(t1, (rd + 1) % 32);
1352 gen_qemu_st64_i64(ctx, t1, t0);
1353 tcg_temp_free(t0);
1354 tcg_temp_free_i64(t1);
/*
 * Tear down the helper macros defined earlier in this include file so
 * they do not leak into other translate/*.inc.c files included after
 * this one from translate.c.
 */
#undef _GEN_FLOAT_ACB
#undef GEN_FLOAT_ACB
#undef _GEN_FLOAT_AB
#undef GEN_FLOAT_AB
#undef _GEN_FLOAT_AC
#undef GEN_FLOAT_AC
#undef GEN_FLOAT_B
#undef GEN_FLOAT_BS

#undef GEN_LDF
#undef GEN_LDUF
#undef GEN_LDUXF
#undef GEN_LDXF
#undef GEN_LDFS

#undef GEN_STF
#undef GEN_STUF
#undef GEN_STUXF
#undef GEN_STXF
#undef GEN_STFS