trace: forbid use of %m in trace event format strings
[qemu/ar7.git] / target / ppc / translate / fp-impl.inc.c
blob0f21a4e47741cc111faf95d04642b5534f073b95
1 /*
2 * translate-fp.c
4 * Standard FPU translation
5 */
7 static inline void gen_reset_fpstatus(void)
9 gen_helper_reset_fpstatus(cpu_env);
12 static inline void gen_compute_fprf_float64(TCGv_i64 arg)
14 gen_helper_compute_fprf_float64(cpu_env, arg);
15 gen_helper_float_check_status(cpu_env);
18 #if defined(TARGET_PPC64)
19 static void gen_set_cr1_from_fpscr(DisasContext *ctx)
21 TCGv_i32 tmp = tcg_temp_new_i32();
22 tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
23 tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
24 tcg_temp_free_i32(tmp);
26 #else
27 static void gen_set_cr1_from_fpscr(DisasContext *ctx)
29 tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
31 #endif
33 /*** Floating-Point arithmetic ***/
/*
 * A/C/B-form FP op: frD = op(frA, frC, frB).  If "isfloat", the result
 * is additionally rounded to single precision; if "set_fprf", FPSCR[FPRF]
 * is updated and enabled FP exceptions are checked.  Rc=1 copies
 * FPSCR[FX,FEX,VX,OX] into CR1.
 */
#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type)    \
static void gen_f##name(DisasContext *ctx)                             \
{                                                                      \
    TCGv_i64 t0;                                                       \
    TCGv_i64 t1;                                                       \
    TCGv_i64 t2;                                                       \
    TCGv_i64 t3;                                                       \
    if (unlikely(!ctx->fpu_enabled)) {                                 \
        gen_exception(ctx, POWERPC_EXCP_FPU);                          \
        return;                                                        \
    }                                                                  \
    t0 = tcg_temp_new_i64();                                           \
    t1 = tcg_temp_new_i64();                                           \
    t2 = tcg_temp_new_i64();                                           \
    t3 = tcg_temp_new_i64();                                           \
    gen_reset_fpstatus();                                              \
    get_fpr(t0, rA(ctx->opcode));                                      \
    get_fpr(t1, rC(ctx->opcode));                                      \
    get_fpr(t2, rB(ctx->opcode));                                      \
    gen_helper_f##op(t3, cpu_env, t0, t1, t2);                         \
    if (isfloat) {                                                     \
        gen_helper_frsp(t3, cpu_env, t3);                              \
    }                                                                  \
    set_fpr(rD(ctx->opcode), t3);                                      \
    if (set_fprf) {                                                    \
        gen_compute_fprf_float64(t3);                                  \
    }                                                                  \
    if (unlikely(Rc(ctx->opcode) != 0)) {                              \
        gen_set_cr1_from_fpscr(ctx);                                   \
    }                                                                  \
    tcg_temp_free_i64(t0);                                             \
    tcg_temp_free_i64(t1);                                             \
    tcg_temp_free_i64(t2);                                             \
    tcg_temp_free_i64(t3);                                             \
}
/* Instantiate both the double (0x3F) and single (0x3B, rounded) forms. */
#define GEN_FLOAT_ACB(name, op2, set_fprf, type)                       \
_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type);              \
_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
/* A/B-form FP op: frD = op(frA, frB).  Flags as in _GEN_FLOAT_ACB. */
#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
static void gen_f##name(DisasContext *ctx)                             \
{                                                                      \
    TCGv_i64 t0;                                                       \
    TCGv_i64 t1;                                                       \
    TCGv_i64 t2;                                                       \
    if (unlikely(!ctx->fpu_enabled)) {                                 \
        gen_exception(ctx, POWERPC_EXCP_FPU);                          \
        return;                                                        \
    }                                                                  \
    t0 = tcg_temp_new_i64();                                           \
    t1 = tcg_temp_new_i64();                                           \
    t2 = tcg_temp_new_i64();                                           \
    gen_reset_fpstatus();                                              \
    get_fpr(t0, rA(ctx->opcode));                                      \
    get_fpr(t1, rB(ctx->opcode));                                      \
    gen_helper_f##op(t2, cpu_env, t0, t1);                             \
    if (isfloat) {                                                     \
        gen_helper_frsp(t2, cpu_env, t2);                              \
    }                                                                  \
    set_fpr(rD(ctx->opcode), t2);                                      \
    if (set_fprf) {                                                    \
        gen_compute_fprf_float64(t2);                                  \
    }                                                                  \
    if (unlikely(Rc(ctx->opcode) != 0)) {                              \
        gen_set_cr1_from_fpscr(ctx);                                   \
    }                                                                  \
    tcg_temp_free_i64(t0);                                             \
    tcg_temp_free_i64(t1);                                             \
    tcg_temp_free_i64(t2);                                             \
}
/* Instantiate both the double (0x3F) and single (0x3B, rounded) forms. */
#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type)                 \
_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type);        \
_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
/* A/C-form FP op: frD = op(frA, frC).  Flags as in _GEN_FLOAT_ACB. */
#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
static void gen_f##name(DisasContext *ctx)                             \
{                                                                      \
    TCGv_i64 t0;                                                       \
    TCGv_i64 t1;                                                       \
    TCGv_i64 t2;                                                       \
    if (unlikely(!ctx->fpu_enabled)) {                                 \
        gen_exception(ctx, POWERPC_EXCP_FPU);                          \
        return;                                                        \
    }                                                                  \
    t0 = tcg_temp_new_i64();                                           \
    t1 = tcg_temp_new_i64();                                           \
    t2 = tcg_temp_new_i64();                                           \
    gen_reset_fpstatus();                                              \
    get_fpr(t0, rA(ctx->opcode));                                      \
    get_fpr(t1, rC(ctx->opcode));                                      \
    gen_helper_f##op(t2, cpu_env, t0, t1);                             \
    if (isfloat) {                                                     \
        gen_helper_frsp(t2, cpu_env, t2);                              \
    }                                                                  \
    set_fpr(rD(ctx->opcode), t2);                                      \
    if (set_fprf) {                                                    \
        gen_compute_fprf_float64(t2);                                  \
    }                                                                  \
    if (unlikely(Rc(ctx->opcode) != 0)) {                              \
        gen_set_cr1_from_fpscr(ctx);                                   \
    }                                                                  \
    tcg_temp_free_i64(t0);                                             \
    tcg_temp_free_i64(t1);                                             \
    tcg_temp_free_i64(t2);                                             \
}
/* Instantiate both the double (0x3F) and single (0x3B, rounded) forms. */
#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type)                 \
_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type);        \
_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
/* B-form FP op: frD = op(frB), used for round/convert instructions. */
#define GEN_FLOAT_B(name, op2, op3, set_fprf, type)                    \
static void gen_f##name(DisasContext *ctx)                             \
{                                                                      \
    TCGv_i64 t0;                                                       \
    TCGv_i64 t1;                                                       \
    if (unlikely(!ctx->fpu_enabled)) {                                 \
        gen_exception(ctx, POWERPC_EXCP_FPU);                          \
        return;                                                        \
    }                                                                  \
    t0 = tcg_temp_new_i64();                                           \
    t1 = tcg_temp_new_i64();                                           \
    gen_reset_fpstatus();                                              \
    get_fpr(t0, rB(ctx->opcode));                                      \
    gen_helper_f##name(t1, cpu_env, t0);                               \
    set_fpr(rD(ctx->opcode), t1);                                      \
    if (set_fprf) {                                                    \
        gen_compute_fprf_float64(t1);                                  \
    }                                                                  \
    if (unlikely(Rc(ctx->opcode) != 0)) {                              \
        gen_set_cr1_from_fpscr(ctx);                                   \
    }                                                                  \
    tcg_temp_free_i64(t0);                                             \
    tcg_temp_free_i64(t1);                                             \
}
/* B-form FP op distinguished by opc1/opc2 (estimate instructions). */
#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type)                   \
static void gen_f##name(DisasContext *ctx)                             \
{                                                                      \
    TCGv_i64 t0;                                                       \
    TCGv_i64 t1;                                                       \
    if (unlikely(!ctx->fpu_enabled)) {                                 \
        gen_exception(ctx, POWERPC_EXCP_FPU);                          \
        return;                                                        \
    }                                                                  \
    t0 = tcg_temp_new_i64();                                           \
    t1 = tcg_temp_new_i64();                                           \
    gen_reset_fpstatus();                                              \
    get_fpr(t0, rB(ctx->opcode));                                      \
    gen_helper_f##name(t1, cpu_env, t0);                               \
    set_fpr(rD(ctx->opcode), t1);                                      \
    if (set_fprf) {                                                    \
        gen_compute_fprf_float64(t1);                                  \
    }                                                                  \
    if (unlikely(Rc(ctx->opcode) != 0)) {                              \
        gen_set_cr1_from_fpscr(ctx);                                   \
    }                                                                  \
    tcg_temp_free_i64(t0);                                             \
    tcg_temp_free_i64(t1);                                             \
}
194 /* fadd - fadds */
195 GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
196 /* fdiv - fdivs */
197 GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
198 /* fmul - fmuls */
199 GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);
201 /* fre */
202 GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);
204 /* fres */
205 GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);
207 /* frsqrte */
208 GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
210 /* frsqrtes */
211 static void gen_frsqrtes(DisasContext *ctx)
213 TCGv_i64 t0;
214 TCGv_i64 t1;
215 if (unlikely(!ctx->fpu_enabled)) {
216 gen_exception(ctx, POWERPC_EXCP_FPU);
217 return;
219 t0 = tcg_temp_new_i64();
220 t1 = tcg_temp_new_i64();
221 gen_reset_fpstatus();
222 get_fpr(t0, rB(ctx->opcode));
223 gen_helper_frsqrte(t1, cpu_env, t0);
224 gen_helper_frsp(t1, cpu_env, t1);
225 set_fpr(rD(ctx->opcode), t1);
226 gen_compute_fprf_float64(t1);
227 if (unlikely(Rc(ctx->opcode) != 0)) {
228 gen_set_cr1_from_fpscr(ctx);
230 tcg_temp_free_i64(t0);
231 tcg_temp_free_i64(t1);
234 /* fsel */
235 _GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
236 /* fsub - fsubs */
237 GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
238 /* Optional: */
240 /* fsqrt */
241 static void gen_fsqrt(DisasContext *ctx)
243 TCGv_i64 t0;
244 TCGv_i64 t1;
245 if (unlikely(!ctx->fpu_enabled)) {
246 gen_exception(ctx, POWERPC_EXCP_FPU);
247 return;
249 t0 = tcg_temp_new_i64();
250 t1 = tcg_temp_new_i64();
251 gen_reset_fpstatus();
252 get_fpr(t0, rB(ctx->opcode));
253 gen_helper_fsqrt(t1, cpu_env, t0);
254 set_fpr(rD(ctx->opcode), t1);
255 gen_compute_fprf_float64(t1);
256 if (unlikely(Rc(ctx->opcode) != 0)) {
257 gen_set_cr1_from_fpscr(ctx);
259 tcg_temp_free_i64(t0);
260 tcg_temp_free_i64(t1);
263 static void gen_fsqrts(DisasContext *ctx)
265 TCGv_i64 t0;
266 TCGv_i64 t1;
267 if (unlikely(!ctx->fpu_enabled)) {
268 gen_exception(ctx, POWERPC_EXCP_FPU);
269 return;
271 t0 = tcg_temp_new_i64();
272 t1 = tcg_temp_new_i64();
273 gen_reset_fpstatus();
274 get_fpr(t0, rB(ctx->opcode));
275 gen_helper_fsqrt(t1, cpu_env, t0);
276 gen_helper_frsp(t1, cpu_env, t1);
277 set_fpr(rD(ctx->opcode), t1);
278 gen_compute_fprf_float64(t1);
279 if (unlikely(Rc(ctx->opcode) != 0)) {
280 gen_set_cr1_from_fpscr(ctx);
282 tcg_temp_free_i64(t0);
283 tcg_temp_free_i64(t1);
286 /*** Floating-Point multiply-and-add ***/
287 /* fmadd - fmadds */
288 GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
289 /* fmsub - fmsubs */
290 GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
291 /* fnmadd - fnmadds */
292 GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
293 /* fnmsub - fnmsubs */
294 GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);
296 /*** Floating-Point round & convert ***/
297 /* fctiw */
298 GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
299 /* fctiwu */
300 GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
301 /* fctiwz */
302 GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
303 /* fctiwuz */
304 GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
305 /* frsp */
306 GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
307 /* fcfid */
308 GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
309 /* fcfids */
310 GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
311 /* fcfidu */
312 GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
313 /* fcfidus */
314 GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
315 /* fctid */
316 GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
317 /* fctidu */
318 GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
319 /* fctidz */
320 GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
321 /* fctidu */
322 GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);
324 /* frin */
325 GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
326 /* friz */
327 GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
328 /* frip */
329 GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
330 /* frim */
331 GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
333 static void gen_ftdiv(DisasContext *ctx)
335 TCGv_i64 t0;
336 TCGv_i64 t1;
337 if (unlikely(!ctx->fpu_enabled)) {
338 gen_exception(ctx, POWERPC_EXCP_FPU);
339 return;
341 t0 = tcg_temp_new_i64();
342 t1 = tcg_temp_new_i64();
343 get_fpr(t0, rA(ctx->opcode));
344 get_fpr(t1, rB(ctx->opcode));
345 gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], t0, t1);
346 tcg_temp_free_i64(t0);
347 tcg_temp_free_i64(t1);
350 static void gen_ftsqrt(DisasContext *ctx)
352 TCGv_i64 t0;
353 if (unlikely(!ctx->fpu_enabled)) {
354 gen_exception(ctx, POWERPC_EXCP_FPU);
355 return;
357 t0 = tcg_temp_new_i64();
358 get_fpr(t0, rB(ctx->opcode));
359 gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], t0);
360 tcg_temp_free_i64(t0);
365 /*** Floating-Point compare ***/
367 /* fcmpo */
368 static void gen_fcmpo(DisasContext *ctx)
370 TCGv_i32 crf;
371 TCGv_i64 t0;
372 TCGv_i64 t1;
373 if (unlikely(!ctx->fpu_enabled)) {
374 gen_exception(ctx, POWERPC_EXCP_FPU);
375 return;
377 t0 = tcg_temp_new_i64();
378 t1 = tcg_temp_new_i64();
379 gen_reset_fpstatus();
380 crf = tcg_const_i32(crfD(ctx->opcode));
381 get_fpr(t0, rA(ctx->opcode));
382 get_fpr(t1, rB(ctx->opcode));
383 gen_helper_fcmpo(cpu_env, t0, t1, crf);
384 tcg_temp_free_i32(crf);
385 gen_helper_float_check_status(cpu_env);
386 tcg_temp_free_i64(t0);
387 tcg_temp_free_i64(t1);
390 /* fcmpu */
391 static void gen_fcmpu(DisasContext *ctx)
393 TCGv_i32 crf;
394 TCGv_i64 t0;
395 TCGv_i64 t1;
396 if (unlikely(!ctx->fpu_enabled)) {
397 gen_exception(ctx, POWERPC_EXCP_FPU);
398 return;
400 t0 = tcg_temp_new_i64();
401 t1 = tcg_temp_new_i64();
402 gen_reset_fpstatus();
403 crf = tcg_const_i32(crfD(ctx->opcode));
404 get_fpr(t0, rA(ctx->opcode));
405 get_fpr(t1, rB(ctx->opcode));
406 gen_helper_fcmpu(cpu_env, t0, t1, crf);
407 tcg_temp_free_i32(crf);
408 gen_helper_float_check_status(cpu_env);
409 tcg_temp_free_i64(t0);
410 tcg_temp_free_i64(t1);
413 /*** Floating-point move ***/
414 /* fabs */
415 /* XXX: beware that fabs never checks for NaNs nor update FPSCR */
416 static void gen_fabs(DisasContext *ctx)
418 TCGv_i64 t0;
419 TCGv_i64 t1;
420 if (unlikely(!ctx->fpu_enabled)) {
421 gen_exception(ctx, POWERPC_EXCP_FPU);
422 return;
424 t0 = tcg_temp_new_i64();
425 t1 = tcg_temp_new_i64();
426 get_fpr(t0, rB(ctx->opcode));
427 tcg_gen_andi_i64(t1, t0, ~(1ULL << 63));
428 set_fpr(rD(ctx->opcode), t1);
429 if (unlikely(Rc(ctx->opcode))) {
430 gen_set_cr1_from_fpscr(ctx);
432 tcg_temp_free_i64(t0);
433 tcg_temp_free_i64(t1);
436 /* fmr - fmr. */
437 /* XXX: beware that fmr never checks for NaNs nor update FPSCR */
438 static void gen_fmr(DisasContext *ctx)
440 TCGv_i64 t0;
441 if (unlikely(!ctx->fpu_enabled)) {
442 gen_exception(ctx, POWERPC_EXCP_FPU);
443 return;
445 t0 = tcg_temp_new_i64();
446 get_fpr(t0, rB(ctx->opcode));
447 set_fpr(rD(ctx->opcode), t0);
448 if (unlikely(Rc(ctx->opcode))) {
449 gen_set_cr1_from_fpscr(ctx);
451 tcg_temp_free_i64(t0);
454 /* fnabs */
455 /* XXX: beware that fnabs never checks for NaNs nor update FPSCR */
456 static void gen_fnabs(DisasContext *ctx)
458 TCGv_i64 t0;
459 TCGv_i64 t1;
460 if (unlikely(!ctx->fpu_enabled)) {
461 gen_exception(ctx, POWERPC_EXCP_FPU);
462 return;
464 t0 = tcg_temp_new_i64();
465 t1 = tcg_temp_new_i64();
466 get_fpr(t0, rB(ctx->opcode));
467 tcg_gen_ori_i64(t1, t0, 1ULL << 63);
468 set_fpr(rD(ctx->opcode), t1);
469 if (unlikely(Rc(ctx->opcode))) {
470 gen_set_cr1_from_fpscr(ctx);
472 tcg_temp_free_i64(t0);
473 tcg_temp_free_i64(t1);
476 /* fneg */
477 /* XXX: beware that fneg never checks for NaNs nor update FPSCR */
478 static void gen_fneg(DisasContext *ctx)
480 TCGv_i64 t0;
481 TCGv_i64 t1;
482 if (unlikely(!ctx->fpu_enabled)) {
483 gen_exception(ctx, POWERPC_EXCP_FPU);
484 return;
486 t0 = tcg_temp_new_i64();
487 t1 = tcg_temp_new_i64();
488 get_fpr(t0, rB(ctx->opcode));
489 tcg_gen_xori_i64(t1, t0, 1ULL << 63);
490 set_fpr(rD(ctx->opcode), t1);
491 if (unlikely(Rc(ctx->opcode))) {
492 gen_set_cr1_from_fpscr(ctx);
494 tcg_temp_free_i64(t0);
495 tcg_temp_free_i64(t1);
498 /* fcpsgn: PowerPC 2.05 specification */
499 /* XXX: beware that fcpsgn never checks for NaNs nor update FPSCR */
500 static void gen_fcpsgn(DisasContext *ctx)
502 TCGv_i64 t0;
503 TCGv_i64 t1;
504 TCGv_i64 t2;
505 if (unlikely(!ctx->fpu_enabled)) {
506 gen_exception(ctx, POWERPC_EXCP_FPU);
507 return;
509 t0 = tcg_temp_new_i64();
510 t1 = tcg_temp_new_i64();
511 t2 = tcg_temp_new_i64();
512 get_fpr(t0, rA(ctx->opcode));
513 get_fpr(t1, rB(ctx->opcode));
514 tcg_gen_deposit_i64(t2, t0, t1, 0, 63);
515 set_fpr(rD(ctx->opcode), t2);
516 if (unlikely(Rc(ctx->opcode))) {
517 gen_set_cr1_from_fpscr(ctx);
519 tcg_temp_free_i64(t0);
520 tcg_temp_free_i64(t1);
521 tcg_temp_free_i64(t2);
524 static void gen_fmrgew(DisasContext *ctx)
526 TCGv_i64 b0;
527 TCGv_i64 t0;
528 TCGv_i64 t1;
529 if (unlikely(!ctx->fpu_enabled)) {
530 gen_exception(ctx, POWERPC_EXCP_FPU);
531 return;
533 b0 = tcg_temp_new_i64();
534 t0 = tcg_temp_new_i64();
535 t1 = tcg_temp_new_i64();
536 get_fpr(t0, rB(ctx->opcode));
537 tcg_gen_shri_i64(b0, t0, 32);
538 get_fpr(t0, rA(ctx->opcode));
539 tcg_gen_deposit_i64(t1, t0, b0, 0, 32);
540 set_fpr(rD(ctx->opcode), t1);
541 tcg_temp_free_i64(b0);
542 tcg_temp_free_i64(t0);
543 tcg_temp_free_i64(t1);
546 static void gen_fmrgow(DisasContext *ctx)
548 TCGv_i64 t0;
549 TCGv_i64 t1;
550 TCGv_i64 t2;
551 if (unlikely(!ctx->fpu_enabled)) {
552 gen_exception(ctx, POWERPC_EXCP_FPU);
553 return;
555 t0 = tcg_temp_new_i64();
556 t1 = tcg_temp_new_i64();
557 t2 = tcg_temp_new_i64();
558 get_fpr(t0, rB(ctx->opcode));
559 get_fpr(t1, rA(ctx->opcode));
560 tcg_gen_deposit_i64(t2, t0, t1, 32, 32);
561 set_fpr(rD(ctx->opcode), t2);
562 tcg_temp_free_i64(t0);
563 tcg_temp_free_i64(t1);
564 tcg_temp_free_i64(t2);
567 /*** Floating-Point status & ctrl register ***/
569 /* mcrfs */
570 static void gen_mcrfs(DisasContext *ctx)
572 TCGv tmp = tcg_temp_new();
573 TCGv_i32 tmask;
574 TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
575 int bfa;
576 int nibble;
577 int shift;
579 if (unlikely(!ctx->fpu_enabled)) {
580 gen_exception(ctx, POWERPC_EXCP_FPU);
581 return;
583 bfa = crfS(ctx->opcode);
584 nibble = 7 - bfa;
585 shift = 4 * nibble;
586 tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
587 tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
588 tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
589 tcg_temp_free(tmp);
590 tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
591 /* Only the exception bits (including FX) should be cleared if read */
592 tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS));
593 /* FEX and VX need to be updated, so don't set fpscr directly */
594 tmask = tcg_const_i32(1 << nibble);
595 gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
596 tcg_temp_free_i32(tmask);
597 tcg_temp_free_i64(tnew_fpscr);
600 /* mffs */
601 static void gen_mffs(DisasContext *ctx)
603 TCGv_i64 t0;
604 if (unlikely(!ctx->fpu_enabled)) {
605 gen_exception(ctx, POWERPC_EXCP_FPU);
606 return;
608 t0 = tcg_temp_new_i64();
609 gen_reset_fpstatus();
610 tcg_gen_extu_tl_i64(t0, cpu_fpscr);
611 set_fpr(rD(ctx->opcode), t0);
612 if (unlikely(Rc(ctx->opcode))) {
613 gen_set_cr1_from_fpscr(ctx);
615 tcg_temp_free_i64(t0);
618 /* mtfsb0 */
619 static void gen_mtfsb0(DisasContext *ctx)
621 uint8_t crb;
623 if (unlikely(!ctx->fpu_enabled)) {
624 gen_exception(ctx, POWERPC_EXCP_FPU);
625 return;
627 crb = 31 - crbD(ctx->opcode);
628 gen_reset_fpstatus();
629 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
630 TCGv_i32 t0;
631 t0 = tcg_const_i32(crb);
632 gen_helper_fpscr_clrbit(cpu_env, t0);
633 tcg_temp_free_i32(t0);
635 if (unlikely(Rc(ctx->opcode) != 0)) {
636 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
637 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
641 /* mtfsb1 */
642 static void gen_mtfsb1(DisasContext *ctx)
644 uint8_t crb;
646 if (unlikely(!ctx->fpu_enabled)) {
647 gen_exception(ctx, POWERPC_EXCP_FPU);
648 return;
650 crb = 31 - crbD(ctx->opcode);
651 gen_reset_fpstatus();
652 /* XXX: we pretend we can only do IEEE floating-point computations */
653 if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
654 TCGv_i32 t0;
655 t0 = tcg_const_i32(crb);
656 gen_helper_fpscr_setbit(cpu_env, t0);
657 tcg_temp_free_i32(t0);
659 if (unlikely(Rc(ctx->opcode) != 0)) {
660 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
661 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
663 /* We can raise a differed exception */
664 gen_helper_float_check_status(cpu_env);
667 /* mtfsf */
668 static void gen_mtfsf(DisasContext *ctx)
670 TCGv_i32 t0;
671 TCGv_i64 t1;
672 int flm, l, w;
674 if (unlikely(!ctx->fpu_enabled)) {
675 gen_exception(ctx, POWERPC_EXCP_FPU);
676 return;
678 flm = FPFLM(ctx->opcode);
679 l = FPL(ctx->opcode);
680 w = FPW(ctx->opcode);
681 if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
682 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
683 return;
685 gen_reset_fpstatus();
686 if (l) {
687 t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
688 } else {
689 t0 = tcg_const_i32(flm << (w * 8));
691 t1 = tcg_temp_new_i64();
692 get_fpr(t1, rB(ctx->opcode));
693 gen_helper_store_fpscr(cpu_env, t1, t0);
694 tcg_temp_free_i32(t0);
695 if (unlikely(Rc(ctx->opcode) != 0)) {
696 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
697 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
699 /* We can raise a differed exception */
700 gen_helper_float_check_status(cpu_env);
701 tcg_temp_free_i64(t1);
704 /* mtfsfi */
705 static void gen_mtfsfi(DisasContext *ctx)
707 int bf, sh, w;
708 TCGv_i64 t0;
709 TCGv_i32 t1;
711 if (unlikely(!ctx->fpu_enabled)) {
712 gen_exception(ctx, POWERPC_EXCP_FPU);
713 return;
715 w = FPW(ctx->opcode);
716 bf = FPBF(ctx->opcode);
717 if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
718 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
719 return;
721 sh = (8 * w) + 7 - bf;
722 gen_reset_fpstatus();
723 t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
724 t1 = tcg_const_i32(1 << sh);
725 gen_helper_store_fpscr(cpu_env, t0, t1);
726 tcg_temp_free_i64(t0);
727 tcg_temp_free_i32(t1);
728 if (unlikely(Rc(ctx->opcode) != 0)) {
729 tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
730 tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
732 /* We can raise a differed exception */
733 gen_helper_float_check_status(cpu_env);
736 /*** Floating-point load ***/
/* FP load, D-form addressing: frD = mem[rA + d]. */
#define GEN_LDF(name, ldop, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                        \
{                                                                      \
    TCGv EA;                                                           \
    TCGv_i64 t0;                                                       \
    if (unlikely(!ctx->fpu_enabled)) {                                 \
        gen_exception(ctx, POWERPC_EXCP_FPU);                          \
        return;                                                        \
    }                                                                  \
    gen_set_access_type(ctx, ACCESS_FLOAT);                            \
    EA = tcg_temp_new();                                               \
    t0 = tcg_temp_new_i64();                                           \
    gen_addr_imm_index(ctx, EA, 0);                                    \
    gen_qemu_##ldop(ctx, t0, EA);                                      \
    set_fpr(rD(ctx->opcode), t0);                                      \
    tcg_temp_free(EA);                                                 \
    tcg_temp_free_i64(t0);                                             \
}
/* FP load with update, D-form: rA must be non-zero; rA = EA afterwards. */
#define GEN_LDUF(name, ldop, opc, type)                                \
static void glue(gen_, name##u)(DisasContext *ctx)                     \
{                                                                      \
    TCGv EA;                                                           \
    TCGv_i64 t0;                                                       \
    if (unlikely(!ctx->fpu_enabled)) {                                 \
        gen_exception(ctx, POWERPC_EXCP_FPU);                          \
        return;                                                        \
    }                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                              \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
        return;                                                        \
    }                                                                  \
    gen_set_access_type(ctx, ACCESS_FLOAT);                            \
    EA = tcg_temp_new();                                               \
    t0 = tcg_temp_new_i64();                                           \
    gen_addr_imm_index(ctx, EA, 0);                                    \
    gen_qemu_##ldop(ctx, t0, EA);                                      \
    set_fpr(rD(ctx->opcode), t0);                                      \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                      \
    tcg_temp_free(EA);                                                 \
    tcg_temp_free_i64(t0);                                             \
}
/* FP load with update, X-form: rA must be non-zero; rA = EA afterwards. */
#define GEN_LDUXF(name, ldop, opc, type)                               \
static void glue(gen_, name##ux)(DisasContext *ctx)                    \
{                                                                      \
    TCGv EA;                                                           \
    TCGv_i64 t0;                                                       \
    if (unlikely(!ctx->fpu_enabled)) {                                 \
        gen_exception(ctx, POWERPC_EXCP_FPU);                          \
        return;                                                        \
    }                                                                  \
    t0 = tcg_temp_new_i64();                                           \
    if (unlikely(rA(ctx->opcode) == 0)) {                              \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
        return;                                                        \
    }                                                                  \
    gen_set_access_type(ctx, ACCESS_FLOAT);                            \
    EA = tcg_temp_new();                                               \
    gen_addr_reg_index(ctx, EA);                                       \
    gen_qemu_##ldop(ctx, t0, EA);                                      \
    set_fpr(rD(ctx->opcode), t0);                                      \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                      \
    tcg_temp_free(EA);                                                 \
    tcg_temp_free_i64(t0);                                             \
}
/* FP load, X-form addressing: frD = mem[rA|0 + rB]. */
#define GEN_LDXF(name, ldop, opc2, opc3, type)                         \
static void glue(gen_, name##x)(DisasContext *ctx)                     \
{                                                                      \
    TCGv EA;                                                           \
    TCGv_i64 t0;                                                       \
    if (unlikely(!ctx->fpu_enabled)) {                                 \
        gen_exception(ctx, POWERPC_EXCP_FPU);                          \
        return;                                                        \
    }                                                                  \
    gen_set_access_type(ctx, ACCESS_FLOAT);                            \
    EA = tcg_temp_new();                                               \
    t0 = tcg_temp_new_i64();                                           \
    gen_addr_reg_index(ctx, EA);                                       \
    gen_qemu_##ldop(ctx, t0, EA);                                      \
    set_fpr(rD(ctx->opcode), t0);                                      \
    tcg_temp_free(EA);                                                 \
    tcg_temp_free_i64(t0);                                             \
}
/* Instantiate the four addressing-mode variants of one FP load. */
#define GEN_LDFS(name, ldop, op, type)                                 \
GEN_LDF(name, ldop, op | 0x20, type);                                  \
GEN_LDUF(name, ldop, op | 0x21, type);                                 \
GEN_LDUXF(name, ldop, op | 0x01, type);                                \
GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
829 static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr)
831 TCGv_i32 tmp = tcg_temp_new_i32();
832 tcg_gen_qemu_ld_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
833 gen_helper_todouble(dest, tmp);
834 tcg_temp_free_i32(tmp);
837 /* lfd lfdu lfdux lfdx */
838 GEN_LDFS(lfd, ld64_i64, 0x12, PPC_FLOAT);
839 /* lfs lfsu lfsux lfsx */
840 GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT);
842 /* lfdepx (external PID lfdx) */
843 static void gen_lfdepx(DisasContext *ctx)
845 TCGv EA;
846 TCGv_i64 t0;
847 CHK_SV;
848 if (unlikely(!ctx->fpu_enabled)) {
849 gen_exception(ctx, POWERPC_EXCP_FPU);
850 return;
852 gen_set_access_type(ctx, ACCESS_FLOAT);
853 EA = tcg_temp_new();
854 t0 = tcg_temp_new_i64();
855 gen_addr_reg_index(ctx, EA);
856 tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_Q));
857 set_fpr(rD(ctx->opcode), t0);
858 tcg_temp_free(EA);
859 tcg_temp_free_i64(t0);
862 /* lfdp */
863 static void gen_lfdp(DisasContext *ctx)
865 TCGv EA;
866 TCGv_i64 t0;
867 if (unlikely(!ctx->fpu_enabled)) {
868 gen_exception(ctx, POWERPC_EXCP_FPU);
869 return;
871 gen_set_access_type(ctx, ACCESS_FLOAT);
872 EA = tcg_temp_new();
873 gen_addr_imm_index(ctx, EA, 0);
874 t0 = tcg_temp_new_i64();
875 /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
876 necessary 64-bit byteswap already. */
877 if (unlikely(ctx->le_mode)) {
878 gen_qemu_ld64_i64(ctx, t0, EA);
879 set_fpr(rD(ctx->opcode) + 1, t0);
880 tcg_gen_addi_tl(EA, EA, 8);
881 gen_qemu_ld64_i64(ctx, t0, EA);
882 set_fpr(rD(ctx->opcode), t0);
883 } else {
884 gen_qemu_ld64_i64(ctx, t0, EA);
885 set_fpr(rD(ctx->opcode), t0);
886 tcg_gen_addi_tl(EA, EA, 8);
887 gen_qemu_ld64_i64(ctx, t0, EA);
888 set_fpr(rD(ctx->opcode) + 1, t0);
890 tcg_temp_free(EA);
891 tcg_temp_free_i64(t0);
894 /* lfdpx */
895 static void gen_lfdpx(DisasContext *ctx)
897 TCGv EA;
898 TCGv_i64 t0;
899 if (unlikely(!ctx->fpu_enabled)) {
900 gen_exception(ctx, POWERPC_EXCP_FPU);
901 return;
903 gen_set_access_type(ctx, ACCESS_FLOAT);
904 EA = tcg_temp_new();
905 gen_addr_reg_index(ctx, EA);
906 t0 = tcg_temp_new_i64();
907 /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
908 necessary 64-bit byteswap already. */
909 if (unlikely(ctx->le_mode)) {
910 gen_qemu_ld64_i64(ctx, t0, EA);
911 set_fpr(rD(ctx->opcode) + 1, t0);
912 tcg_gen_addi_tl(EA, EA, 8);
913 gen_qemu_ld64_i64(ctx, t0, EA);
914 set_fpr(rD(ctx->opcode), t0);
915 } else {
916 gen_qemu_ld64_i64(ctx, t0, EA);
917 set_fpr(rD(ctx->opcode), t0);
918 tcg_gen_addi_tl(EA, EA, 8);
919 gen_qemu_ld64_i64(ctx, t0, EA);
920 set_fpr(rD(ctx->opcode) + 1, t0);
922 tcg_temp_free(EA);
923 tcg_temp_free_i64(t0);
926 /* lfiwax */
927 static void gen_lfiwax(DisasContext *ctx)
929 TCGv EA;
930 TCGv t0;
931 TCGv_i64 t1;
932 if (unlikely(!ctx->fpu_enabled)) {
933 gen_exception(ctx, POWERPC_EXCP_FPU);
934 return;
936 gen_set_access_type(ctx, ACCESS_FLOAT);
937 EA = tcg_temp_new();
938 t0 = tcg_temp_new();
939 t1 = tcg_temp_new_i64();
940 gen_addr_reg_index(ctx, EA);
941 gen_qemu_ld32s(ctx, t0, EA);
942 tcg_gen_ext_tl_i64(t1, t0);
943 set_fpr(rD(ctx->opcode), t1);
944 tcg_temp_free(EA);
945 tcg_temp_free(t0);
946 tcg_temp_free_i64(t1);
949 /* lfiwzx */
950 static void gen_lfiwzx(DisasContext *ctx)
952 TCGv EA;
953 TCGv_i64 t0;
954 if (unlikely(!ctx->fpu_enabled)) {
955 gen_exception(ctx, POWERPC_EXCP_FPU);
956 return;
958 gen_set_access_type(ctx, ACCESS_FLOAT);
959 EA = tcg_temp_new();
960 t0 = tcg_temp_new_i64();
961 gen_addr_reg_index(ctx, EA);
962 gen_qemu_ld32u_i64(ctx, t0, EA);
963 set_fpr(rD(ctx->opcode), t0);
964 tcg_temp_free(EA);
965 tcg_temp_free_i64(t0);
967 /*** Floating-point store ***/
/* FP store, D-form addressing: mem[rA + d] = frS. */
#define GEN_STF(name, stop, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                        \
{                                                                      \
    TCGv EA;                                                           \
    TCGv_i64 t0;                                                       \
    if (unlikely(!ctx->fpu_enabled)) {                                 \
        gen_exception(ctx, POWERPC_EXCP_FPU);                          \
        return;                                                        \
    }                                                                  \
    gen_set_access_type(ctx, ACCESS_FLOAT);                            \
    EA = tcg_temp_new();                                               \
    t0 = tcg_temp_new_i64();                                           \
    gen_addr_imm_index(ctx, EA, 0);                                    \
    get_fpr(t0, rS(ctx->opcode));                                      \
    gen_qemu_##stop(ctx, t0, EA);                                      \
    tcg_temp_free(EA);                                                 \
    tcg_temp_free_i64(t0);                                             \
}
/* FP store with update, D-form: rA must be non-zero; rA = EA afterwards. */
#define GEN_STUF(name, stop, opc, type)                                \
static void glue(gen_, name##u)(DisasContext *ctx)                     \
{                                                                      \
    TCGv EA;                                                           \
    TCGv_i64 t0;                                                       \
    if (unlikely(!ctx->fpu_enabled)) {                                 \
        gen_exception(ctx, POWERPC_EXCP_FPU);                          \
        return;                                                        \
    }                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                              \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
        return;                                                        \
    }                                                                  \
    gen_set_access_type(ctx, ACCESS_FLOAT);                            \
    EA = tcg_temp_new();                                               \
    t0 = tcg_temp_new_i64();                                           \
    gen_addr_imm_index(ctx, EA, 0);                                    \
    get_fpr(t0, rS(ctx->opcode));                                      \
    gen_qemu_##stop(ctx, t0, EA);                                      \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                      \
    tcg_temp_free(EA);                                                 \
    tcg_temp_free_i64(t0);                                             \
}
/* FP store with update, X-form: rA must be non-zero; rA = EA afterwards. */
#define GEN_STUXF(name, stop, opc, type)                               \
static void glue(gen_, name##ux)(DisasContext *ctx)                    \
{                                                                      \
    TCGv EA;                                                           \
    TCGv_i64 t0;                                                       \
    if (unlikely(!ctx->fpu_enabled)) {                                 \
        gen_exception(ctx, POWERPC_EXCP_FPU);                          \
        return;                                                        \
    }                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                              \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
        return;                                                        \
    }                                                                  \
    gen_set_access_type(ctx, ACCESS_FLOAT);                            \
    EA = tcg_temp_new();                                               \
    t0 = tcg_temp_new_i64();                                           \
    gen_addr_reg_index(ctx, EA);                                       \
    get_fpr(t0, rS(ctx->opcode));                                      \
    gen_qemu_##stop(ctx, t0, EA);                                      \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                      \
    tcg_temp_free(EA);                                                 \
    tcg_temp_free_i64(t0);                                             \
}
/* FP store, X-form addressing: mem[rA|0 + rB] = frS. */
#define GEN_STXF(name, stop, opc2, opc3, type)                         \
static void glue(gen_, name##x)(DisasContext *ctx)                     \
{                                                                      \
    TCGv EA;                                                           \
    TCGv_i64 t0;                                                       \
    if (unlikely(!ctx->fpu_enabled)) {                                 \
        gen_exception(ctx, POWERPC_EXCP_FPU);                          \
        return;                                                        \
    }                                                                  \
    gen_set_access_type(ctx, ACCESS_FLOAT);                            \
    EA = tcg_temp_new();                                               \
    t0 = tcg_temp_new_i64();                                           \
    gen_addr_reg_index(ctx, EA);                                       \
    get_fpr(t0, rS(ctx->opcode));                                      \
    gen_qemu_##stop(ctx, t0, EA);                                      \
    tcg_temp_free(EA);                                                 \
    tcg_temp_free_i64(t0);                                             \
}
/* Instantiate the four addressing-mode variants of one FP store. */
#define GEN_STFS(name, stop, op, type)                                 \
GEN_STF(name, stop, op | 0x20, type);                                  \
GEN_STUF(name, stop, op | 0x21, type);                                 \
GEN_STUXF(name, stop, op | 0x01, type);                                \
GEN_STXF(name, stop, 0x17, op | 0x00, type)
1060 static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr)
1062 TCGv_i32 tmp = tcg_temp_new_i32();
1063 gen_helper_tosingle(tmp, src);
1064 tcg_gen_qemu_st_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
1065 tcg_temp_free_i32(tmp);
1068 /* stfd stfdu stfdux stfdx */
1069 GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT);
1070 /* stfs stfsu stfsux stfsx */
1071 GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
1073 /* stfdepx (external PID lfdx) */
1074 static void gen_stfdepx(DisasContext *ctx)
1076 TCGv EA;
1077 TCGv_i64 t0;
1078 CHK_SV;
1079 if (unlikely(!ctx->fpu_enabled)) {
1080 gen_exception(ctx, POWERPC_EXCP_FPU);
1081 return;
1083 gen_set_access_type(ctx, ACCESS_FLOAT);
1084 EA = tcg_temp_new();
1085 t0 = tcg_temp_new_i64();
1086 gen_addr_reg_index(ctx, EA);
1087 get_fpr(t0, rD(ctx->opcode));
1088 tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_Q));
1089 tcg_temp_free(EA);
1090 tcg_temp_free_i64(t0);
1093 /* stfdp */
1094 static void gen_stfdp(DisasContext *ctx)
1096 TCGv EA;
1097 TCGv_i64 t0;
1098 if (unlikely(!ctx->fpu_enabled)) {
1099 gen_exception(ctx, POWERPC_EXCP_FPU);
1100 return;
1102 gen_set_access_type(ctx, ACCESS_FLOAT);
1103 EA = tcg_temp_new();
1104 t0 = tcg_temp_new_i64();
1105 gen_addr_imm_index(ctx, EA, 0);
1106 /* We only need to swap high and low halves. gen_qemu_st64_i64 does
1107 necessary 64-bit byteswap already. */
1108 if (unlikely(ctx->le_mode)) {
1109 get_fpr(t0, rD(ctx->opcode) + 1);
1110 gen_qemu_st64_i64(ctx, t0, EA);
1111 tcg_gen_addi_tl(EA, EA, 8);
1112 get_fpr(t0, rD(ctx->opcode));
1113 gen_qemu_st64_i64(ctx, t0, EA);
1114 } else {
1115 get_fpr(t0, rD(ctx->opcode));
1116 gen_qemu_st64_i64(ctx, t0, EA);
1117 tcg_gen_addi_tl(EA, EA, 8);
1118 get_fpr(t0, rD(ctx->opcode) + 1);
1119 gen_qemu_st64_i64(ctx, t0, EA);
1121 tcg_temp_free(EA);
1122 tcg_temp_free_i64(t0);
1125 /* stfdpx */
1126 static void gen_stfdpx(DisasContext *ctx)
1128 TCGv EA;
1129 TCGv_i64 t0;
1130 if (unlikely(!ctx->fpu_enabled)) {
1131 gen_exception(ctx, POWERPC_EXCP_FPU);
1132 return;
1134 gen_set_access_type(ctx, ACCESS_FLOAT);
1135 EA = tcg_temp_new();
1136 t0 = tcg_temp_new_i64();
1137 gen_addr_reg_index(ctx, EA);
1138 /* We only need to swap high and low halves. gen_qemu_st64_i64 does
1139 necessary 64-bit byteswap already. */
1140 if (unlikely(ctx->le_mode)) {
1141 get_fpr(t0, rD(ctx->opcode) + 1);
1142 gen_qemu_st64_i64(ctx, t0, EA);
1143 tcg_gen_addi_tl(EA, EA, 8);
1144 get_fpr(t0, rD(ctx->opcode));
1145 gen_qemu_st64_i64(ctx, t0, EA);
1146 } else {
1147 get_fpr(t0, rD(ctx->opcode));
1148 gen_qemu_st64_i64(ctx, t0, EA);
1149 tcg_gen_addi_tl(EA, EA, 8);
1150 get_fpr(t0, rD(ctx->opcode) + 1);
1151 gen_qemu_st64_i64(ctx, t0, EA);
1153 tcg_temp_free(EA);
1154 tcg_temp_free_i64(t0);
1157 /* Optional: */
1158 static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
1160 TCGv t0 = tcg_temp_new();
1161 tcg_gen_trunc_i64_tl(t0, arg1),
1162 gen_qemu_st32(ctx, t0, arg2);
1163 tcg_temp_free(t0);
1165 /* stfiwx */
1166 GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);
1168 /* POWER2 specific instructions */
1169 /* Quad manipulation (load/store two floats at a time) */
1171 /* lfq */
1172 static void gen_lfq(DisasContext *ctx)
1174 int rd = rD(ctx->opcode);
1175 TCGv t0;
1176 TCGv_i64 t1;
1177 gen_set_access_type(ctx, ACCESS_FLOAT);
1178 t0 = tcg_temp_new();
1179 t1 = tcg_temp_new_i64();
1180 gen_addr_imm_index(ctx, t0, 0);
1181 gen_qemu_ld64_i64(ctx, t1, t0);
1182 set_fpr(rd, t1);
1183 gen_addr_add(ctx, t0, t0, 8);
1184 gen_qemu_ld64_i64(ctx, t1, t0);
1185 set_fpr((rd + 1) % 32, t1);
1186 tcg_temp_free(t0);
1187 tcg_temp_free_i64(t1);
1190 /* lfqu */
1191 static void gen_lfqu(DisasContext *ctx)
1193 int ra = rA(ctx->opcode);
1194 int rd = rD(ctx->opcode);
1195 TCGv t0, t1;
1196 TCGv_i64 t2;
1197 gen_set_access_type(ctx, ACCESS_FLOAT);
1198 t0 = tcg_temp_new();
1199 t1 = tcg_temp_new();
1200 t2 = tcg_temp_new_i64();
1201 gen_addr_imm_index(ctx, t0, 0);
1202 gen_qemu_ld64_i64(ctx, t2, t0);
1203 set_fpr(rd, t2);
1204 gen_addr_add(ctx, t1, t0, 8);
1205 gen_qemu_ld64_i64(ctx, t2, t1);
1206 set_fpr((rd + 1) % 32, t2);
1207 if (ra != 0)
1208 tcg_gen_mov_tl(cpu_gpr[ra], t0);
1209 tcg_temp_free(t0);
1210 tcg_temp_free(t1);
1211 tcg_temp_free_i64(t2);
1214 /* lfqux */
1215 static void gen_lfqux(DisasContext *ctx)
1217 int ra = rA(ctx->opcode);
1218 int rd = rD(ctx->opcode);
1219 gen_set_access_type(ctx, ACCESS_FLOAT);
1220 TCGv t0, t1;
1221 TCGv_i64 t2;
1222 t2 = tcg_temp_new_i64();
1223 t0 = tcg_temp_new();
1224 gen_addr_reg_index(ctx, t0);
1225 gen_qemu_ld64_i64(ctx, t2, t0);
1226 set_fpr(rd, t2);
1227 t1 = tcg_temp_new();
1228 gen_addr_add(ctx, t1, t0, 8);
1229 gen_qemu_ld64_i64(ctx, t2, t1);
1230 set_fpr((rd + 1) % 32, t2);
1231 tcg_temp_free(t1);
1232 if (ra != 0)
1233 tcg_gen_mov_tl(cpu_gpr[ra], t0);
1234 tcg_temp_free(t0);
1235 tcg_temp_free_i64(t2);
1238 /* lfqx */
1239 static void gen_lfqx(DisasContext *ctx)
1241 int rd = rD(ctx->opcode);
1242 TCGv t0;
1243 TCGv_i64 t1;
1244 gen_set_access_type(ctx, ACCESS_FLOAT);
1245 t0 = tcg_temp_new();
1246 t1 = tcg_temp_new_i64();
1247 gen_addr_reg_index(ctx, t0);
1248 gen_qemu_ld64_i64(ctx, t1, t0);
1249 set_fpr(rd, t1);
1250 gen_addr_add(ctx, t0, t0, 8);
1251 gen_qemu_ld64_i64(ctx, t1, t0);
1252 set_fpr((rd + 1) % 32, t1);
1253 tcg_temp_free(t0);
1254 tcg_temp_free_i64(t1);
1257 /* stfq */
1258 static void gen_stfq(DisasContext *ctx)
1260 int rd = rD(ctx->opcode);
1261 TCGv t0;
1262 TCGv_i64 t1;
1263 gen_set_access_type(ctx, ACCESS_FLOAT);
1264 t0 = tcg_temp_new();
1265 t1 = tcg_temp_new_i64();
1266 gen_addr_imm_index(ctx, t0, 0);
1267 get_fpr(t1, rd);
1268 gen_qemu_st64_i64(ctx, t1, t0);
1269 gen_addr_add(ctx, t0, t0, 8);
1270 get_fpr(t1, (rd + 1) % 32);
1271 gen_qemu_st64_i64(ctx, t1, t0);
1272 tcg_temp_free(t0);
1273 tcg_temp_free_i64(t1);
1276 /* stfqu */
1277 static void gen_stfqu(DisasContext *ctx)
1279 int ra = rA(ctx->opcode);
1280 int rd = rD(ctx->opcode);
1281 TCGv t0, t1;
1282 TCGv_i64 t2;
1283 gen_set_access_type(ctx, ACCESS_FLOAT);
1284 t2 = tcg_temp_new_i64();
1285 t0 = tcg_temp_new();
1286 gen_addr_imm_index(ctx, t0, 0);
1287 get_fpr(t2, rd);
1288 gen_qemu_st64_i64(ctx, t2, t0);
1289 t1 = tcg_temp_new();
1290 gen_addr_add(ctx, t1, t0, 8);
1291 get_fpr(t2, (rd + 1) % 32);
1292 gen_qemu_st64_i64(ctx, t2, t1);
1293 tcg_temp_free(t1);
1294 if (ra != 0) {
1295 tcg_gen_mov_tl(cpu_gpr[ra], t0);
1297 tcg_temp_free(t0);
1298 tcg_temp_free_i64(t2);
1301 /* stfqux */
1302 static void gen_stfqux(DisasContext *ctx)
1304 int ra = rA(ctx->opcode);
1305 int rd = rD(ctx->opcode);
1306 TCGv t0, t1;
1307 TCGv_i64 t2;
1308 gen_set_access_type(ctx, ACCESS_FLOAT);
1309 t2 = tcg_temp_new_i64();
1310 t0 = tcg_temp_new();
1311 gen_addr_reg_index(ctx, t0);
1312 get_fpr(t2, rd);
1313 gen_qemu_st64_i64(ctx, t2, t0);
1314 t1 = tcg_temp_new();
1315 gen_addr_add(ctx, t1, t0, 8);
1316 get_fpr(t2, (rd + 1) % 32);
1317 gen_qemu_st64_i64(ctx, t2, t1);
1318 tcg_temp_free(t1);
1319 if (ra != 0) {
1320 tcg_gen_mov_tl(cpu_gpr[ra], t0);
1322 tcg_temp_free(t0);
1323 tcg_temp_free_i64(t2);
1326 /* stfqx */
1327 static void gen_stfqx(DisasContext *ctx)
1329 int rd = rD(ctx->opcode);
1330 TCGv t0;
1331 TCGv_i64 t1;
1332 gen_set_access_type(ctx, ACCESS_FLOAT);
1333 t1 = tcg_temp_new_i64();
1334 t0 = tcg_temp_new();
1335 gen_addr_reg_index(ctx, t0);
1336 get_fpr(t1, rd);
1337 gen_qemu_st64_i64(ctx, t1, t0);
1338 gen_addr_add(ctx, t0, t0, 8);
1339 get_fpr(t1, (rd + 1) % 32);
1340 gen_qemu_st64_i64(ctx, t1, t0);
1341 tcg_temp_free(t0);
1342 tcg_temp_free_i64(t1);
1345 #undef _GEN_FLOAT_ACB
1346 #undef GEN_FLOAT_ACB
1347 #undef _GEN_FLOAT_AB
1348 #undef GEN_FLOAT_AB
1349 #undef _GEN_FLOAT_AC
1350 #undef GEN_FLOAT_AC
1351 #undef GEN_FLOAT_B
1352 #undef GEN_FLOAT_BS
1354 #undef GEN_LDF
1355 #undef GEN_LDUF
1356 #undef GEN_LDUXF
1357 #undef GEN_LDXF
1358 #undef GEN_LDFS
1360 #undef GEN_STF
1361 #undef GEN_STUF
1362 #undef GEN_STUXF
1363 #undef GEN_STXF
1364 #undef GEN_STFS