/* target/ppc/translate/vsx-impl.c.inc */

/***                           VSX extension                               ***/
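
/*
 * Helpers to read/write one 64-bit half of a VSR in CPUPPCState, and
 * to build TCGv_ptr values that point at a whole VSR or at one of the
 * ACC accumulator registers.
 */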
static inline void get_cpu_vsr(TCGv_i64 dst, int n, bool high)
{
    tcg_gen_ld_i64(dst, tcg_env, vsr64_offset(n, high));
}

static inline void set_cpu_vsr(int n, TCGv_i64 src, bool high)
{
    tcg_gen_st_i64(src, tcg_env, vsr64_offset(n, high));
}

static inline TCGv_ptr gen_vsr_ptr(int reg)
{
    TCGv_ptr r = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(r, tcg_env, vsr_full_offset(reg));
    return r;
}

static inline TCGv_ptr gen_acc_ptr(int reg)
{
    TCGv_ptr r = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(r, tcg_env, acc_full_offset(reg));
    return r;
}
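
/*
 * VSX_LOAD_SCALAR: scalar indexed loads.  Raise a VSX-unavailable
 * exception when MSR[VSX] is clear, otherwise load into the upper
 * doubleword of VSR[XT]; the lower doubleword is left undefined.
 */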
#define VSX_LOAD_SCALAR(name, operation)                      \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv EA;                                                  \
    TCGv_i64 t0;                                              \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    t0 = tcg_temp_new_i64();                                  \
    gen_set_access_type(ctx, ACCESS_INT);                     \
    EA = tcg_temp_new();                                      \
    gen_addr_reg_index(ctx, EA);                              \
    gen_qemu_##operation(ctx, t0, EA);                        \
    set_cpu_vsr(xT(ctx->opcode), t0, true);                   \
    /* NOTE: cpu_vsrl is undefined */                         \
}

VSX_LOAD_SCALAR(lxsdx, ld64_i64)
VSX_LOAD_SCALAR(lxsiwax, ld32s_i64)
VSX_LOAD_SCALAR(lxsibzx, ld8u_i64)
VSX_LOAD_SCALAR(lxsihzx, ld16u_i64)
VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64)
VSX_LOAD_SCALAR(lxsspx, ld32fs)
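
/* lxvd2x: load the two doublewords of VSR[XT] from EA and EA + 8 */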
static void gen_lxvd2x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    t0 = tcg_temp_new_i64();
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld64_i64(ctx, t0, EA);
    set_cpu_vsr(xT(ctx->opcode), t0, true);
    tcg_gen_addi_tl(EA, EA, 8);
    gen_qemu_ld64_i64(ctx, t0, EA);
    set_cpu_vsr(xT(ctx->opcode), t0, false);
}
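
/*
 * lxvw4x: load VSR[XT] as four words.  In little-endian mode each
 * doubleword is loaded LE and its word halves are swapped back into
 * element order with a shift and deposit.
 */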
static void gen_lxvw4x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xth;
    TCGv_i64 xtl;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();

    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();

    gen_addr_reg_index(ctx, EA);
    if (ctx->le_mode) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ);
        tcg_gen_shri_i64(t1, t0, 32);
        tcg_gen_deposit_i64(xth, t1, t0, 32, 32);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ);
        tcg_gen_shri_i64(t1, t0, 32);
        tcg_gen_deposit_i64(xtl, t1, t0, 32, 32);
    } else {
        tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
    }
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}
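
/*
 * lxvwsx/lxvdsx: load one word/doubleword and splat it across VSR[XT].
 * For lxvwsx an XT of 32 or above names a Vector register, so the VMX
 * availability bit is checked there instead of the VSX one.
 */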
static void gen_lxvwsx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i32 data;

    if (xT(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }

    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();

    gen_addr_reg_index(ctx, EA);

    data = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UL));
    tcg_gen_gvec_dup_i32(MO_UL, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
}

static void gen_lxvdsx(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 data;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }

    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();

    gen_addr_reg_index(ctx, EA);

    data = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UQ));
    tcg_gen_gvec_dup_i64(MO_UQ, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
}
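
/*
 * Byte-swap helpers for a 128-bit value held as a (high, low) pair of
 * i64: gen_bswap16x8 swaps the bytes inside each of the eight
 * halfwords, gen_bswap32x4 inside each of the four words.
 */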
static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl,
                          TCGv_i64 inh, TCGv_i64 inl)
{
    TCGv_i64 mask = tcg_constant_i64(0x00FF00FF00FF00FF);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* outh = ((inh & mask) << 8) | ((inh >> 8) & mask) */
    tcg_gen_and_i64(t0, inh, mask);
    tcg_gen_shli_i64(t0, t0, 8);
    tcg_gen_shri_i64(t1, inh, 8);
    tcg_gen_and_i64(t1, t1, mask);
    tcg_gen_or_i64(outh, t0, t1);

    /* outl = ((inl & mask) << 8) | ((inl >> 8) & mask) */
    tcg_gen_and_i64(t0, inl, mask);
    tcg_gen_shli_i64(t0, t0, 8);
    tcg_gen_shri_i64(t1, inl, 8);
    tcg_gen_and_i64(t1, t1, mask);
    tcg_gen_or_i64(outl, t0, t1);
}

static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl,
                          TCGv_i64 inh, TCGv_i64 inl)
{
    TCGv_i64 hi = tcg_temp_new_i64();
    TCGv_i64 lo = tcg_temp_new_i64();

    tcg_gen_bswap64_i64(hi, inh);
    tcg_gen_bswap64_i64(lo, inl);
    tcg_gen_shri_i64(outh, hi, 32);
    tcg_gen_deposit_i64(outh, outh, hi, 32, 32);
    tcg_gen_shri_i64(outl, lo, 32);
    tcg_gen_deposit_i64(outl, outl, lo, 32, 32);
}
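
/*
 * lxvh8x/lxvb16x: load VSR[XT] as eight halfwords/sixteen bytes.  Both
 * load big-endian doublewords; lxvh8x then fixes up halfword order in
 * little-endian mode, while the byte form needs no fixup.
 */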
static void gen_lxvh8x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xth;
    TCGv_i64 xtl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    gen_set_access_type(ctx, ACCESS_INT);

    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
    tcg_gen_addi_tl(EA, EA, 8);
    tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
    if (ctx->le_mode) {
        gen_bswap16x8(xth, xtl, xth, xtl);
    }
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static void gen_lxvb16x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xth;
    TCGv_i64 xtl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
    tcg_gen_addi_tl(EA, EA, 8);
    tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}
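
/*
 * lxvl/lxvll/stxvl/stxvll: loads and stores with the length taken from
 * GPR[RB].  These go through out-of-line helpers and exist only on
 * 64-bit targets.
 */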
#ifdef TARGET_PPC64
#define VSX_VECTOR_LOAD_STORE_LENGTH(name)                         \
static void gen_##name(DisasContext *ctx)                          \
{                                                                  \
    TCGv EA;                                                       \
    TCGv_ptr xt;                                                   \
                                                                   \
    if (xT(ctx->opcode) < 32) {                                    \
        if (unlikely(!ctx->vsx_enabled)) {                         \
            gen_exception(ctx, POWERPC_EXCP_VSXU);                 \
            return;                                                \
        }                                                          \
    } else {                                                       \
        if (unlikely(!ctx->altivec_enabled)) {                     \
            gen_exception(ctx, POWERPC_EXCP_VPU);                  \
            return;                                                \
        }                                                          \
    }                                                              \
    EA = tcg_temp_new();                                           \
    xt = gen_vsr_ptr(xT(ctx->opcode));                             \
    gen_set_access_type(ctx, ACCESS_INT);                          \
    gen_addr_register(ctx, EA);                                    \
    gen_helper_##name(tcg_env, EA, xt, cpu_gpr[rB(ctx->opcode)]);  \
}

VSX_VECTOR_LOAD_STORE_LENGTH(lxvl)
VSX_VECTOR_LOAD_STORE_LENGTH(lxvll)
VSX_VECTOR_LOAD_STORE_LENGTH(stxvl)
VSX_VECTOR_LOAD_STORE_LENGTH(stxvll)
#endif
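
/*
 * VSX_STORE_SCALAR: store the upper doubleword of VSR[XS] with the
 * given store operation at the indexed effective address.
 */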
#define VSX_STORE_SCALAR(name, operation)                     \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv EA;                                                  \
    TCGv_i64 t0;                                              \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    t0 = tcg_temp_new_i64();                                  \
    gen_set_access_type(ctx, ACCESS_INT);                     \
    EA = tcg_temp_new();                                      \
    gen_addr_reg_index(ctx, EA);                              \
    get_cpu_vsr(t0, xS(ctx->opcode), true);                   \
    gen_qemu_##operation(ctx, t0, EA);                        \
}

VSX_STORE_SCALAR(stxsdx, st64_i64)

VSX_STORE_SCALAR(stxsibx, st8_i64)
VSX_STORE_SCALAR(stxsihx, st16_i64)
VSX_STORE_SCALAR(stxsiwx, st32_i64)
VSX_STORE_SCALAR(stxsspx, st32fs)

static void gen_stxvd2x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 t0;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    t0 = tcg_temp_new_i64();
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    get_cpu_vsr(t0, xS(ctx->opcode), true);
    gen_qemu_st64_i64(ctx, t0, EA);
    tcg_gen_addi_tl(EA, EA, 8);
    get_cpu_vsr(t0, xS(ctx->opcode), false);
    gen_qemu_st64_i64(ctx, t0, EA);
}

static void gen_stxvw4x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xsh;
    TCGv_i64 xsl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xsh = tcg_temp_new_i64();
    xsl = tcg_temp_new_i64();
    get_cpu_vsr(xsh, xS(ctx->opcode), true);
    get_cpu_vsr(xsl, xS(ctx->opcode), false);
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    if (ctx->le_mode) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        tcg_gen_shri_i64(t0, xsh, 32);
        tcg_gen_deposit_i64(t1, t0, xsh, 32, 32);
        tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_shri_i64(t0, xsl, 32);
        tcg_gen_deposit_i64(t1, t0, xsl, 32, 32);
        tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ);
    } else {
        tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
    }
}

static void gen_stxvh8x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xsh;
    TCGv_i64 xsl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xsh = tcg_temp_new_i64();
    xsl = tcg_temp_new_i64();
    get_cpu_vsr(xsh, xS(ctx->opcode), true);
    get_cpu_vsr(xsl, xS(ctx->opcode), false);
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    if (ctx->le_mode) {
        TCGv_i64 outh = tcg_temp_new_i64();
        TCGv_i64 outl = tcg_temp_new_i64();

        gen_bswap16x8(outh, outl, xsh, xsl);
        tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEUQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEUQ);
    } else {
        tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
        tcg_gen_addi_tl(EA, EA, 8);
        tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
    }
}

static void gen_stxvb16x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 xsh;
    TCGv_i64 xsl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xsh = tcg_temp_new_i64();
    xsl = tcg_temp_new_i64();
    get_cpu_vsr(xsh, xS(ctx->opcode), true);
    get_cpu_vsr(xsl, xS(ctx->opcode), false);
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
    tcg_gen_addi_tl(EA, EA, 8);
    tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
}
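
/*
 * Moves between GPRs and VSRs.  The word and doubleword forms below
 * check MSR[FP] when the VSR is below 32 (it aliases an FPR) and
 * MSR[VEC] from 32 up (it aliases a Vector register); the ISA 3.0
 * forms (mfvsrld, mtvsrdd, mtvsrws) check MSR[VSX] instead.
 */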
static void gen_mfvsrwz(DisasContext *ctx)
{
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->fpu_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_FPU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 xsh = tcg_temp_new_i64();
    get_cpu_vsr(xsh, xS(ctx->opcode), true);
    tcg_gen_ext32u_i64(tmp, xsh);
    tcg_gen_trunc_i64_tl(cpu_gpr[rA(ctx->opcode)], tmp);
}

static void gen_mtvsrwa(DisasContext *ctx)
{
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->fpu_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_FPU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 xsh = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_i64(xsh, tmp);
    set_cpu_vsr(xT(ctx->opcode), xsh, true);
}

static void gen_mtvsrwz(DisasContext *ctx)
{
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->fpu_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_FPU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 xsh = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32u_i64(xsh, tmp);
    set_cpu_vsr(xT(ctx->opcode), xsh, true);
}

#if defined(TARGET_PPC64)
static void gen_mfvsrd(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->fpu_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_FPU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    t0 = tcg_temp_new_i64();
    get_cpu_vsr(t0, xS(ctx->opcode), true);
    tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
}

static void gen_mtvsrd(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->fpu_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_FPU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    t0 = tcg_temp_new_i64();
    tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    set_cpu_vsr(xT(ctx->opcode), t0, true);
}

static void gen_mfvsrld(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }
    t0 = tcg_temp_new_i64();
    get_cpu_vsr(t0, xS(ctx->opcode), false);
    tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
}

static void gen_mtvsrdd(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (xT(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }

    t0 = tcg_temp_new_i64();
    if (!rA(ctx->opcode)) {
        tcg_gen_movi_i64(t0, 0);
    } else {
        tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
    }
    set_cpu_vsr(xT(ctx->opcode), t0, true);

    tcg_gen_mov_i64(t0, cpu_gpr[rB(ctx->opcode)]);
    set_cpu_vsr(xT(ctx->opcode), t0, false);
}

static void gen_mtvsrws(DisasContext *ctx)
{
    TCGv_i64 t0;
    if (xT(ctx->opcode) < 32) {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    } else {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    }

    t0 = tcg_temp_new_i64();
    tcg_gen_deposit_i64(t0, cpu_gpr[rA(ctx->opcode)],
                        cpu_gpr[rA(ctx->opcode)], 32, 32);
    set_cpu_vsr(xT(ctx->opcode), t0, false);
    set_cpu_vsr(xT(ctx->opcode), t0, true);
}
#endif
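
/*
 * Sign manipulation helpers.  OP_* selects abs, negated abs, negate or
 * copy-sign; the masks pick out the sign, exponent and fraction fields
 * of one double (DP) or of two singles packed in a doubleword (SP).
 */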
#define OP_ABS 1
#define OP_NABS 2
#define OP_NEG 3
#define OP_CPSGN 4
#define SGN_MASK_DP  0x8000000000000000ull
#define SGN_MASK_SP 0x8000000080000000ull
#define EXP_MASK_DP  0x7FF0000000000000ull
#define EXP_MASK_SP 0x7F8000007F800000ull
#define FRC_MASK_DP (~(SGN_MASK_DP | EXP_MASK_DP))
#define FRC_MASK_SP (~(SGN_MASK_SP | EXP_MASK_SP))
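
/*
 * VSX_SCALAR_MOVE: apply the sign operation to the upper doubleword of
 * VSR[XB] and zero the lower doubleword of VSR[XT].
 */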
#define VSX_SCALAR_MOVE(name, op, sgn_mask)                       \
static void glue(gen_, name)(DisasContext *ctx)                   \
    {                                                             \
        TCGv_i64 xb, sgm;                                         \
        if (unlikely(!ctx->vsx_enabled)) {                        \
            gen_exception(ctx, POWERPC_EXCP_VSXU);                \
            return;                                               \
        }                                                         \
        xb = tcg_temp_new_i64();                                  \
        sgm = tcg_temp_new_i64();                                 \
        get_cpu_vsr(xb, xB(ctx->opcode), true);                   \
        tcg_gen_movi_i64(sgm, sgn_mask);                          \
        switch (op) {                                             \
            case OP_ABS: {                                        \
                tcg_gen_andc_i64(xb, xb, sgm);                    \
                break;                                            \
            }                                                     \
            case OP_NABS: {                                       \
                tcg_gen_or_i64(xb, xb, sgm);                      \
                break;                                            \
            }                                                     \
            case OP_NEG: {                                        \
                tcg_gen_xor_i64(xb, xb, sgm);                     \
                break;                                            \
            }                                                     \
            case OP_CPSGN: {                                      \
                TCGv_i64 xa = tcg_temp_new_i64();                 \
                get_cpu_vsr(xa, xA(ctx->opcode), true);           \
                tcg_gen_and_i64(xa, xa, sgm);                     \
                tcg_gen_andc_i64(xb, xb, sgm);                    \
                tcg_gen_or_i64(xb, xb, xa);                       \
                break;                                            \
            }                                                     \
        }                                                         \
        set_cpu_vsr(xT(ctx->opcode), xb, true);                   \
        set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \
    }

VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP)
VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP)
VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP)
VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP)
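
/*
 * VSX_SCALAR_MOVE_QP: quad-precision variant.  Operands live in VSRs
 * 32..63 (rX + 32); only the sign bit of the most significant
 * doubleword is affected and the low doubleword is copied unchanged.
 */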
#define VSX_SCALAR_MOVE_QP(name, op, sgn_mask)                    \
static void glue(gen_, name)(DisasContext *ctx)                   \
{                                                                 \
    int xa;                                                       \
    int xt = rD(ctx->opcode) + 32;                                \
    int xb = rB(ctx->opcode) + 32;                                \
    TCGv_i64 xah, xbh, xbl, sgm, tmp;                             \
                                                                  \
    if (unlikely(!ctx->vsx_enabled)) {                            \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                    \
        return;                                                   \
    }                                                             \
    xbh = tcg_temp_new_i64();                                     \
    xbl = tcg_temp_new_i64();                                     \
    sgm = tcg_temp_new_i64();                                     \
    tmp = tcg_temp_new_i64();                                     \
    get_cpu_vsr(xbh, xb, true);                                   \
    get_cpu_vsr(xbl, xb, false);                                  \
    tcg_gen_movi_i64(sgm, sgn_mask);                              \
    switch (op) {                                                 \
    case OP_ABS:                                                  \
        tcg_gen_andc_i64(xbh, xbh, sgm);                          \
        break;                                                    \
    case OP_NABS:                                                 \
        tcg_gen_or_i64(xbh, xbh, sgm);                            \
        break;                                                    \
    case OP_NEG:                                                  \
        tcg_gen_xor_i64(xbh, xbh, sgm);                           \
        break;                                                    \
    case OP_CPSGN:                                                \
        xah = tcg_temp_new_i64();                                 \
        xa = rA(ctx->opcode) + 32;                                \
        get_cpu_vsr(tmp, xa, true);                               \
        tcg_gen_and_i64(xah, tmp, sgm);                           \
        tcg_gen_andc_i64(xbh, xbh, sgm);                          \
        tcg_gen_or_i64(xbh, xbh, xah);                            \
        break;                                                    \
    }                                                             \
    set_cpu_vsr(xt, xbh, true);                                   \
    set_cpu_vsr(xt, xbl, false);                                  \
}

VSX_SCALAR_MOVE_QP(xsabsqp, OP_ABS, SGN_MASK_DP)
VSX_SCALAR_MOVE_QP(xsnabsqp, OP_NABS, SGN_MASK_DP)
VSX_SCALAR_MOVE_QP(xsnegqp, OP_NEG, SGN_MASK_DP)
VSX_SCALAR_MOVE_QP(xscpsgnqp, OP_CPSGN, SGN_MASK_DP)

#define TCG_OP_IMM_i64(FUNC, OP, IMM)                           \
    static void FUNC(TCGv_i64 t, TCGv_i64 b)                    \
    {                                                           \
        OP(t, b, IMM);                                          \
    }

TCG_OP_IMM_i64(do_xvabssp_i64, tcg_gen_andi_i64, ~SGN_MASK_SP)
TCG_OP_IMM_i64(do_xvnabssp_i64, tcg_gen_ori_i64, SGN_MASK_SP)
TCG_OP_IMM_i64(do_xvnegsp_i64, tcg_gen_xori_i64, SGN_MASK_SP)
TCG_OP_IMM_i64(do_xvabsdp_i64, tcg_gen_andi_i64, ~SGN_MASK_DP)
TCG_OP_IMM_i64(do_xvnabsdp_i64, tcg_gen_ori_i64, SGN_MASK_DP)
TCG_OP_IMM_i64(do_xvnegdp_i64, tcg_gen_xori_i64, SGN_MASK_DP)
#undef TCG_OP_IMM_i64
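
/*
 * Vector abs/nabs/neg: xv_msb_op1 applies the given vector op between
 * b and a splat of the per-element sign-bit mask; do_vsx_msb_op wires
 * the i64 and vector variants into a gvec expansion.
 */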
static void xv_msb_op1(unsigned vece, TCGv_vec t, TCGv_vec b,
                 void (*tcg_gen_op_vec)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
{
    uint64_t msb = (vece == MO_32) ? SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_op_vec(vece, t, b, tcg_constant_vec_matching(t, vece, msb));
}

static void do_xvabs_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
{
    xv_msb_op1(vece, t, b, tcg_gen_andc_vec);
}

static void do_xvnabs_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
{
    xv_msb_op1(vece, t, b, tcg_gen_or_vec);
}

static void do_xvneg_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
{
    xv_msb_op1(vece, t, b, tcg_gen_xor_vec);
}

static bool do_vsx_msb_op(DisasContext *ctx, arg_XX2 *a, unsigned vece,
                          void (*vec)(unsigned, TCGv_vec, TCGv_vec),
                          void (*i64)(TCGv_i64, TCGv_i64))
{
    static const TCGOpcode vecop_list[] = {
        0
    };

    const GVecGen2 op = {
       .fni8 = i64,
       .fniv = vec,
       .opt_opc = vecop_list,
       .vece = vece
    };

    REQUIRE_INSNS_FLAGS2(ctx, VSX);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_2(vsr_full_offset(a->xt), vsr_full_offset(a->xb),
                   16, 16, &op);

    return true;
}

TRANS(XVABSDP, do_vsx_msb_op, MO_64, do_xvabs_vec, do_xvabsdp_i64)
TRANS(XVNABSDP, do_vsx_msb_op, MO_64, do_xvnabs_vec, do_xvnabsdp_i64)
TRANS(XVNEGDP, do_vsx_msb_op, MO_64, do_xvneg_vec, do_xvnegdp_i64)
TRANS(XVABSSP, do_vsx_msb_op, MO_32, do_xvabs_vec, do_xvabssp_i64)
TRANS(XVNABSSP, do_vsx_msb_op, MO_32, do_xvnabs_vec, do_xvnabssp_i64)
TRANS(XVNEGSP, do_vsx_msb_op, MO_32, do_xvneg_vec, do_xvnegsp_i64)
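
/*
 * Copy-sign: the sign bit comes from a, everything else from b.  The
 * vector form is a single bitsel under the sign-bit mask.
 */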
static void do_xvcpsgndp_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_andi_i64(a, a, SGN_MASK_DP);
    tcg_gen_andi_i64(b, b, ~SGN_MASK_DP);
    tcg_gen_or_i64(t, a, b);
}

static void do_xvcpsgnsp_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_andi_i64(a, a, SGN_MASK_SP);
    tcg_gen_andi_i64(b, b, ~SGN_MASK_SP);
    tcg_gen_or_i64(t, a, b);
}

static void do_xvcpsgn_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
{
    uint64_t msb = (vece == MO_32) ? SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_bitsel_vec(vece, t, tcg_constant_vec_matching(t, vece, msb), a, b);
}

static bool do_xvcpsgn(DisasContext *ctx, arg_XX3 *a, unsigned vece)
{
    static const TCGOpcode vecop_list[] = {
        0
    };

    static const GVecGen3 op[] = {
        {
            .fni8 = do_xvcpsgnsp_i64,
            .fniv = do_xvcpsgn_vec,
            .opt_opc = vecop_list,
            .vece = MO_32
        },
        {
            .fni8 = do_xvcpsgndp_i64,
            .fniv = do_xvcpsgn_vec,
            .opt_opc = vecop_list,
            .vece = MO_64
        },
    };

    REQUIRE_INSNS_FLAGS2(ctx, VSX);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_3(vsr_full_offset(a->xt), vsr_full_offset(a->xa),
                   vsr_full_offset(a->xb), 16, 16, &op[vece - MO_32]);

    return true;
}

TRANS(XVCPSGNSP, do_xvcpsgn, MO_32)
TRANS(XVCPSGNDP, do_xvcpsgn, MO_64)
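
/*
 * VSX_CMP: vector FP compares.  When the Rc bit (bit 21) is set, the
 * helper's CR-style result is written to CR6; otherwise it is computed
 * into a dead temporary and discarded.
 */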
#define VSX_CMP(name, op1, op2, inval, type)                                  \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 ignored;                                                         \
    TCGv_ptr xt, xa, xb;                                                      \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    xt = gen_vsr_ptr(xT(ctx->opcode));                                        \
    xa = gen_vsr_ptr(xA(ctx->opcode));                                        \
    xb = gen_vsr_ptr(xB(ctx->opcode));                                        \
    if ((ctx->opcode >> (31 - 21)) & 1) {                                     \
        gen_helper_##name(cpu_crf[6], tcg_env, xt, xa, xb);                   \
    } else {                                                                  \
        ignored = tcg_temp_new_i32();                                         \
        gen_helper_##name(ignored, tcg_env, xt, xa, xb);                      \
    }                                                                         \
}

VSX_CMP(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
VSX_CMP(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
VSX_CMP(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
VSX_CMP(xvcmpnedp, 0x0C, 0x0F, 0, PPC2_ISA300)
VSX_CMP(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
VSX_CMP(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
VSX_CMP(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
VSX_CMP(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX)

static bool trans_XSCVQPDP(DisasContext *ctx, arg_X_tb_rc *a)
{
    TCGv_i32 ro;
    TCGv_ptr xt, xb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    ro = tcg_constant_i32(a->rc);

    xt = gen_avr_ptr(a->rt);
    xb = gen_avr_ptr(a->rb);
    gen_helper_XSCVQPDP(tcg_env, ro, xt, xb);
    return true;
}

static bool do_helper_env_X_tb(DisasContext *ctx, arg_X_tb *a,
                               void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr xt, xb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    xt = gen_avr_ptr(a->rt);
    xb = gen_avr_ptr(a->rb);
    gen_helper(tcg_env, xt, xb);
    return true;
}

TRANS(XSCVUQQP, do_helper_env_X_tb, gen_helper_XSCVUQQP)
TRANS(XSCVSQQP, do_helper_env_X_tb, gen_helper_XSCVSQQP)
TRANS(XSCVQPUQZ, do_helper_env_X_tb, gen_helper_XSCVQPUQZ)
TRANS(XSCVQPSQZ, do_helper_env_X_tb, gen_helper_XSCVQPSQZ)
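
/*
 * GEN_VSX_HELPER_*: boilerplate translators for helper-based VSX
 * instructions.  The X variants pass full-VSR pointers via xT/xA/xB;
 * the R variants are quad-precision forms whose operands live in VSRs
 * 32..63, addressed as rD/rA/rB + 32.
 */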
#define GEN_VSX_HELPER_2(name, op1, op2, inval, type)                         \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    gen_helper_##name(tcg_env, opc);                                          \
}

#define GEN_VSX_HELPER_X3(name, op1, op2, inval, type)                        \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_ptr xt, xa, xb;                                                      \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    xt = gen_vsr_ptr(xT(ctx->opcode));                                        \
    xa = gen_vsr_ptr(xA(ctx->opcode));                                        \
    xb = gen_vsr_ptr(xB(ctx->opcode));                                        \
    gen_helper_##name(tcg_env, xt, xa, xb);                                   \
}

#define GEN_VSX_HELPER_X2(name, op1, op2, inval, type)                        \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_ptr xt, xb;                                                          \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    xt = gen_vsr_ptr(xT(ctx->opcode));                                        \
    xb = gen_vsr_ptr(xB(ctx->opcode));                                        \
    gen_helper_##name(tcg_env, xt, xb);                                       \
}

#define GEN_VSX_HELPER_X2_AB(name, op1, op2, inval, type)                     \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    TCGv_ptr xa, xb;                                                          \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    xa = gen_vsr_ptr(xA(ctx->opcode));                                        \
    xb = gen_vsr_ptr(xB(ctx->opcode));                                        \
    gen_helper_##name(tcg_env, opc, xa, xb);                                  \
}

#define GEN_VSX_HELPER_X1(name, op1, op2, inval, type)                        \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    TCGv_ptr xb;                                                              \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    xb = gen_vsr_ptr(xB(ctx->opcode));                                        \
    gen_helper_##name(tcg_env, opc, xb);                                      \
}

#define GEN_VSX_HELPER_R3(name, op1, op2, inval, type)                        \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    TCGv_ptr xt, xa, xb;                                                      \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    xt = gen_vsr_ptr(rD(ctx->opcode) + 32);                                   \
    xa = gen_vsr_ptr(rA(ctx->opcode) + 32);                                   \
    xb = gen_vsr_ptr(rB(ctx->opcode) + 32);                                   \
    gen_helper_##name(tcg_env, opc, xt, xa, xb);                              \
}

#define GEN_VSX_HELPER_R2(name, op1, op2, inval, type)                        \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    TCGv_ptr xt, xb;                                                          \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    xt = gen_vsr_ptr(rD(ctx->opcode) + 32);                                   \
    xb = gen_vsr_ptr(rB(ctx->opcode) + 32);                                   \
    gen_helper_##name(tcg_env, opc, xt, xb);                                  \
}

#define GEN_VSX_HELPER_R2_AB(name, op1, op2, inval, type)                     \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 opc;                                                             \
    TCGv_ptr xa, xb;                                                          \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    opc = tcg_constant_i32(ctx->opcode);                                      \
    xa = gen_vsr_ptr(rA(ctx->opcode) + 32);                                   \
    xb = gen_vsr_ptr(rB(ctx->opcode) + 32);                                   \
    gen_helper_##name(tcg_env, opc, xa, xb);                                  \
}

#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv_i64 t0;                                              \
    TCGv_i64 t1;                                              \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    t0 = tcg_temp_new_i64();                                  \
    t1 = tcg_temp_new_i64();                                  \
    get_cpu_vsr(t0, xB(ctx->opcode), true);                   \
    gen_helper_##name(t1, tcg_env, t0);                       \
    set_cpu_vsr(xT(ctx->opcode), t1, true);                   \
    set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \
}

GEN_VSX_HELPER_X3(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_R3(xsaddqp, 0x04, 0x00, 0, PPC2_ISA300)
GEN_VSX_HELPER_X3(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_R3(xsmulqp, 0x04, 0x01, 0, PPC2_ISA300)
GEN_VSX_HELPER_X3(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_R3(xsdivqp, 0x04, 0x11, 0, PPC2_ISA300)
GEN_VSX_HELPER_X2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_X1(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300)
GEN_VSX_HELPER_R2_AB(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300)
GEN_VSX_HELPER_X2_AB(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
GEN_VSX_HELPER_R2(xscvqpsdz, 0x04, 0x1A, 0x19, PPC2_ISA300)
GEN_VSX_HELPER_R2(xscvqpswz, 0x04, 0x1A, 0x09, PPC2_ISA300)
GEN_VSX_HELPER_R2(xscvqpudz, 0x04, 0x1A, 0x11, PPC2_ISA300)
GEN_VSX_HELPER_R2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300)
GEN_VSX_HELPER_R2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
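
/*
 * Per-lane data-class predicates for xvtstdc*: each sets a lane to all
 * ones when the value belongs to the tested class, and may clobber b
 * as scratch.
 */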
/* test if +Inf */
static void gen_is_pos_inf(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, exp_msk));
}

/* test if -Inf */
static void gen_is_neg_inf(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, sgn_msk | exp_msk));
}

/* test if +Inf or -Inf */
static void gen_is_any_inf(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_andc_vec(vece, b, b, tcg_constant_vec_matching(t, vece, sgn_msk));
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, exp_msk));
}

/* test if +0 */
static void gen_is_pos_zero(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, 0));
}

/* test if -0 */
static void gen_is_neg_zero(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, sgn_msk));
}

/* test if +0 or -0 */
static void gen_is_any_zero(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_andc_vec(vece, b, b, tcg_constant_vec_matching(t, vece, sgn_msk));
    tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
                    tcg_constant_vec_matching(t, vece, 0));
}

/* test if +Denormal */
static void gen_is_pos_denormal(unsigned vece, TCGv_vec t,
                                TCGv_vec b, int64_t v)
{
    uint64_t frc_msk = (vece == MO_32) ? (uint32_t)FRC_MASK_SP : FRC_MASK_DP;
    tcg_gen_cmp_vec(TCG_COND_LEU, vece, t, b,
                    tcg_constant_vec_matching(t, vece, frc_msk));
    tcg_gen_cmp_vec(TCG_COND_NE, vece, b, b,
                    tcg_constant_vec_matching(t, vece, 0));
    tcg_gen_and_vec(vece, t, t, b);
}

/* test if -Denormal */
static void gen_is_neg_denormal(unsigned vece, TCGv_vec t,
                                TCGv_vec b, int64_t v)
{
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    uint64_t frc_msk = (vece == MO_32) ? (uint32_t)FRC_MASK_SP : FRC_MASK_DP;
    tcg_gen_cmp_vec(TCG_COND_LEU, vece, t, b,
                    tcg_constant_vec_matching(t, vece, sgn_msk | frc_msk));
    tcg_gen_cmp_vec(TCG_COND_GTU, vece, b, b,
                    tcg_constant_vec_matching(t, vece, sgn_msk));
    tcg_gen_and_vec(vece, t, t, b);
}

/* test if +Denormal or -Denormal */
static void gen_is_any_denormal(unsigned vece, TCGv_vec t,
                                TCGv_vec b, int64_t v)
{
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    uint64_t frc_msk = (vece == MO_32) ? (uint32_t)FRC_MASK_SP : FRC_MASK_DP;
    tcg_gen_andc_vec(vece, b, b, tcg_constant_vec_matching(t, vece, sgn_msk));
    tcg_gen_cmp_vec(TCG_COND_LE, vece, t, b,
                    tcg_constant_vec_matching(t, vece, frc_msk));
    tcg_gen_cmp_vec(TCG_COND_NE, vece, b, b,
                    tcg_constant_vec_matching(t, vece, 0));
    tcg_gen_and_vec(vece, t, t, b);
}

/* test if NaN */
static void gen_is_nan(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
{
    uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
    uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
    tcg_gen_and_vec(vece, b, b, tcg_constant_vec_matching(t, vece, ~sgn_msk));
    tcg_gen_cmp_vec(TCG_COND_GT, vece, t, b,
                    tcg_constant_vec_matching(t, vece, exp_msk));
}
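
/*
 * XVTSTDC[SD]P: uim is a 7-bit data-class mask.  The single-class (and
 * matching +/- pair) patterns below are open-coded with the vector
 * predicates above; any other combination is left to the out-of-line
 * helper selected in .fnoi.
 */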
static bool do_xvtstdc(DisasContext *ctx, arg_XX2_uim *a, unsigned vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, 0
    };

    GVecGen2i op = {
        .fnoi = (vece == MO_32) ? gen_helper_XVTSTDCSP : gen_helper_XVTSTDCDP,
        .vece = vece,
        .opt_opc = vecop_list
    };

    REQUIRE_VSX(ctx);

    switch (a->uim) {
    case 0:
        set_cpu_vsr(a->xt, tcg_constant_i64(0), true);
        set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
        return true;
    case ((1 << 0) | (1 << 1)):
        /* test if +Denormal or -Denormal */
        op.fniv = gen_is_any_denormal;
        break;
    case (1 << 0):
        /* test if -Denormal */
        op.fniv = gen_is_neg_denormal;
        break;
    case (1 << 1):
        /* test if +Denormal */
        op.fniv = gen_is_pos_denormal;
        break;
    case ((1 << 2) | (1 << 3)):
        /* test if +0 or -0 */
        op.fniv = gen_is_any_zero;
        break;
    case (1 << 2):
        /* test if -0 */
        op.fniv = gen_is_neg_zero;
        break;
    case (1 << 3):
        /* test if +0 */
        op.fniv = gen_is_pos_zero;
        break;
    case ((1 << 4) | (1 << 5)):
        /* test if +Inf or -Inf */
        op.fniv = gen_is_any_inf;
        break;
    case (1 << 4):
        /* test if -Inf */
        op.fniv = gen_is_neg_inf;
        break;
    case (1 << 5):
        /* test if +Inf */
        op.fniv = gen_is_pos_inf;
        break;
    case (1 << 6):
        /* test if NaN */
        op.fniv = gen_is_nan;
        break;
    }
    tcg_gen_gvec_2i(vsr_full_offset(a->xt), vsr_full_offset(a->xb),
                    16, 16, a->uim, &op);

    return true;
}

TRANS_FLAGS2(VSX, XVTSTDCSP, do_xvtstdc, MO_32)
TRANS_FLAGS2(VSX, XVTSTDCDP, do_xvtstdc, MO_64)

static bool do_XX2_bf_uim(DisasContext *ctx, arg_XX2_bf_uim *a, bool vsr,
                     void (*gen_helper)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_ptr))
{
    TCGv_ptr xb;

    REQUIRE_VSX(ctx);
    xb = vsr ? gen_vsr_ptr(a->xb) : gen_avr_ptr(a->xb);
    gen_helper(tcg_env, tcg_constant_i32(a->bf), tcg_constant_i32(a->uim), xb);
    return true;
}

TRANS_FLAGS2(ISA300, XSTSTDCSP, do_XX2_bf_uim, true, gen_helper_XSTSTDCSP)
TRANS_FLAGS2(ISA300, XSTSTDCDP, do_XX2_bf_uim, true, gen_helper_XSTSTDCDP)
TRANS_FLAGS2(ISA300, XSTSTDCQP, do_XX2_bf_uim, false, gen_helper_XSTSTDCQP)

bool trans_XSCVSPDPN(DisasContext *ctx, arg_XX2 *a)
{
    TCGv_i64 tmp;

    REQUIRE_INSNS_FLAGS2(ctx, VSX207);
    REQUIRE_VSX(ctx);

    tmp = tcg_temp_new_i64();
    get_cpu_vsr(tmp, a->xb, true);

    gen_helper_XSCVSPDPN(tmp, tmp);

    set_cpu_vsr(a->xt, tmp, true);
    set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
    return true;
}

GEN_VSX_HELPER_X2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
GEN_VSX_HELPER_R2(xscvudqp, 0x04, 0x1A, 0x02, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)
GEN_VSX_HELPER_R2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300)
GEN_VSX_HELPER_R2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300)
GEN_VSX_HELPER_R2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300)
GEN_VSX_HELPER_R3(xssubqp, 0x04, 0x10, 0, PPC2_ISA300)
GEN_VSX_HELPER_X3(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_X3(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_X3(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
GEN_VSX_HELPER_X3(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)

GEN_VSX_HELPER_X3(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_X1(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)

GEN_VSX_HELPER_X3(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_X1(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300)
GEN_VSX_HELPER_X2(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300)
GEN_VSX_HELPER_X2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)
1300 static bool trans_XXPERM(DisasContext *ctx, arg_XX3 *a)
1302     TCGv_ptr xt, xa, xb;
1304     REQUIRE_INSNS_FLAGS2(ctx, ISA300);
1305     REQUIRE_VSX(ctx);
1307     xt = gen_vsr_ptr(a->xt);
1308     xa = gen_vsr_ptr(a->xa);
1309     xb = gen_vsr_ptr(a->xb);
1311     gen_helper_VPERM(xt, xa, xt, xb);
1312     return true;
1315 static bool trans_XXPERMR(DisasContext *ctx, arg_XX3 *a)
1317     TCGv_ptr xt, xa, xb;
1319     REQUIRE_INSNS_FLAGS2(ctx, ISA300);
1320     REQUIRE_VSX(ctx);
1322     xt = gen_vsr_ptr(a->xt);
1323     xa = gen_vsr_ptr(a->xa);
1324     xb = gen_vsr_ptr(a->xb);
1326     gen_helper_VPERMR(xt, xa, xt, xb);
1327     return true;
1330 static bool trans_XXPERMDI(DisasContext *ctx, arg_XX3_dm *a)
1332     TCGv_i64 t0, t1;
1334     REQUIRE_INSNS_FLAGS2(ctx, VSX);
1335     REQUIRE_VSX(ctx);
1337     t0 = tcg_temp_new_i64();
1339     if (unlikely(a->xt == a->xa || a->xt == a->xb)) {
1340         t1 = tcg_temp_new_i64();
1342         get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0);
1343         get_cpu_vsr(t1, a->xb, (a->dm & 1) == 0);
1345         set_cpu_vsr(a->xt, t0, true);
1346         set_cpu_vsr(a->xt, t1, false);
1347     } else {
1348         get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0);
1349         set_cpu_vsr(a->xt, t0, true);
1351         get_cpu_vsr(t0, a->xb, (a->dm & 1) == 0);
1352         set_cpu_vsr(a->xt, t0, false);
1353     }
1354     return true;
static bool trans_XXPERMX(DisasContext *ctx, arg_8RR_XX4_uim3 *a)
{
    TCGv_ptr xt, xa, xb, xc;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    xt = gen_vsr_ptr(a->xt);
    xa = gen_vsr_ptr(a->xa);
    xb = gen_vsr_ptr(a->xb);
    xc = gen_vsr_ptr(a->xc);

    gen_helper_XXPERMX(xt, xa, xb, xc, tcg_constant_tl(a->uim3));
    return true;
}

typedef void (*xxgenpcv_genfn)(TCGv_ptr, TCGv_ptr);

static bool do_xxgenpcv(DisasContext *ctx, arg_X_imm5 *a,
                        const xxgenpcv_genfn fn[4])
{
    TCGv_ptr xt, vrb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    if (a->imm & ~0x3) {
        gen_invalid(ctx);
        return true;
    }

    xt = gen_vsr_ptr(a->xt);
    vrb = gen_avr_ptr(a->vrb);

    fn[a->imm](xt, vrb);
    return true;
}
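
/*
 * The two low bits of imm select among the four helpers below: bit 1
 * chooses big- or little-endian indexing, bit 0 the expanded or
 * compressed result format.
 */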
#define XXGENPCV(NAME) \
    static bool trans_##NAME(DisasContext *ctx, arg_X_imm5 *a)  \
    {                                                           \
        static const xxgenpcv_genfn fn[4] = {                   \
            gen_helper_##NAME##_be_exp,                         \
            gen_helper_##NAME##_be_comp,                        \
            gen_helper_##NAME##_le_exp,                         \
            gen_helper_##NAME##_le_comp,                        \
        };                                                      \
        return do_xxgenpcv(ctx, a, fn);                         \
    }

XXGENPCV(XXGENPCVBM)
XXGENPCV(XXGENPCVHM)
XXGENPCV(XXGENPCVWM)
XXGENPCV(XXGENPCVDM)
#undef XXGENPCV

static bool do_xsmadd(DisasContext *ctx, int tgt, int src1, int src2, int src3,
        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr t, s1, s2, s3;

    t = gen_vsr_ptr(tgt);
    s1 = gen_vsr_ptr(src1);
    s2 = gen_vsr_ptr(src2);
    s3 = gen_vsr_ptr(src3);

    gen_helper(tcg_env, t, s1, s2, s3);
    return true;
}

static bool do_xsmadd_XX3(DisasContext *ctx, arg_XX3 *a, bool type_a,
        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    REQUIRE_VSX(ctx);
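
    /*
     * A-type instructions take the addend from xT (xT = xA * xB + xT);
     * M-type take the multiplicand from xT (xT = xA * xT + xB). The
     * operand order passed to do_xsmadd reflects this.
     */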
    if (type_a) {
        return do_xsmadd(ctx, a->xt, a->xa, a->xt, a->xb, gen_helper);
    }
    return do_xsmadd(ctx, a->xt, a->xa, a->xb, a->xt, gen_helper);
}

TRANS_FLAGS2(VSX, XSMADDADP, do_xsmadd_XX3, true, gen_helper_XSMADDDP)
TRANS_FLAGS2(VSX, XSMADDMDP, do_xsmadd_XX3, false, gen_helper_XSMADDDP)
TRANS_FLAGS2(VSX, XSMSUBADP, do_xsmadd_XX3, true, gen_helper_XSMSUBDP)
TRANS_FLAGS2(VSX, XSMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSMSUBDP)
TRANS_FLAGS2(VSX, XSNMADDADP, do_xsmadd_XX3, true, gen_helper_XSNMADDDP)
TRANS_FLAGS2(VSX, XSNMADDMDP, do_xsmadd_XX3, false, gen_helper_XSNMADDDP)
TRANS_FLAGS2(VSX, XSNMSUBADP, do_xsmadd_XX3, true, gen_helper_XSNMSUBDP)
TRANS_FLAGS2(VSX, XSNMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSNMSUBDP)
TRANS_FLAGS2(VSX207, XSMADDASP, do_xsmadd_XX3, true, gen_helper_XSMADDSP)
TRANS_FLAGS2(VSX207, XSMADDMSP, do_xsmadd_XX3, false, gen_helper_XSMADDSP)
TRANS_FLAGS2(VSX207, XSMSUBASP, do_xsmadd_XX3, true, gen_helper_XSMSUBSP)
TRANS_FLAGS2(VSX207, XSMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSMSUBSP)
TRANS_FLAGS2(VSX207, XSNMADDASP, do_xsmadd_XX3, true, gen_helper_XSNMADDSP)
TRANS_FLAGS2(VSX207, XSNMADDMSP, do_xsmadd_XX3, false, gen_helper_XSNMADDSP)
TRANS_FLAGS2(VSX207, XSNMSUBASP, do_xsmadd_XX3, true, gen_helper_XSNMSUBSP)
TRANS_FLAGS2(VSX207, XSNMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSNMSUBSP)

static bool do_xsmadd_X(DisasContext *ctx, arg_X_rc *a,
        void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr),
        void (*gen_helper_ro)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    int vrt, vra, vrb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    vrt = a->rt + 32;
    vra = a->ra + 32;
    vrb = a->rb + 32;
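
    /* Rc set selects the round-to-odd variant of the helper */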
    if (a->rc) {
        return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper_ro);
    }

    return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper);
}

TRANS(XSMADDQP, do_xsmadd_X, gen_helper_XSMADDQP, gen_helper_XSMADDQPO)
TRANS(XSMSUBQP, do_xsmadd_X, gen_helper_XSMSUBQP, gen_helper_XSMSUBQPO)
TRANS(XSNMADDQP, do_xsmadd_X, gen_helper_XSNMADDQP, gen_helper_XSNMADDQPO)
TRANS(XSNMSUBQP, do_xsmadd_X, gen_helper_XSNMSUBQP, gen_helper_XSNMSUBQPO)

#define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type)             \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_ptr xt, s1, s2, s3;                                                  \
    if (unlikely(!ctx->vsx_enabled)) {                                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                                \
        return;                                                               \
    }                                                                         \
    xt = gen_vsr_ptr(xT(ctx->opcode));                                        \
    s1 = gen_vsr_ptr(xA(ctx->opcode));                                        \
    if (ctx->opcode & PPC_BIT32(25)) {                                        \
        /*                                                                    \
         * AxT + B                                                            \
         */                                                                   \
        s2 = gen_vsr_ptr(xB(ctx->opcode));                                    \
        s3 = gen_vsr_ptr(xT(ctx->opcode));                                    \
    } else {                                                                  \
        /*                                                                    \
         * AxB + T                                                            \
         */                                                                   \
        s2 = gen_vsr_ptr(xT(ctx->opcode));                                    \
        s3 = gen_vsr_ptr(xB(ctx->opcode));                                    \
    }                                                                         \
    gen_helper_##name(tcg_env, xt, s1, s2, s3);                               \
}

GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvnmsubdp, 0x04, 0x1E, 0x1F, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvmaddsp, 0x04, 0x08, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvmsubsp, 0x04, 0x0A, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvnmaddsp, 0x04, 0x18, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvnmsubsp, 0x04, 0x1A, 0x1B, 0, PPC2_VSX)

static void gen_xxbrd(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);

    tcg_gen_bswap64_i64(xth, xbh);
    tcg_gen_bswap64_i64(xtl, xbl);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static void gen_xxbrh(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);

    gen_bswap16x8(xth, xtl, xbh, xbl);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static void gen_xxbrq(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;
    TCGv_i64 t0;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);
    t0 = tcg_temp_new_i64();
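
    /*
     * Byte-reverse all 128 bits: byte-swap each doubleword and exchange
     * the two doublewords.
     */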
    tcg_gen_bswap64_i64(t0, xbl);
    tcg_gen_bswap64_i64(xtl, xbh);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
    tcg_gen_mov_i64(xth, t0);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
}

static void gen_xxbrw(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);

    gen_bswap32x4(xth, xtl, xbh, xbl);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

#define VSX_LOGICAL(name, vece, tcg_op)                              \
static void glue(gen_, name)(DisasContext *ctx)                      \
    {                                                                \
        if (unlikely(!ctx->vsx_enabled)) {                           \
            gen_exception(ctx, POWERPC_EXCP_VSXU);                   \
            return;                                                  \
        }                                                            \
        tcg_op(vece, vsr_full_offset(xT(ctx->opcode)),               \
               vsr_full_offset(xA(ctx->opcode)),                     \
               vsr_full_offset(xB(ctx->opcode)), 16, 16);            \
    }

VSX_LOGICAL(xxland, MO_64, tcg_gen_gvec_and)
VSX_LOGICAL(xxlandc, MO_64, tcg_gen_gvec_andc)
VSX_LOGICAL(xxlor, MO_64, tcg_gen_gvec_or)
VSX_LOGICAL(xxlxor, MO_64, tcg_gen_gvec_xor)
VSX_LOGICAL(xxlnor, MO_64, tcg_gen_gvec_nor)
VSX_LOGICAL(xxleqv, MO_64, tcg_gen_gvec_eqv)
VSX_LOGICAL(xxlnand, MO_64, tcg_gen_gvec_nand)
VSX_LOGICAL(xxlorc, MO_64, tcg_gen_gvec_orc)

#define VSX_XXMRG(name, high)                               \
static void glue(gen_, name)(DisasContext *ctx)             \
    {                                                       \
        TCGv_i64 a0, a1, b0, b1, tmp;                       \
        if (unlikely(!ctx->vsx_enabled)) {                  \
            gen_exception(ctx, POWERPC_EXCP_VSXU);          \
            return;                                         \
        }                                                   \
        a0 = tcg_temp_new_i64();                            \
        a1 = tcg_temp_new_i64();                            \
        b0 = tcg_temp_new_i64();                            \
        b1 = tcg_temp_new_i64();                            \
        tmp = tcg_temp_new_i64();                           \
        get_cpu_vsr(a0, xA(ctx->opcode), high);             \
        get_cpu_vsr(a1, xA(ctx->opcode), high);             \
        get_cpu_vsr(b0, xB(ctx->opcode), high);             \
        get_cpu_vsr(b1, xB(ctx->opcode), high);             \
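        /* All four temps hold the same doubleword: a0/b0   \
         * shifted right yield its first word, a1/b1 keep   \
         * its second word.                                 \
         */                                                 \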
        tcg_gen_shri_i64(a0, a0, 32);                       \
        tcg_gen_shri_i64(b0, b0, 32);                       \
        tcg_gen_deposit_i64(tmp, b0, a0, 32, 32);           \
        set_cpu_vsr(xT(ctx->opcode), tmp, true);            \
        tcg_gen_deposit_i64(tmp, b1, a1, 32, 32);           \
        set_cpu_vsr(xT(ctx->opcode), tmp, false);           \
    }

VSX_XXMRG(xxmrghw, 1)
VSX_XXMRG(xxmrglw, 0)

static bool trans_XXSEL(DisasContext *ctx, arg_XX4 *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, VSX);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_bitsel(MO_64, vsr_full_offset(a->xt), vsr_full_offset(a->xc),
                        vsr_full_offset(a->xb), vsr_full_offset(a->xa), 16, 16);

    return true;
}

static bool trans_XXSPLTW(DisasContext *ctx, arg_XX2_uim *a)
{
    int tofs, bofs;

    REQUIRE_VSX(ctx);

    tofs = vsr_full_offset(a->xt);
    bofs = vsr_full_offset(a->xb);
    bofs += a->uim << MO_32;
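    /* The immediate counts words in big-endian order; fix up for LE hosts */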
#if !HOST_BIG_ENDIAN
    bofs ^= 8 | 4;
#endif

    tcg_gen_gvec_dup_mem(MO_32, tofs, bofs, 16, 16);
    return true;
}

#define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))

static bool trans_XXSPLTIB(DisasContext *ctx, arg_X_imm8 *a)
{
    if (a->xt < 32) {
        REQUIRE_VSX(ctx);
    } else {
        REQUIRE_VECTOR(ctx);
    }
    tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(a->xt), 16, 16, a->imm);
    return true;
}

static bool trans_XXSPLTIW(DisasContext *ctx, arg_8RR_D *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_dup_imm(MO_32, vsr_full_offset(a->xt), 16, 16, a->si);

    return true;
}

static bool trans_XXSPLTIDP(DisasContext *ctx, arg_8RR_D *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_dup_imm(MO_64, vsr_full_offset(a->xt), 16, 16,
                         helper_todouble(a->si));
    return true;
}

static bool trans_XXSPLTI32DX(DisasContext *ctx, arg_8RR_D_IX *a)
{
    TCGv_i32 imm;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    imm = tcg_constant_i32(a->si);

    tcg_gen_st_i32(imm, tcg_env,
        offsetof(CPUPPCState, vsr[a->xt].VsrW(0 + a->ix)));
    tcg_gen_st_i32(imm, tcg_env,
        offsetof(CPUPPCState, vsr[a->xt].VsrW(2 + a->ix)));

    return true;
}

static bool trans_LXVKQ(DisasContext *ctx, arg_X_uim5 *a)
{
    static const uint64_t values[32] = {
        0, /* Unspecified */
        0x3FFF000000000000llu, /* QP +1.0 */
        0x4000000000000000llu, /* QP +2.0 */
        0x4000800000000000llu, /* QP +3.0 */
        0x4001000000000000llu, /* QP +4.0 */
        0x4001400000000000llu, /* QP +5.0 */
        0x4001800000000000llu, /* QP +6.0 */
        0x4001C00000000000llu, /* QP +7.0 */
        0x7FFF000000000000llu, /* QP +Inf */
        0x7FFF800000000000llu, /* QP dQNaN */
        0, /* Unspecified */
        0, /* Unspecified */
        0, /* Unspecified */
        0, /* Unspecified */
        0, /* Unspecified */
        0, /* Unspecified */
        0x8000000000000000llu, /* QP -0.0 */
        0xBFFF000000000000llu, /* QP -1.0 */
        0xC000000000000000llu, /* QP -2.0 */
        0xC000800000000000llu, /* QP -3.0 */
        0xC001000000000000llu, /* QP -4.0 */
        0xC001400000000000llu, /* QP -5.0 */
        0xC001800000000000llu, /* QP -6.0 */
        0xC001C00000000000llu, /* QP -7.0 */
        0xFFFF000000000000llu, /* QP -Inf */
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    if (values[a->uim]) {
        set_cpu_vsr(a->xt, tcg_constant_i64(0x0), false);
        set_cpu_vsr(a->xt, tcg_constant_i64(values[a->uim]), true);
    } else {
        gen_invalid(ctx);
    }

    return true;
}

static bool trans_XVTLSBB(DisasContext *ctx, arg_XX2_bf_xb *a)
{
    TCGv_i64 xb, t0, t1, all_true, all_false, mask, zero;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    xb = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    all_true = tcg_temp_new_i64();
    all_false = tcg_temp_new_i64();
    mask = tcg_constant_i64(dup_const(MO_8, 1));
    zero = tcg_constant_i64(0);

    get_cpu_vsr(xb, a->xb, true);
    tcg_gen_and_i64(t0, mask, xb);
    get_cpu_vsr(xb, a->xb, false);
    tcg_gen_and_i64(t1, mask, xb);

    tcg_gen_or_i64(all_false, t0, t1);
    tcg_gen_and_i64(all_true, t0, t1);
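
    /*
     * The CR field is set to 0b1000 when every byte has its LSB set and
     * to 0b0010 when no byte does.
     */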
    tcg_gen_setcond_i64(TCG_COND_EQ, all_false, all_false, zero);
    tcg_gen_shli_i64(all_false, all_false, 1);
    tcg_gen_setcond_i64(TCG_COND_EQ, all_true, all_true, mask);
    tcg_gen_shli_i64(all_true, all_true, 3);

    tcg_gen_or_i64(t0, all_false, all_true);
    tcg_gen_extrl_i64_i32(cpu_crf[a->bf], t0);
    return true;
}

static void gen_xxsldwi(DisasContext *ctx)
{
    TCGv_i64 xth, xtl;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
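
    /*
     * SHW is the left shift, in words, applied to the 256-bit
     * concatenation VSR[A] || VSR[B]; the high 128 bits of the shifted
     * value form the result.
     */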
    switch (SHW(ctx->opcode)) {
        case 0: {
            get_cpu_vsr(xth, xA(ctx->opcode), true);
            get_cpu_vsr(xtl, xA(ctx->opcode), false);
            break;
        }
        case 1: {
            TCGv_i64 t0 = tcg_temp_new_i64();
            get_cpu_vsr(xth, xA(ctx->opcode), true);
            tcg_gen_shli_i64(xth, xth, 32);
            get_cpu_vsr(t0, xA(ctx->opcode), false);
            tcg_gen_shri_i64(t0, t0, 32);
            tcg_gen_or_i64(xth, xth, t0);
            get_cpu_vsr(xtl, xA(ctx->opcode), false);
            tcg_gen_shli_i64(xtl, xtl, 32);
            get_cpu_vsr(t0, xB(ctx->opcode), true);
            tcg_gen_shri_i64(t0, t0, 32);
            tcg_gen_or_i64(xtl, xtl, t0);
            break;
        }
        case 2: {
            get_cpu_vsr(xth, xA(ctx->opcode), false);
            get_cpu_vsr(xtl, xB(ctx->opcode), true);
            break;
        }
        case 3: {
            TCGv_i64 t0 = tcg_temp_new_i64();
            get_cpu_vsr(xth, xA(ctx->opcode), false);
            tcg_gen_shli_i64(xth, xth, 32);
            get_cpu_vsr(t0, xB(ctx->opcode), true);
            tcg_gen_shri_i64(t0, t0, 32);
            tcg_gen_or_i64(xth, xth, t0);
            get_cpu_vsr(xtl, xB(ctx->opcode), true);
            tcg_gen_shli_i64(xtl, xtl, 32);
            get_cpu_vsr(t0, xB(ctx->opcode), false);
            tcg_gen_shri_i64(t0, t0, 32);
            tcg_gen_or_i64(xtl, xtl, t0);
            break;
        }
    }

    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static bool do_vsx_extract_insert(DisasContext *ctx, arg_XX2_uim *a,
    void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 zero = tcg_constant_i64(0);
    TCGv_ptr xt, xb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    /*
     * uim > 15 is out of bounds and zeroes the target; the range
     * 12 < uim <= 15 is handled in the helper, matching hardware.
     */
    if (a->uim > 15) {
        set_cpu_vsr(a->xt, zero, true);
        set_cpu_vsr(a->xt, zero, false);
    } else {
        xt = gen_vsr_ptr(a->xt);
        xb = gen_vsr_ptr(a->xb);
        gen_helper(xt, xb, tcg_constant_i32(a->uim));
    }
    return true;
}

TRANS(XXEXTRACTUW, do_vsx_extract_insert, gen_helper_XXEXTRACTUW)
TRANS(XXINSERTW, do_vsx_extract_insert, gen_helper_XXINSERTW)

#ifdef TARGET_PPC64
static void gen_xsxexpdp(DisasContext *ctx)
{
    TCGv rt = cpu_gpr[rD(ctx->opcode)];
    TCGv_i64 t0;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    t0 = tcg_temp_new_i64();
    get_cpu_vsr(t0, xB(ctx->opcode), true);
    tcg_gen_extract_i64(rt, t0, 52, 11);
}

static void gen_xsxexpqp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);

    tcg_gen_extract_i64(xth, xbh, 48, 15);
    set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
    tcg_gen_movi_i64(xtl, 0);
    set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
}

static void gen_xsiexpdp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rb = cpu_gpr[rB(ctx->opcode)];
    TCGv_i64 t0;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    t0 = tcg_temp_new_i64();
    xth = tcg_temp_new_i64();
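    /* Keep sign and fraction from rA, insert the 11-bit exponent from rB */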
    tcg_gen_andi_i64(xth, ra, 0x800FFFFFFFFFFFFF);
    tcg_gen_andi_i64(t0, rb, 0x7FF);
    tcg_gen_shli_i64(t0, t0, 52);
    tcg_gen_or_i64(xth, xth, t0);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false);
}

static void gen_xsiexpqp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xah;
    TCGv_i64 xal;
    TCGv_i64 xbh;
    TCGv_i64 t0;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xah = tcg_temp_new_i64();
    xal = tcg_temp_new_i64();
    get_cpu_vsr(xah, rA(ctx->opcode) + 32, true);
    get_cpu_vsr(xal, rA(ctx->opcode) + 32, false);
    xbh = tcg_temp_new_i64();
    get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
    t0 = tcg_temp_new_i64();

    tcg_gen_andi_i64(xth, xah, 0x8000FFFFFFFFFFFF);
    tcg_gen_andi_i64(t0, xbh, 0x7FFF);
    tcg_gen_shli_i64(t0, t0, 48);
    tcg_gen_or_i64(xth, xth, t0);
    set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
    tcg_gen_mov_i64(xtl, xal);
    set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
}

static void gen_xsxsigdp(DisasContext *ctx)
{
    TCGv rt = cpu_gpr[rD(ctx->opcode)];
    TCGv_i64 t0, t1, zr, nan, exp;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    exp = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    zr = tcg_constant_i64(0);
    nan = tcg_constant_i64(2047);
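
    /*
     * The implicit integer bit (2^52) is present unless the exponent is
     * 0 (zero/denormal) or all-ones (infinity/NaN).
     */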
    get_cpu_vsr(t1, xB(ctx->opcode), true);
    tcg_gen_extract_i64(exp, t1, 52, 11);
    tcg_gen_movi_i64(t0, 0x0010000000000000);
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
    get_cpu_vsr(t1, xB(ctx->opcode), true);
    tcg_gen_deposit_i64(rt, t0, t1, 0, 52);
}

static void gen_xsxsigqp(DisasContext *ctx)
{
    TCGv_i64 t0, zr, nan, exp;
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
    get_cpu_vsr(xbl, rB(ctx->opcode) + 32, false);
    exp = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    zr = tcg_constant_i64(0);
    nan = tcg_constant_i64(32767);

    tcg_gen_extract_i64(exp, xbh, 48, 15);
    tcg_gen_movi_i64(t0, 0x0001000000000000);
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
    tcg_gen_deposit_i64(xth, t0, xbh, 0, 48);
    set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
    tcg_gen_mov_i64(xtl, xbl);
    set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
}
#endif

static void gen_xviexpsp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xah;
    TCGv_i64 xal;
    TCGv_i64 xbh;
    TCGv_i64 xbl;
    TCGv_i64 t0;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xah = tcg_temp_new_i64();
    xal = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xah, xA(ctx->opcode), true);
    get_cpu_vsr(xal, xA(ctx->opcode), false);
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);
    t0 = tcg_temp_new_i64();

    tcg_gen_andi_i64(xth, xah, 0x807FFFFF807FFFFF);
    tcg_gen_andi_i64(t0, xbh, 0xFF000000FF);
    tcg_gen_shli_i64(t0, t0, 23);
    tcg_gen_or_i64(xth, xth, t0);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    tcg_gen_andi_i64(xtl, xal, 0x807FFFFF807FFFFF);
    tcg_gen_andi_i64(t0, xbl, 0xFF000000FF);
    tcg_gen_shli_i64(t0, t0, 23);
    tcg_gen_or_i64(xtl, xtl, t0);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static void gen_xviexpdp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xah;
    TCGv_i64 xal;
    TCGv_i64 xbh;
    TCGv_i64 xbl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xah = tcg_temp_new_i64();
    xal = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xah, xA(ctx->opcode), true);
    get_cpu_vsr(xal, xA(ctx->opcode), false);
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);

    tcg_gen_deposit_i64(xth, xah, xbh, 52, 11);
    set_cpu_vsr(xT(ctx->opcode), xth, true);

    tcg_gen_deposit_i64(xtl, xal, xbl, 52, 11);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static void gen_xvxexpsp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);

    tcg_gen_shri_i64(xth, xbh, 23);
    tcg_gen_andi_i64(xth, xth, 0xFF000000FF);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    tcg_gen_shri_i64(xtl, xbl, 23);
    tcg_gen_andi_i64(xtl, xtl, 0xFF000000FF);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static void gen_xvxexpdp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);

    tcg_gen_extract_i64(xth, xbh, 52, 11);
    set_cpu_vsr(xT(ctx->opcode), xth, true);
    tcg_gen_extract_i64(xtl, xbl, 52, 11);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static bool trans_XVXSIGSP(DisasContext *ctx, arg_XX2 *a)
{
    TCGv_ptr t, b;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    t = gen_vsr_ptr(a->xt);
    b = gen_vsr_ptr(a->xb);

    gen_helper_XVXSIGSP(t, b);
    return true;
}

static void gen_xvxsigdp(DisasContext *ctx)
{
    TCGv_i64 xth;
    TCGv_i64 xtl;
    TCGv_i64 xbh;
    TCGv_i64 xbl;
    TCGv_i64 t0, zr, nan, exp;

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();
    xbh = tcg_temp_new_i64();
    xbl = tcg_temp_new_i64();
    get_cpu_vsr(xbh, xB(ctx->opcode), true);
    get_cpu_vsr(xbl, xB(ctx->opcode), false);
    exp = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    zr = tcg_constant_i64(0);
    nan = tcg_constant_i64(2047);

    tcg_gen_extract_i64(exp, xbh, 52, 11);
    tcg_gen_movi_i64(t0, 0x0010000000000000);
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
    tcg_gen_deposit_i64(xth, t0, xbh, 0, 52);
    set_cpu_vsr(xT(ctx->opcode), xth, true);

    tcg_gen_extract_i64(exp, xbl, 52, 11);
    tcg_gen_movi_i64(t0, 0x0010000000000000);
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
    tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
    tcg_gen_deposit_i64(xtl, t0, xbl, 0, 52);
    set_cpu_vsr(xT(ctx->opcode), xtl, false);
}

static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ,
                     int rt, bool store, bool paired)
{
    TCGv ea;
    TCGv_i64 xt;
    MemOp mop;
    int rt1, rt2;

    xt = tcg_temp_new_i64();

    mop = DEF_MEMOP(MO_UQ);

    gen_set_access_type(ctx, ACCESS_INT);
    ea = do_ea_calc(ctx, ra, displ);
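
    /*
     * In LE mode, paired loads and stores access the two registers of
     * the pair in reversed order.
     */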
    if (paired && ctx->le_mode) {
        rt1 = rt + 1;
        rt2 = rt;
    } else {
        rt1 = rt;
        rt2 = rt + 1;
    }

    if (store) {
        get_cpu_vsr(xt, rt1, !ctx->le_mode);
        tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
        gen_addr_add(ctx, ea, ea, 8);
        get_cpu_vsr(xt, rt1, ctx->le_mode);
        tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
        if (paired) {
            gen_addr_add(ctx, ea, ea, 8);
            get_cpu_vsr(xt, rt2, !ctx->le_mode);
            tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
            gen_addr_add(ctx, ea, ea, 8);
            get_cpu_vsr(xt, rt2, ctx->le_mode);
            tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
        }
    } else {
        tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
        set_cpu_vsr(rt1, xt, !ctx->le_mode);
        gen_addr_add(ctx, ea, ea, 8);
        tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
        set_cpu_vsr(rt1, xt, ctx->le_mode);
        if (paired) {
            gen_addr_add(ctx, ea, ea, 8);
            tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
            set_cpu_vsr(rt2, xt, !ctx->le_mode);
            gen_addr_add(ctx, ea, ea, 8);
            tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
            set_cpu_vsr(rt2, xt, ctx->le_mode);
        }
    }
    return true;
}

static bool do_lstxv_D(DisasContext *ctx, arg_D *a, bool store, bool paired)
{
    if (paired || a->rt < 32) {
        REQUIRE_VSX(ctx);
    } else {
        REQUIRE_VECTOR(ctx);
    }

    return do_lstxv(ctx, a->ra, tcg_constant_tl(a->si), a->rt, store, paired);
}

static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a,
                           bool store, bool paired)
{
    arg_D d;
    REQUIRE_VSX(ctx);

    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }

    return do_lstxv(ctx, d.ra, tcg_constant_tl(d.si), d.rt, store, paired);
}

static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired)
{
    if (paired || a->rt >= 32) {
        REQUIRE_VSX(ctx);
    } else {
        REQUIRE_VECTOR(ctx);
    }

    return do_lstxv(ctx, a->ra, cpu_gpr[a->rb], a->rt, store, paired);
}

static bool do_lstxsd(DisasContext *ctx, int rt, int ra, TCGv displ, bool store)
{
    TCGv ea;
    TCGv_i64 xt;
    MemOp mop;

    if (store) {
        REQUIRE_VECTOR(ctx);
    } else {
        REQUIRE_VSX(ctx);
    }

    xt = tcg_temp_new_i64();
    mop = DEF_MEMOP(MO_UQ);

    gen_set_access_type(ctx, ACCESS_INT);
    ea = do_ea_calc(ctx, ra, displ);

    if (store) {
        get_cpu_vsr(xt, rt + 32, true);
        tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
    } else {
        tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
        set_cpu_vsr(rt + 32, xt, true);
        set_cpu_vsr(rt + 32, tcg_constant_i64(0), false);
    }
    return true;
}

static bool do_lstxsd_DS(DisasContext *ctx, arg_D *a, bool store)
{
    return do_lstxsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), store);
}

static bool do_plstxsd_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
{
    arg_D d;

    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }

    return do_lstxsd(ctx, d.rt, d.ra, tcg_constant_tl(d.si), store);
}

static bool do_lstxssp(DisasContext *ctx, int rt, int ra, TCGv displ, bool store)
{
    TCGv ea;
    TCGv_i64 xt;

    REQUIRE_VECTOR(ctx);

    xt = tcg_temp_new_i64();

    gen_set_access_type(ctx, ACCESS_INT);
    ea = do_ea_calc(ctx, ra, displ);

    if (store) {
        get_cpu_vsr(xt, rt + 32, true);
        gen_qemu_st32fs(ctx, xt, ea);
    } else {
        gen_qemu_ld32fs(ctx, xt, ea);
        set_cpu_vsr(rt + 32, xt, true);
        set_cpu_vsr(rt + 32, tcg_constant_i64(0), false);
    }
    return true;
}

static bool do_lstxssp_DS(DisasContext *ctx, arg_D *a, bool store)
{
    return do_lstxssp(ctx, a->rt, a->ra, tcg_constant_tl(a->si), store);
}

static bool do_plstxssp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
{
    arg_D d;

    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }

    return do_lstxssp(ctx, d.rt, d.ra, tcg_constant_tl(d.si), store);
}

TRANS_FLAGS2(ISA300, LXSD, do_lstxsd_DS, false)
TRANS_FLAGS2(ISA300, STXSD, do_lstxsd_DS, true)
TRANS_FLAGS2(ISA300, LXSSP, do_lstxssp_DS, false)
TRANS_FLAGS2(ISA300, STXSSP, do_lstxssp_DS, true)
TRANS_FLAGS2(ISA300, STXV, do_lstxv_D, true, false)
TRANS_FLAGS2(ISA300, LXV, do_lstxv_D, false, false)
TRANS_FLAGS2(ISA310, STXVP, do_lstxv_D, true, true)
TRANS_FLAGS2(ISA310, LXVP, do_lstxv_D, false, true)
TRANS_FLAGS2(ISA300, STXVX, do_lstxv_X, true, false)
TRANS_FLAGS2(ISA300, LXVX, do_lstxv_X, false, false)
TRANS_FLAGS2(ISA310, STXVPX, do_lstxv_X, true, true)
TRANS_FLAGS2(ISA310, LXVPX, do_lstxv_X, false, true)
TRANS64_FLAGS2(ISA310, PLXSD, do_plstxsd_PLS_D, false)
TRANS64_FLAGS2(ISA310, PSTXSD, do_plstxsd_PLS_D, true)
TRANS64_FLAGS2(ISA310, PLXSSP, do_plstxssp_PLS_D, false)
TRANS64_FLAGS2(ISA310, PSTXSSP, do_plstxssp_PLS_D, true)
TRANS64_FLAGS2(ISA310, PSTXV, do_lstxv_PLS_D, true, false)
TRANS64_FLAGS2(ISA310, PLXV, do_lstxv_PLS_D, false, false)
TRANS64_FLAGS2(ISA310, PSTXVP, do_lstxv_PLS_D, true, true)
TRANS64_FLAGS2(ISA310, PLXVP, do_lstxv_PLS_D, false, true)

static bool do_lstrm(DisasContext *ctx, arg_X *a, MemOp mop, bool store)
{
    TCGv ea;
    TCGv_i64 xt;

    REQUIRE_VSX(ctx);

    xt = tcg_temp_new_i64();

    gen_set_access_type(ctx, ACCESS_INT);
    ea = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]);

    if (store) {
        get_cpu_vsr(xt, a->rt, false);
        tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
    } else {
        tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
        set_cpu_vsr(a->rt, xt, false);
        set_cpu_vsr(a->rt, tcg_constant_i64(0), true);
    }
    return true;
}

TRANS_FLAGS2(ISA310, LXVRBX, do_lstrm, DEF_MEMOP(MO_UB), false)
TRANS_FLAGS2(ISA310, LXVRHX, do_lstrm, DEF_MEMOP(MO_UW), false)
TRANS_FLAGS2(ISA310, LXVRWX, do_lstrm, DEF_MEMOP(MO_UL), false)
TRANS_FLAGS2(ISA310, LXVRDX, do_lstrm, DEF_MEMOP(MO_UQ), false)
TRANS_FLAGS2(ISA310, STXVRBX, do_lstrm, DEF_MEMOP(MO_UB), true)
TRANS_FLAGS2(ISA310, STXVRHX, do_lstrm, DEF_MEMOP(MO_UW), true)
TRANS_FLAGS2(ISA310, STXVRWX, do_lstrm, DEF_MEMOP(MO_UL), true)
TRANS_FLAGS2(ISA310, STXVRDX, do_lstrm, DEF_MEMOP(MO_UQ), true)

static void gen_xxeval_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c,
                           int64_t imm)
{
    /*
     * Instead of processing imm bit-by-bit, we'll skip the computation of
     * conjunctions whose corresponding bit is unset.
     */
    int bit;
    TCGv_i64 conj, disj;

    conj = tcg_temp_new_i64();
    disj = tcg_temp_new_i64();
    tcg_gen_movi_i64(disj, 0);

    /* Iterate over set bits from the least to the most significant bit */
    while (imm) {
        /*
         * Get the next bit to be processed with ctz64. Invert the result of
         * ctz64 to match the indexing used by PowerISA.
         */
        bit = 7 - ctz64(imm);
        if (bit & 0x4) {
            tcg_gen_mov_i64(conj, a);
        } else {
            tcg_gen_not_i64(conj, a);
        }
        if (bit & 0x2) {
            tcg_gen_and_i64(conj, conj, b);
        } else {
            tcg_gen_andc_i64(conj, conj, b);
        }
        if (bit & 0x1) {
            tcg_gen_and_i64(conj, conj, c);
        } else {
            tcg_gen_andc_i64(conj, conj, c);
        }
        tcg_gen_or_i64(disj, disj, conj);

        /* Unset the least significant bit that is set */
        imm &= imm - 1;
    }

    tcg_gen_mov_i64(t, disj);
}

static void gen_xxeval_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
                           TCGv_vec c, int64_t imm)
{
    /*
     * Instead of processing imm bit-by-bit, we'll skip the computation of
     * conjunctions whose corresponding bit is unset.
     */
    int bit;
    TCGv_vec disj, conj;

    conj = tcg_temp_new_vec_matching(t);
    disj = tcg_temp_new_vec_matching(t);
    tcg_gen_dupi_vec(vece, disj, 0);

    /* Iterate over set bits from the least to the most significant bit */
    while (imm) {
        /*
         * Get the next bit to be processed with ctz64. Invert the result of
         * ctz64 to match the indexing used by PowerISA.
         */
        bit = 7 - ctz64(imm);
        if (bit & 0x4) {
            tcg_gen_mov_vec(conj, a);
        } else {
            tcg_gen_not_vec(vece, conj, a);
        }
        if (bit & 0x2) {
            tcg_gen_and_vec(vece, conj, conj, b);
        } else {
            tcg_gen_andc_vec(vece, conj, conj, b);
        }
        if (bit & 0x1) {
            tcg_gen_and_vec(vece, conj, conj, c);
        } else {
            tcg_gen_andc_vec(vece, conj, conj, c);
        }
        tcg_gen_or_vec(vece, disj, disj, conj);

        /* Unset the least significant bit that is set */
        imm &= imm - 1;
    }

    tcg_gen_mov_vec(t, disj);
}

static bool trans_XXEVAL(DisasContext *ctx, arg_8RR_XX4_imm *a)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_andc_vec, 0
    };
    static const GVecGen4i op = {
        .fniv = gen_xxeval_vec,
        .fno = gen_helper_XXEVAL,
        .fni8 = gen_xxeval_i64,
        .opt_opc = vecop_list,
        .vece = MO_64
    };
    int xt = vsr_full_offset(a->xt), xa = vsr_full_offset(a->xa),
        xb = vsr_full_offset(a->xb), xc = vsr_full_offset(a->xc);

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    /* Equivalent functions that can be implemented with a single gen_gvec */
    switch (a->imm) {
    case 0b00000000: /* false */
        set_cpu_vsr(a->xt, tcg_constant_i64(0), true);
        set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
        break;
    case 0b00000011: /* and(B,A) */
        tcg_gen_gvec_and(MO_64, xt, xb, xa, 16, 16);
        break;
    case 0b00000101: /* and(C,A) */
        tcg_gen_gvec_and(MO_64, xt, xc, xa, 16, 16);
        break;
    case 0b00001111: /* A */
        tcg_gen_gvec_mov(MO_64, xt, xa, 16, 16);
        break;
    case 0b00010001: /* and(C,B) */
        tcg_gen_gvec_and(MO_64, xt, xc, xb, 16, 16);
        break;
    case 0b00011011: /* C?B:A */
        tcg_gen_gvec_bitsel(MO_64, xt, xc, xb, xa, 16, 16);
        break;
    case 0b00011101: /* B?C:A */
        tcg_gen_gvec_bitsel(MO_64, xt, xb, xc, xa, 16, 16);
        break;
    case 0b00100111: /* C?A:B */
        tcg_gen_gvec_bitsel(MO_64, xt, xc, xa, xb, 16, 16);
        break;
    case 0b00110011: /* B */
        tcg_gen_gvec_mov(MO_64, xt, xb, 16, 16);
        break;
    case 0b00110101: /* A?C:B */
        tcg_gen_gvec_bitsel(MO_64, xt, xa, xc, xb, 16, 16);
        break;
    case 0b00111100: /* xor(B,A) */
        tcg_gen_gvec_xor(MO_64, xt, xb, xa, 16, 16);
        break;
    case 0b00111111: /* or(B,A) */
        tcg_gen_gvec_or(MO_64, xt, xb, xa, 16, 16);
        break;
    case 0b01000111: /* B?A:C */
        tcg_gen_gvec_bitsel(MO_64, xt, xb, xa, xc, 16, 16);
        break;
    case 0b01010011: /* A?B:C */
        tcg_gen_gvec_bitsel(MO_64, xt, xa, xb, xc, 16, 16);
        break;
    case 0b01010101: /* C */
        tcg_gen_gvec_mov(MO_64, xt, xc, 16, 16);
        break;
    case 0b01011010: /* xor(C,A) */
        tcg_gen_gvec_xor(MO_64, xt, xc, xa, 16, 16);
        break;
    case 0b01011111: /* or(C,A) */
        tcg_gen_gvec_or(MO_64, xt, xc, xa, 16, 16);
        break;
    case 0b01100110: /* xor(C,B) */
        tcg_gen_gvec_xor(MO_64, xt, xc, xb, 16, 16);
        break;
    case 0b01110111: /* or(C,B) */
        tcg_gen_gvec_or(MO_64, xt, xc, xb, 16, 16);
        break;
    case 0b10001000: /* nor(C,B) */
        tcg_gen_gvec_nor(MO_64, xt, xc, xb, 16, 16);
        break;
    case 0b10011001: /* eqv(C,B) */
        tcg_gen_gvec_eqv(MO_64, xt, xc, xb, 16, 16);
        break;
    case 0b10100000: /* nor(C,A) */
        tcg_gen_gvec_nor(MO_64, xt, xc, xa, 16, 16);
        break;
    case 0b10100101: /* eqv(C,A) */
        tcg_gen_gvec_eqv(MO_64, xt, xc, xa, 16, 16);
        break;
    case 0b10101010: /* not(C) */
        tcg_gen_gvec_not(MO_64, xt, xc, 16, 16);
        break;
    case 0b11000000: /* nor(B,A) */
        tcg_gen_gvec_nor(MO_64, xt, xb, xa, 16, 16);
        break;
    case 0b11000011: /* eqv(B,A) */
        tcg_gen_gvec_eqv(MO_64, xt, xb, xa, 16, 16);
        break;
    case 0b11001100: /* not(B) */
        tcg_gen_gvec_not(MO_64, xt, xb, 16, 16);
        break;
    case 0b11101110: /* nand(C,B) */
        tcg_gen_gvec_nand(MO_64, xt, xc, xb, 16, 16);
        break;
    case 0b11110000: /* not(A) */
        tcg_gen_gvec_not(MO_64, xt, xa, 16, 16);
        break;
    case 0b11111010: /* nand(C,A) */
        tcg_gen_gvec_nand(MO_64, xt, xc, xa, 16, 16);
        break;
    case 0b11111100: /* nand(B,A) */
        tcg_gen_gvec_nand(MO_64, xt, xb, xa, 16, 16);
        break;
    case 0b11111111: /* true */
        set_cpu_vsr(a->xt, tcg_constant_i64(-1), true);
        set_cpu_vsr(a->xt, tcg_constant_i64(-1), false);
        break;
    default:
        /* Fallback to compute all conjunctions/disjunctions */
        tcg_gen_gvec_4i(xt, xa, xb, xc, 16, 16, a->imm, &op);
    }

    return true;
}

static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
                             TCGv_vec c)
{
    TCGv_vec tmp = tcg_temp_new_vec_matching(c);
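    /* Replicate each element's sign bit across the element to build the mask */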
    tcg_gen_sari_vec(vece, tmp, c, (8 << vece) - 1);
    tcg_gen_bitsel_vec(vece, t, tmp, b, a);
}

static bool do_xxblendv(DisasContext *ctx, arg_8RR_XX4 *a, unsigned vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sari_vec, 0
    };
    static const GVecGen4 ops[4] = {
        {
            .fniv = gen_xxblendv_vec,
            .fno = gen_helper_XXBLENDVB,
            .opt_opc = vecop_list,
            .vece = MO_8
        },
        {
            .fniv = gen_xxblendv_vec,
            .fno = gen_helper_XXBLENDVH,
            .opt_opc = vecop_list,
            .vece = MO_16
        },
        {
            .fniv = gen_xxblendv_vec,
            .fno = gen_helper_XXBLENDVW,
            .opt_opc = vecop_list,
            .vece = MO_32
        },
        {
            .fniv = gen_xxblendv_vec,
            .fno = gen_helper_XXBLENDVD,
            .opt_opc = vecop_list,
            .vece = MO_64
        }
    };

    REQUIRE_VSX(ctx);

    tcg_gen_gvec_4(vsr_full_offset(a->xt), vsr_full_offset(a->xa),
                   vsr_full_offset(a->xb), vsr_full_offset(a->xc),
                   16, 16, &ops[vece]);

    return true;
}

TRANS(XXBLENDVB, do_xxblendv, MO_8)
TRANS(XXBLENDVH, do_xxblendv, MO_16)
TRANS(XXBLENDVW, do_xxblendv, MO_32)
TRANS(XXBLENDVD, do_xxblendv, MO_64)

static bool do_helper_XX3(DisasContext *ctx, arg_XX3 *a,
    void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr xt, xa, xb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    xt = gen_vsr_ptr(a->xt);
    xa = gen_vsr_ptr(a->xa);
    xb = gen_vsr_ptr(a->xb);

    helper(tcg_env, xt, xa, xb);
    return true;
}

TRANS(XSCMPEQDP, do_helper_XX3, gen_helper_XSCMPEQDP)
TRANS(XSCMPGEDP, do_helper_XX3, gen_helper_XSCMPGEDP)
TRANS(XSCMPGTDP, do_helper_XX3, gen_helper_XSCMPGTDP)
TRANS(XSMAXCDP, do_helper_XX3, gen_helper_XSMAXCDP)
TRANS(XSMINCDP, do_helper_XX3, gen_helper_XSMINCDP)
TRANS(XSMAXJDP, do_helper_XX3, gen_helper_XSMAXJDP)
TRANS(XSMINJDP, do_helper_XX3, gen_helper_XSMINJDP)

static bool do_helper_X(arg_X *a,
    void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr rt, ra, rb;

    rt = gen_avr_ptr(a->rt);
    ra = gen_avr_ptr(a->ra);
    rb = gen_avr_ptr(a->rb);

    helper(tcg_env, rt, ra, rb);
    return true;
}

static bool do_xscmpqp(DisasContext *ctx, arg_X *a,
    void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    return do_helper_X(a, helper);
}

TRANS(XSCMPEQQP, do_xscmpqp, gen_helper_XSCMPEQQP)
TRANS(XSCMPGEQP, do_xscmpqp, gen_helper_XSCMPGEQP)
TRANS(XSCMPGTQP, do_xscmpqp, gen_helper_XSCMPGTQP)
TRANS(XSMAXCQP, do_xscmpqp, gen_helper_XSMAXCQP)
TRANS(XSMINCQP, do_xscmpqp, gen_helper_XSMINCQP)

static bool trans_XVCVSPBF16(DisasContext *ctx, arg_XX2 *a)
{
    TCGv_ptr xt, xb;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);

    xt = gen_vsr_ptr(a->xt);
    xb = gen_vsr_ptr(a->xb);

    gen_helper_XVCVSPBF16(tcg_env, xt, xb);
    return true;
}

static bool trans_XVCVBF16SPN(DisasContext *ctx, arg_XX2 *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);
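
    /*
     * bfloat16 is the upper half of a binary32, so shifting each word
     * left by 16 converts bf16 to single precision exactly.
     */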
    tcg_gen_gvec_shli(MO_32, vsr_full_offset(a->xt), vsr_full_offset(a->xb),
                      16, 16, 16);

    return true;
}

/*
 * The PowerISA 3.1 mentions that for the current version of the
 * architecture, "the hardware implementation provides the effect of
 * ACC[i] and VSRs 4*i to 4*i + 3 logically containing the same data"
 * and "The Accumulators introduce no new logical state at this time"
 * (page 501). For now it seems unnecessary to create new structures,
 * so ACC[i] is the same as VSRs 4*i to 4*i+3, and therefore moves to
 * and from the accumulators are no-ops.
 */
static bool trans_XXMFACC(DisasContext *ctx, arg_X_a *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);
    return true;
}

static bool trans_XXMTACC(DisasContext *ctx, arg_X_a *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);
    return true;
}

static bool trans_XXSETACCZ(DisasContext *ctx, arg_X_a *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);
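    /* ACC[ra] aliases VSRs 4*ra to 4*ra + 3: zero all 64 bytes */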
    tcg_gen_gvec_dup_imm(MO_64, acc_full_offset(a->ra), 64, 64, 0);
    return true;
}

static bool do_ger(DisasContext *ctx, arg_MMIRR_XX3 *a,
    void (*helper)(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32))
{
    uint32_t mask;
    TCGv_ptr xt, xa, xb;
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VSX(ctx);
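    /* The source VSRs must not overlap the accumulator (VSRs 4*xt..4*xt+3) */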
    if (unlikely((a->xa / 4 == a->xt) || (a->xb / 4 == a->xt))) {
        gen_invalid(ctx);
        return true;
    }

    xt = gen_acc_ptr(a->xt);
    xa = gen_vsr_ptr(a->xa);
    xb = gen_vsr_ptr(a->xb);

    mask = ger_pack_masks(a->pmsk, a->ymsk, a->xmsk);
    helper(tcg_env, xa, xb, xt, tcg_constant_i32(mask));
    return true;
}

TRANS(XVI4GER8, do_ger, gen_helper_XVI4GER8)
TRANS(XVI4GER8PP, do_ger, gen_helper_XVI4GER8PP)
TRANS(XVI8GER4, do_ger, gen_helper_XVI8GER4)
TRANS(XVI8GER4PP, do_ger, gen_helper_XVI8GER4PP)
TRANS(XVI8GER4SPP, do_ger, gen_helper_XVI8GER4SPP)
TRANS(XVI16GER2, do_ger, gen_helper_XVI16GER2)
TRANS(XVI16GER2PP, do_ger, gen_helper_XVI16GER2PP)
TRANS(XVI16GER2S, do_ger, gen_helper_XVI16GER2S)
TRANS(XVI16GER2SPP, do_ger, gen_helper_XVI16GER2SPP)

TRANS64(PMXVI4GER8, do_ger, gen_helper_XVI4GER8)
TRANS64(PMXVI4GER8PP, do_ger, gen_helper_XVI4GER8PP)
TRANS64(PMXVI8GER4, do_ger, gen_helper_XVI8GER4)
TRANS64(PMXVI8GER4PP, do_ger, gen_helper_XVI8GER4PP)
TRANS64(PMXVI8GER4SPP, do_ger, gen_helper_XVI8GER4SPP)
TRANS64(PMXVI16GER2, do_ger, gen_helper_XVI16GER2)
TRANS64(PMXVI16GER2PP, do_ger, gen_helper_XVI16GER2PP)
TRANS64(PMXVI16GER2S, do_ger, gen_helper_XVI16GER2S)
TRANS64(PMXVI16GER2SPP, do_ger, gen_helper_XVI16GER2SPP)

TRANS(XVBF16GER2, do_ger, gen_helper_XVBF16GER2)
TRANS(XVBF16GER2PP, do_ger, gen_helper_XVBF16GER2PP)
TRANS(XVBF16GER2PN, do_ger, gen_helper_XVBF16GER2PN)
TRANS(XVBF16GER2NP, do_ger, gen_helper_XVBF16GER2NP)
TRANS(XVBF16GER2NN, do_ger, gen_helper_XVBF16GER2NN)

TRANS(XVF16GER2, do_ger, gen_helper_XVF16GER2)
TRANS(XVF16GER2PP, do_ger, gen_helper_XVF16GER2PP)
TRANS(XVF16GER2PN, do_ger, gen_helper_XVF16GER2PN)
TRANS(XVF16GER2NP, do_ger, gen_helper_XVF16GER2NP)
TRANS(XVF16GER2NN, do_ger, gen_helper_XVF16GER2NN)

TRANS(XVF32GER, do_ger, gen_helper_XVF32GER)
TRANS(XVF32GERPP, do_ger, gen_helper_XVF32GERPP)
TRANS(XVF32GERPN, do_ger, gen_helper_XVF32GERPN)
TRANS(XVF32GERNP, do_ger, gen_helper_XVF32GERNP)
TRANS(XVF32GERNN, do_ger, gen_helper_XVF32GERNN)

TRANS(XVF64GER, do_ger, gen_helper_XVF64GER)
TRANS(XVF64GERPP, do_ger, gen_helper_XVF64GERPP)
TRANS(XVF64GERPN, do_ger, gen_helper_XVF64GERPN)
TRANS(XVF64GERNP, do_ger, gen_helper_XVF64GERNP)
TRANS(XVF64GERNN, do_ger, gen_helper_XVF64GERNN)

TRANS64(PMXVBF16GER2, do_ger, gen_helper_XVBF16GER2)
TRANS64(PMXVBF16GER2PP, do_ger, gen_helper_XVBF16GER2PP)
TRANS64(PMXVBF16GER2PN, do_ger, gen_helper_XVBF16GER2PN)
TRANS64(PMXVBF16GER2NP, do_ger, gen_helper_XVBF16GER2NP)
TRANS64(PMXVBF16GER2NN, do_ger, gen_helper_XVBF16GER2NN)

TRANS64(PMXVF16GER2, do_ger, gen_helper_XVF16GER2)
TRANS64(PMXVF16GER2PP, do_ger, gen_helper_XVF16GER2PP)
TRANS64(PMXVF16GER2PN, do_ger, gen_helper_XVF16GER2PN)
TRANS64(PMXVF16GER2NP, do_ger, gen_helper_XVF16GER2NP)
TRANS64(PMXVF16GER2NN, do_ger, gen_helper_XVF16GER2NN)

TRANS64(PMXVF32GER, do_ger, gen_helper_XVF32GER)
TRANS64(PMXVF32GERPP, do_ger, gen_helper_XVF32GERPP)
TRANS64(PMXVF32GERPN, do_ger, gen_helper_XVF32GERPN)
TRANS64(PMXVF32GERNP, do_ger, gen_helper_XVF32GERNP)
TRANS64(PMXVF32GERNN, do_ger, gen_helper_XVF32GERNN)

TRANS64(PMXVF64GER, do_ger, gen_helper_XVF64GER)
TRANS64(PMXVF64GERPP, do_ger, gen_helper_XVF64GERPP)
TRANS64(PMXVF64GERPN, do_ger, gen_helper_XVF64GERPN)
TRANS64(PMXVF64GERNP, do_ger, gen_helper_XVF64GERNP)
TRANS64(PMXVF64GERNN, do_ger, gen_helper_XVF64GERNN)

#undef GEN_XX2FORM
#undef GEN_XX3FORM
#undef GEN_XX2IFORM
#undef GEN_XX3_RC_FORM
#undef GEN_XX3FORM_DM
#undef VSX_LOGICAL