target/ppc: Move V(ADD|SUB)CUW to decodetree and use gvec
[qemu/armbru.git] / target/ppc/translate/vmx-impl.c.inc
/*
 * translate/vmx-impl.c
 *
 * Altivec/VMX translation
 */

/***                      Altivec vector extension                         ***/
/* Altivec registers moves */

static inline TCGv_ptr gen_avr_ptr(int reg)
{
    TCGv_ptr r = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(r, cpu_env, avr_full_offset(reg));
    return r;
}

#define GEN_VR_LDX(name, opc2, opc3)                                          \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    TCGv_i64 avr;                                                             \
    if (unlikely(!ctx->altivec_enabled)) {                                    \
        gen_exception(ctx, POWERPC_EXCP_VPU);                                 \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    avr = tcg_temp_new_i64();                                                 \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_andi_tl(EA, EA, ~0xf);                                            \
    /*                                                                        \
     * We only need to swap the high and low halves; gen_qemu_ld64_i64        \
     * already does the necessary 64-bit byteswap.                            \
     */                                                                       \
    if (ctx->le_mode) {                                                       \
        gen_qemu_ld64_i64(ctx, avr, EA);                                      \
        set_avr64(rD(ctx->opcode), avr, false);                               \
        tcg_gen_addi_tl(EA, EA, 8);                                           \
        gen_qemu_ld64_i64(ctx, avr, EA);                                      \
        set_avr64(rD(ctx->opcode), avr, true);                                \
    } else {                                                                  \
        gen_qemu_ld64_i64(ctx, avr, EA);                                      \
        set_avr64(rD(ctx->opcode), avr, true);                                \
        tcg_gen_addi_tl(EA, EA, 8);                                           \
        gen_qemu_ld64_i64(ctx, avr, EA);                                      \
        set_avr64(rD(ctx->opcode), avr, false);                               \
    }                                                                         \
    tcg_temp_free(EA);                                                        \
    tcg_temp_free_i64(avr);                                                   \
}

#define GEN_VR_STX(name, opc2, opc3)                                          \
static void gen_st##name(DisasContext *ctx)                                   \
{                                                                             \
    TCGv EA;                                                                  \
    TCGv_i64 avr;                                                             \
    if (unlikely(!ctx->altivec_enabled)) {                                    \
        gen_exception(ctx, POWERPC_EXCP_VPU);                                 \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    avr = tcg_temp_new_i64();                                                 \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_andi_tl(EA, EA, ~0xf);                                            \
    /*                                                                        \
     * We only need to swap the high and low halves; gen_qemu_st64_i64        \
     * already does the necessary 64-bit byteswap.                            \
     */                                                                       \
    if (ctx->le_mode) {                                                       \
        get_avr64(avr, rD(ctx->opcode), false);                               \
        gen_qemu_st64_i64(ctx, avr, EA);                                      \
        tcg_gen_addi_tl(EA, EA, 8);                                           \
        get_avr64(avr, rD(ctx->opcode), true);                                \
        gen_qemu_st64_i64(ctx, avr, EA);                                      \
    } else {                                                                  \
        get_avr64(avr, rD(ctx->opcode), true);                                \
        gen_qemu_st64_i64(ctx, avr, EA);                                      \
        tcg_gen_addi_tl(EA, EA, 8);                                           \
        get_avr64(avr, rD(ctx->opcode), false);                               \
        gen_qemu_st64_i64(ctx, avr, EA);                                      \
    }                                                                         \
    tcg_temp_free(EA);                                                        \
    tcg_temp_free_i64(avr);                                                   \
}

#define GEN_VR_LVE(name, opc2, opc3, size)                              \
static void gen_lve##name(DisasContext *ctx)                            \
    {                                                                   \
        TCGv EA;                                                        \
        TCGv_ptr rs;                                                    \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        gen_set_access_type(ctx, ACCESS_INT);                           \
        EA = tcg_temp_new();                                            \
        gen_addr_reg_index(ctx, EA);                                    \
        if (size > 1) {                                                 \
            tcg_gen_andi_tl(EA, EA, ~(size - 1));                       \
        }                                                               \
        rs = gen_avr_ptr(rS(ctx->opcode));                              \
        gen_helper_lve##name(cpu_env, rs, EA);                          \
        tcg_temp_free(EA);                                              \
        tcg_temp_free_ptr(rs);                                          \
    }

#define GEN_VR_STVE(name, opc2, opc3, size)                             \
static void gen_stve##name(DisasContext *ctx)                           \
    {                                                                   \
        TCGv EA;                                                        \
        TCGv_ptr rs;                                                    \
        if (unlikely(!ctx->altivec_enabled)) {                          \
            gen_exception(ctx, POWERPC_EXCP_VPU);                       \
            return;                                                     \
        }                                                               \
        gen_set_access_type(ctx, ACCESS_INT);                           \
        EA = tcg_temp_new();                                            \
        gen_addr_reg_index(ctx, EA);                                    \
        if (size > 1) {                                                 \
            tcg_gen_andi_tl(EA, EA, ~(size - 1));                       \
        }                                                               \
        rs = gen_avr_ptr(rS(ctx->opcode));                              \
        gen_helper_stve##name(cpu_env, rs, EA);                         \
        tcg_temp_free(EA);                                              \
        tcg_temp_free_ptr(rs);                                          \
    }

GEN_VR_LDX(lvx, 0x07, 0x03);
/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
GEN_VR_LDX(lvxl, 0x07, 0x0B);

GEN_VR_LVE(bx, 0x07, 0x00, 1);
GEN_VR_LVE(hx, 0x07, 0x01, 2);
GEN_VR_LVE(wx, 0x07, 0x02, 4);

GEN_VR_STX(svx, 0x07, 0x07);
/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
GEN_VR_STX(svxl, 0x07, 0x0F);

GEN_VR_STVE(bx, 0x07, 0x04, 1);
GEN_VR_STVE(hx, 0x07, 0x05, 2);
GEN_VR_STVE(wx, 0x07, 0x06, 4);

static void gen_mfvscr(DisasContext *ctx)
{
    TCGv_i32 t;
    TCGv_i64 avr;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    avr = tcg_temp_new_i64();
    tcg_gen_movi_i64(avr, 0);
    set_avr64(rD(ctx->opcode), avr, true);
    t = tcg_temp_new_i32();
    gen_helper_mfvscr(t, cpu_env);
    tcg_gen_extu_i32_i64(avr, t);
    set_avr64(rD(ctx->opcode), avr, false);
    tcg_temp_free_i32(t);
    tcg_temp_free_i64(avr);
}

static void gen_mtvscr(DisasContext *ctx)
{
    TCGv_i32 val;
    int bofs;

    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }

    val = tcg_temp_new_i32();
    bofs = avr_full_offset(rB(ctx->opcode));
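    /*
     * VSCR is set from the least-significant word of vB; on a big-endian
     * host that word is the last of the four, hence the +12 adjustment.
     */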
#if HOST_BIG_ENDIAN
    bofs += 3 * 4;
#endif

    tcg_gen_ld_i32(val, cpu_env, bofs);
    gen_helper_mtvscr(cpu_env, val);
    tcg_temp_free_i32(val);
}

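/*
 * The vmul10{u,eu,cu,ecu}q instructions below share one skeleton: each
 * 64-bit half of vA is multiplied by 10 with tcg_gen_mulu2_i64 and the
 * cross-half carry is propagated by hand.  With add_cin set, the low
 * decimal digit of vB (vB & 0xF) is added in; with ret_carry set, the
 * decimal carry-out is returned instead of the product.
 */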
#define GEN_VX_VMUL10(name, add_cin, ret_carry)                         \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_i64 t0;                                                        \
    TCGv_i64 t1;                                                        \
    TCGv_i64 t2;                                                        \
    TCGv_i64 avr;                                                       \
    TCGv_i64 ten, z;                                                    \
                                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
                                                                        \
    t0 = tcg_temp_new_i64();                                            \
    t1 = tcg_temp_new_i64();                                            \
    t2 = tcg_temp_new_i64();                                            \
    avr = tcg_temp_new_i64();                                           \
    ten = tcg_const_i64(10);                                            \
    z = tcg_const_i64(0);                                               \
                                                                        \
    if (add_cin) {                                                      \
        get_avr64(avr, rA(ctx->opcode), false);                         \
        tcg_gen_mulu2_i64(t0, t1, avr, ten);                            \
        get_avr64(avr, rB(ctx->opcode), false);                         \
        tcg_gen_andi_i64(t2, avr, 0xF);                                 \
        tcg_gen_add2_i64(avr, t2, t0, t1, t2, z);                       \
        set_avr64(rD(ctx->opcode), avr, false);                         \
    } else {                                                            \
        get_avr64(avr, rA(ctx->opcode), false);                         \
        tcg_gen_mulu2_i64(avr, t2, avr, ten);                           \
        set_avr64(rD(ctx->opcode), avr, false);                         \
    }                                                                   \
                                                                        \
    if (ret_carry) {                                                    \
        get_avr64(avr, rA(ctx->opcode), true);                          \
        tcg_gen_mulu2_i64(t0, t1, avr, ten);                            \
        tcg_gen_add2_i64(t0, avr, t0, t1, t2, z);                       \
        set_avr64(rD(ctx->opcode), avr, false);                         \
        set_avr64(rD(ctx->opcode), z, true);                            \
    } else {                                                            \
        get_avr64(avr, rA(ctx->opcode), true);                          \
        tcg_gen_mul_i64(t0, avr, ten);                                  \
        tcg_gen_add_i64(avr, t0, t2);                                   \
        set_avr64(rD(ctx->opcode), avr, true);                          \
    }                                                                   \
                                                                        \
    tcg_temp_free_i64(t0);                                              \
    tcg_temp_free_i64(t1);                                              \
    tcg_temp_free_i64(t2);                                              \
    tcg_temp_free_i64(avr);                                             \
    tcg_temp_free_i64(ten);                                             \
    tcg_temp_free_i64(z);                                               \
}                                                                       \

GEN_VX_VMUL10(vmul10uq, 0, 0);
GEN_VX_VMUL10(vmul10euq, 1, 0);
GEN_VX_VMUL10(vmul10cuq, 0, 1);
GEN_VX_VMUL10(vmul10ecuq, 1, 1);

#define GEN_VXFORM_V(name, vece, tcg_op, opc2, opc3)                    \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
                                                                        \
    tcg_op(vece,                                                        \
           avr_full_offset(rD(ctx->opcode)),                            \
           avr_full_offset(rA(ctx->opcode)),                            \
           avr_full_offset(rB(ctx->opcode)),                            \
           16, 16);                                                     \
}

/* Logical operations */
GEN_VXFORM_V(vand, MO_64, tcg_gen_gvec_and, 2, 16);
GEN_VXFORM_V(vandc, MO_64, tcg_gen_gvec_andc, 2, 17);
GEN_VXFORM_V(vor, MO_64, tcg_gen_gvec_or, 2, 18);
GEN_VXFORM_V(vxor, MO_64, tcg_gen_gvec_xor, 2, 19);
GEN_VXFORM_V(vnor, MO_64, tcg_gen_gvec_nor, 2, 20);
GEN_VXFORM_V(veqv, MO_64, tcg_gen_gvec_eqv, 2, 26);
GEN_VXFORM_V(vnand, MO_64, tcg_gen_gvec_nand, 2, 22);
GEN_VXFORM_V(vorc, MO_64, tcg_gen_gvec_orc, 2, 21);

#define GEN_VXFORM(name, opc2, opc3)                                    \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rd;                                                \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(rd, ra, rb);                                      \
    tcg_temp_free_ptr(ra);                                              \
    tcg_temp_free_ptr(rb);                                              \
    tcg_temp_free_ptr(rd);                                              \
}

#define GEN_VXFORM_TRANS(name, opc2, opc3)                              \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    trans_##name(ctx);                                                  \
}

#define GEN_VXFORM_ENV(name, opc2, opc3)                                \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rd;                                                \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(cpu_env, rd, ra, rb);                             \
    tcg_temp_free_ptr(ra);                                              \
    tcg_temp_free_ptr(rb);                                              \
    tcg_temp_free_ptr(rd);                                              \
}

#define GEN_VXFORM3(name, opc2, opc3)                                   \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr ra, rb, rc, rd;                                            \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    ra = gen_avr_ptr(rA(ctx->opcode));                                  \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    rc = gen_avr_ptr(rC(ctx->opcode));                                  \
    rd = gen_avr_ptr(rD(ctx->opcode));                                  \
    gen_helper_##name(rd, ra, rb, rc);                                  \
    tcg_temp_free_ptr(ra);                                              \
    tcg_temp_free_ptr(rb);                                              \
    tcg_temp_free_ptr(rc);                                              \
    tcg_temp_free_ptr(rd);                                              \
}

/*
 * Support for Altivec instruction pairs that use bit 31 (Rc) as
 * an opcode bit.  In general, these pairs come from different
 * versions of the ISA, so we must also support a pair of flags for
 * each instruction.
 */
#define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)      \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
{                                                                      \
    if ((Rc(ctx->opcode) == 0) &&                                      \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
        gen_##name0(ctx);                                              \
    } else if ((Rc(ctx->opcode) == 1) &&                               \
        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
        gen_##name1(ctx);                                              \
    } else {                                                           \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
    }                                                                  \
}

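/*
 * For example, GEN_VXFORM_DUAL(vadduhm, ..., vmul10ecuq, ...) below
 * produces a single gen_vadduhm_vmul10ecuq() entry point that selects
 * the instruction from Rc and the available ISA flags.
 */
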
/*
 * We use this macro if one instruction is realized with direct
 * translation, and the second one with a helper.
 */
#define GEN_VXFORM_TRANS_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)\
static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
{                                                                      \
    if ((Rc(ctx->opcode) == 0) &&                                      \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
        if (unlikely(!ctx->altivec_enabled)) {                         \
            gen_exception(ctx, POWERPC_EXCP_VPU);                      \
            return;                                                    \
        }                                                              \
        trans_##name0(ctx);                                            \
    } else if ((Rc(ctx->opcode) == 1) &&                               \
        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
        gen_##name1(ctx);                                              \
    } else {                                                           \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
    }                                                                  \
}

/* Like GEN_VXFORM_DUAL, but also checks a mask of invalid opcode bits */
#define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0,                \
                            name1, flg1, flg2_1, inval1)                \
static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
{                                                                       \
    if ((Rc(ctx->opcode) == 0) &&                                       \
        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0)) &&  \
        !(ctx->opcode & inval0)) {                                      \
        gen_##name0(ctx);                                               \
    } else if ((Rc(ctx->opcode) == 1) &&                                \
               ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1)) && \
               !(ctx->opcode & inval1)) {                               \
        gen_##name1(ctx);                                               \
    } else {                                                            \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);             \
    }                                                                   \
}

#define GEN_VXFORM_HETRO(name, opc2, opc3)                              \
static void glue(gen_, name)(DisasContext *ctx)                         \
{                                                                       \
    TCGv_ptr rb;                                                        \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    rb = gen_avr_ptr(rB(ctx->opcode));                                  \
    gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], rb); \
    tcg_temp_free_ptr(rb);                                              \
}

GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0);
GEN_VXFORM_DUAL_EXT(vaddubm, PPC_ALTIVEC, PPC_NONE, 0,       \
                    vmul10cuq, PPC_NONE, PPC2_ISA300, 0x0000F800)
GEN_VXFORM_V(vadduhm, MO_16, tcg_gen_gvec_add, 0, 1);
GEN_VXFORM_DUAL(vadduhm, PPC_ALTIVEC, PPC_NONE,  \
                vmul10ecuq, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_V(vadduwm, MO_32, tcg_gen_gvec_add, 0, 2);
GEN_VXFORM_V(vaddudm, MO_64, tcg_gen_gvec_add, 0, 3);
GEN_VXFORM_V(vsububm, MO_8, tcg_gen_gvec_sub, 0, 16);
GEN_VXFORM_V(vsubuhm, MO_16, tcg_gen_gvec_sub, 0, 17);
GEN_VXFORM_V(vsubuwm, MO_32, tcg_gen_gvec_sub, 0, 18);
GEN_VXFORM_V(vsubudm, MO_64, tcg_gen_gvec_sub, 0, 19);
GEN_VXFORM_V(vmaxub, MO_8, tcg_gen_gvec_umax, 1, 0);
GEN_VXFORM_V(vmaxuh, MO_16, tcg_gen_gvec_umax, 1, 1);
GEN_VXFORM_V(vmaxuw, MO_32, tcg_gen_gvec_umax, 1, 2);
GEN_VXFORM_V(vmaxud, MO_64, tcg_gen_gvec_umax, 1, 3);
GEN_VXFORM_V(vmaxsb, MO_8, tcg_gen_gvec_smax, 1, 4);
GEN_VXFORM_V(vmaxsh, MO_16, tcg_gen_gvec_smax, 1, 5);
GEN_VXFORM_V(vmaxsw, MO_32, tcg_gen_gvec_smax, 1, 6);
GEN_VXFORM_V(vmaxsd, MO_64, tcg_gen_gvec_smax, 1, 7);
GEN_VXFORM_V(vminub, MO_8, tcg_gen_gvec_umin, 1, 8);
GEN_VXFORM_V(vminuh, MO_16, tcg_gen_gvec_umin, 1, 9);
GEN_VXFORM_V(vminuw, MO_32, tcg_gen_gvec_umin, 1, 10);
GEN_VXFORM_V(vminud, MO_64, tcg_gen_gvec_umin, 1, 11);
GEN_VXFORM_V(vminsb, MO_8, tcg_gen_gvec_smin, 1, 12);
GEN_VXFORM_V(vminsh, MO_16, tcg_gen_gvec_smin, 1, 13);
GEN_VXFORM_V(vminsw, MO_32, tcg_gen_gvec_smin, 1, 14);
GEN_VXFORM_V(vminsd, MO_64, tcg_gen_gvec_smin, 1, 15);
GEN_VXFORM(vavgub, 1, 16);
GEN_VXFORM(vabsdub, 1, 16);
GEN_VXFORM_DUAL(vavgub, PPC_ALTIVEC, PPC_NONE, \
                vabsdub, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vavguh, 1, 17);
GEN_VXFORM(vabsduh, 1, 17);
GEN_VXFORM_DUAL(vavguh, PPC_ALTIVEC, PPC_NONE, \
                vabsduh, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vavguw, 1, 18);
GEN_VXFORM(vabsduw, 1, 18);
GEN_VXFORM_DUAL(vavguw, PPC_ALTIVEC, PPC_NONE, \
                vabsduw, PPC_NONE, PPC2_ISA300)
GEN_VXFORM(vavgsb, 1, 20);
GEN_VXFORM(vavgsh, 1, 21);
GEN_VXFORM(vavgsw, 1, 22);
GEN_VXFORM(vmrghb, 6, 0);
GEN_VXFORM(vmrghh, 6, 1);
GEN_VXFORM(vmrghw, 6, 2);
GEN_VXFORM(vmrglb, 6, 4);
GEN_VXFORM(vmrglh, 6, 5);
GEN_VXFORM(vmrglw, 6, 6);

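/*
 * vmrgew VRT,VRA,VRB - Vector Merge Even Word
 *
 * Within each doubleword, the even-numbered (high) word of vA keeps its
 * place and the even-numbered word of vB is deposited into the low word,
 * interleaving the even words of the two sources.
 */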
static void trans_vmrgew(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();

    get_avr64(avr, VB, true);
    tcg_gen_shri_i64(tmp, avr, 32);
    get_avr64(avr, VA, true);
    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
    set_avr64(VT, avr, true);

    get_avr64(avr, VB, false);
    tcg_gen_shri_i64(tmp, avr, 32);
    get_avr64(avr, VA, false);
    tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(avr);
}

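/*
 * vmrgow VRT,VRA,VRB - Vector Merge Odd Word
 *
 * Within each doubleword, the odd-numbered (low) word of vA is deposited
 * into the high word of the result and the odd-numbered word of vB into
 * the low word, interleaving the odd words of the two sources.
 */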
static void trans_vmrgow(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 avr = tcg_temp_new_i64();

    get_avr64(t0, VB, true);
    get_avr64(t1, VA, true);
    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
    set_avr64(VT, avr, true);

    get_avr64(t0, VB, false);
    get_avr64(t1, VA, false);
    tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(avr);
}

/*
 * lvsl VRT,RA,RB - Load Vector for Shift Left
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28-31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes sh:sh+15 of X are placed into vD.
 */
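/* For example, sh = 3 yields vD = 0x030405060708090A || 0x0B0C0D0E0F101112. */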
static void trans_lvsl(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    TCGv_i64 result = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv EA = tcg_temp_new();

    /* Get sh (from the description) by ANDing EA with 0xf. */
    gen_addr_reg_index(ctx, EA);
    tcg_gen_extu_tl_i64(sh, EA);
    tcg_gen_andi_i64(sh, sh, 0xfULL);

    /*
     * Create bytes sh:sh+7 of X (from the description) and place them in
     * the higher doubleword of vD.
     */
    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
    tcg_gen_addi_i64(result, sh, 0x0001020304050607ull);
    set_avr64(VT, result, true);
    /*
     * Create bytes sh+8:sh+15 of X (from the description) and place them in
     * the lower doubleword of vD.
     */
    tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL);
    set_avr64(VT, result, false);

    tcg_temp_free_i64(result);
    tcg_temp_free_i64(sh);
    tcg_temp_free(EA);
}

/*
 * lvsr VRT,RA,RB - Load Vector for Shift Right
 *
 * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28-31].
 * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
 * Bytes (16-sh):(31-sh) of X are placed into vD.
 */
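/* For example, sh = 3 yields vD = 0x0D0E0F1011121314 || 0x15161718191A1B1C. */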
static void trans_lvsr(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    TCGv_i64 result = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv EA = tcg_temp_new();

    /* Get sh (from the description) by ANDing EA with 0xf. */
    gen_addr_reg_index(ctx, EA);
    tcg_gen_extu_tl_i64(sh, EA);
    tcg_gen_andi_i64(sh, sh, 0xfULL);

    /*
     * Create bytes (16-sh):(23-sh) of X (from the description) and place
     * them in the higher doubleword of vD.
     */
    tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
    tcg_gen_subfi_i64(result, 0x1011121314151617ULL, sh);
    set_avr64(VT, result, true);
    /*
     * Create bytes (24-sh):(31-sh) of X (from the description) and place
     * them in the lower doubleword of vD.
     */
    tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh);
    set_avr64(VT, result, false);

    tcg_temp_free_i64(result);
    tcg_temp_free_i64(sh);
    tcg_temp_free(EA);
}

/*
 * vsl VRT,VRA,VRB - Vector Shift Left
 *
 * Shift the 128-bit value of vA left by the amount specified in bits
 * 125-127 of vB. The lowest 3 bits of each byte element of register vB
 * must be identical, or the result is undefined.
 */
static void trans_vsl(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save the highest 'sh' bits of the lower doubleword element of vA in
     * 'carry' and perform the shift on the lower doubleword.
     */
    get_avr64(avr, VA, false);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shri_i64(carry, avr, 32);
    tcg_gen_shr_i64(carry, carry, tmp);
    tcg_gen_shl_i64(avr, avr, sh);
    set_avr64(VT, avr, false);

    /*
     * Perform the shift on the higher doubleword element of vA and replace
     * its lowest 'sh' bits with 'carry'.
     */
    get_avr64(avr, VA, true);
    tcg_gen_shl_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, true);

    tcg_temp_free_i64(avr);
    tcg_temp_free_i64(sh);
    tcg_temp_free_i64(carry);
    tcg_temp_free_i64(tmp);
}

/*
 * vsr VRT,VRA,VRB - Vector Shift Right
 *
 * Shift the 128-bit value of vA right by the amount specified in bits
 * 125-127 of vB. The lowest 3 bits of each byte element of register vB
 * must be identical, or the result is undefined.
 */
static void trans_vsr(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VA = rA(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();
    TCGv_i64 sh = tcg_temp_new_i64();
    TCGv_i64 carry = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Place bits 125-127 of vB in 'sh'. */
    get_avr64(avr, VB, false);
    tcg_gen_andi_i64(sh, avr, 0x07ULL);

    /*
     * Save the lowest 'sh' bits of the higher doubleword element of vA in
     * 'carry' and perform the shift on the higher doubleword.
     */
    get_avr64(avr, VA, true);
    tcg_gen_subfi_i64(tmp, 32, sh);
    tcg_gen_shli_i64(carry, avr, 32);
    tcg_gen_shl_i64(carry, carry, tmp);
    tcg_gen_shr_i64(avr, avr, sh);
    set_avr64(VT, avr, true);
    /*
     * Perform the shift on the lower doubleword element of vA and replace
     * its highest 'sh' bits with 'carry'.
     */
    get_avr64(avr, VA, false);
    tcg_gen_shr_i64(avr, avr, sh);
    tcg_gen_or_i64(avr, avr, carry);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(avr);
    tcg_temp_free_i64(sh);
    tcg_temp_free_i64(carry);
    tcg_temp_free_i64(tmp);
}

/*
 * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
 *
 * For each doubleword element of the source register, the i-th bits
 * (i in 1..8) of its eight bytes are concatenated and placed into the
 * i-th byte of the corresponding doubleword element of the destination
 * register.
 *
 * The solution below handles both doubleword elements of the source
 * register in parallel, to reduce the number of generated ops (hence the
 * arrays). Both doublewords of vB are first placed in the corresponding
 * elements of array avr. Bits are then gathered in 2x8 iterations (two
 * loops). In the first iteration, bit 1 of byte 1, bit 2 of byte 2, ...
 * bit 8 of byte 8 are already in their final spots, so avr[i], i={0,1},
 * can be ANDed with tcg_mask. In each following iteration, avr[i] and
 * tcg_mask are shifted right by 7 and 8 places respectively, which puts
 * bit 1 of byte 2, bit 2 of byte 3, ... bit 7 of byte 8 in their final
 * spots, so the shifted avr values (saved in tmp) can be ANDed with the
 * new value of tcg_mask, and so on. After the first loop, all the first
 * bits are in place, all the second bits except the one from the eighth
 * byte are in place, etc.; only bit 8 of byte 8 is already in its place.
 * The second loop performs the symmetric operations to gather the other
 * half of the bits. Results for the first and second doubleword elements
 * are accumulated in result[0] and result[1] respectively and finally
 * stored into the corresponding doubleword elements of vD.
 */
static void trans_vgbbd(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 tmp = tcg_temp_new_i64();
    uint64_t mask = 0x8040201008040201ULL;
    int i, j;

    TCGv_i64 result[2];
    result[0] = tcg_temp_new_i64();
    result[1] = tcg_temp_new_i64();
    TCGv_i64 avr[2];
    avr[0] = tcg_temp_new_i64();
    avr[1] = tcg_temp_new_i64();
    TCGv_i64 tcg_mask = tcg_temp_new_i64();

    tcg_gen_movi_i64(tcg_mask, mask);
    for (j = 0; j < 2; j++) {
        get_avr64(avr[j], VB, j);
        tcg_gen_and_i64(result[j], avr[j], tcg_mask);
    }
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_mask, mask >> (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shri_i64(tmp, avr[j], i * 7);
            tcg_gen_and_i64(tmp, tmp, tcg_mask);
            tcg_gen_or_i64(result[j], result[j], tmp);
        }
    }
    for (i = 1; i < 8; i++) {
        tcg_gen_movi_i64(tcg_mask, mask << (i * 8));
        for (j = 0; j < 2; j++) {
            tcg_gen_shli_i64(tmp, avr[j], i * 7);
            tcg_gen_and_i64(tmp, tmp, tcg_mask);
            tcg_gen_or_i64(result[j], result[j], tmp);
        }
    }
    for (j = 0; j < 2; j++) {
        set_avr64(VT, result[j], j);
    }

    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(tcg_mask);
    tcg_temp_free_i64(result[0]);
    tcg_temp_free_i64(result[1]);
    tcg_temp_free_i64(avr[0]);
    tcg_temp_free_i64(avr[1]);
}

/*
 * vclzw VRT,VRB - Vector Count Leading Zeros Word
 *
 * Count the number of leading zero bits of each word element in the
 * source register and place the result in the corresponding word
 * element of the destination register.
 */
static void trans_vclzw(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i32 tmp = tcg_temp_new_i32();
    int i;

    /* Perform count for every word element using tcg_gen_clzi_i32. */
    for (i = 0; i < 4; i++) {
        tcg_gen_ld_i32(tmp, cpu_env,
            offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4);
        tcg_gen_clzi_i32(tmp, tmp, 32);
        tcg_gen_st_i32(tmp, cpu_env,
            offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4);
    }

    tcg_temp_free_i32(tmp);
}

/*
 * vclzd VRT,VRB - Vector Count Leading Zeros Doubleword
 *
 * Count the number of leading zero bits of each doubleword element in
 * the source register and place the result in the corresponding
 * doubleword element of the destination register.
 */
static void trans_vclzd(DisasContext *ctx)
{
    int VT = rD(ctx->opcode);
    int VB = rB(ctx->opcode);
    TCGv_i64 avr = tcg_temp_new_i64();

    /* high doubleword */
    get_avr64(avr, VB, true);
    tcg_gen_clzi_i64(avr, avr, 64);
    set_avr64(VT, avr, true);

    /* low doubleword */
    get_avr64(avr, VB, false);
    tcg_gen_clzi_i64(avr, avr, 64);
    set_avr64(VT, avr, false);

    tcg_temp_free_i64(avr);
}

GEN_VXFORM_V(vmuluwm, MO_32, tcg_gen_gvec_mul, 4, 2);
GEN_VXFORM(vsrv, 2, 28);
GEN_VXFORM(vslv, 2, 29);
GEN_VXFORM(vslo, 6, 16);
GEN_VXFORM(vsro, 6, 17);

static bool do_vector_gvec3_VX(DisasContext *ctx, arg_VX *a, int vece,
                               void (*gen_gvec)(unsigned, uint32_t, uint32_t,
                                                uint32_t, uint32_t, uint32_t))
{
    REQUIRE_VECTOR(ctx);

    gen_gvec(vece, avr_full_offset(a->vrt), avr_full_offset(a->vra),
             avr_full_offset(a->vrb), 16, 16);

    return true;
}

TRANS_FLAGS(ALTIVEC, VSLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shlv);
TRANS_FLAGS(ALTIVEC, VSLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shlv);
TRANS_FLAGS(ALTIVEC, VSLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shlv);
TRANS_FLAGS2(ALTIVEC_207, VSLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shlv);

TRANS_FLAGS(ALTIVEC, VSRB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shrv);
TRANS_FLAGS(ALTIVEC, VSRH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shrv);
TRANS_FLAGS(ALTIVEC, VSRW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shrv);
TRANS_FLAGS2(ALTIVEC_207, VSRD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shrv);

TRANS_FLAGS(ALTIVEC, VSRAB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_sarv);
TRANS_FLAGS(ALTIVEC, VSRAH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_sarv);
TRANS_FLAGS(ALTIVEC, VSRAW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_sarv);
TRANS_FLAGS2(ALTIVEC_207, VSRAD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_sarv);

TRANS_FLAGS(ALTIVEC, VRLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_rotlv)
TRANS_FLAGS(ALTIVEC, VRLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_rotlv)
TRANS_FLAGS(ALTIVEC, VRLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_rotlv)
TRANS_FLAGS2(ALTIVEC_207, VRLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_rotlv)

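/*
 * Build the begin/end rotate mask from the b and e fields of vrb:
 * mask = (~0 >> b) ^ ((~0 >> e) >> 1), negated when b > e.  For example,
 * with vece = MO_32, b = 8 and e = 23:
 *   ~0 >> 8         = 0x00FFFFFF
 *   (~0 >> 23) >> 1 = 0x000000FF
 *   xor             = 0x00FFFF00, i.e. bits 8:23 set.
 */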
static TCGv_vec do_vrl_mask_vec(unsigned vece, TCGv_vec vrb)
{
    TCGv_vec t0 = tcg_temp_new_vec_matching(vrb),
             t1 = tcg_temp_new_vec_matching(vrb),
             t2 = tcg_temp_new_vec_matching(vrb),
             ones = tcg_constant_vec_matching(vrb, vece, -1);

    /* Extract b and e */
    tcg_gen_dupi_vec(vece, t2, (8 << vece) - 1);

    tcg_gen_shri_vec(vece, t0, vrb, 16);
    tcg_gen_and_vec(vece, t0, t0, t2);

    tcg_gen_shri_vec(vece, t1, vrb, 8);
    tcg_gen_and_vec(vece, t1, t1, t2);

    /* Compare b and e to negate the mask where begin > end */
    tcg_gen_cmp_vec(TCG_COND_GT, vece, t2, t0, t1);

    /* Create the mask with (~0 >> b) ^ ((~0 >> e) >> 1) */
    tcg_gen_shrv_vec(vece, t0, ones, t0);
    tcg_gen_shrv_vec(vece, t1, ones, t1);
    tcg_gen_shri_vec(vece, t1, t1, 1);
    tcg_gen_xor_vec(vece, t0, t0, t1);

    /* negate the mask */
    tcg_gen_xor_vec(vece, t0, t0, t2);

    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t2);

    return t0;
}

static void gen_vrlnm_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
                          TCGv_vec vrb)
{
    TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt);

    /* Create the mask */
    mask = do_vrl_mask_vec(vece, vrb);

    /* Extract n */
    tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
    tcg_gen_and_vec(vece, n, vrb, n);

    /* Rotate and mask */
    tcg_gen_rotlv_vec(vece, vrt, vra, n);
    tcg_gen_and_vec(vece, vrt, vrt, mask);

    tcg_temp_free_vec(n);
    tcg_temp_free_vec(mask);
}

static bool do_vrlnm(DisasContext *ctx, arg_VX *a, int vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
        INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
    };
    static const GVecGen3 ops[2] = {
        {
            .fniv = gen_vrlnm_vec,
            .fno = gen_helper_VRLWNM,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_32
        },
        {
            .fniv = gen_vrlnm_vec,
            .fno = gen_helper_VRLDNM,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_64
        }
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);

    return true;
}

TRANS(VRLWNM, do_vrlnm, MO_32)
TRANS(VRLDNM, do_vrlnm, MO_64)

static void gen_vrlmi_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
                          TCGv_vec vrb)
{
    TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt),
             tmp = tcg_temp_new_vec_matching(vrt);

    /* Create the mask */
    mask = do_vrl_mask_vec(vece, vrb);

    /* Extract n */
    tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
    tcg_gen_and_vec(vece, n, vrb, n);

    /* Rotate and insert */
    tcg_gen_rotlv_vec(vece, tmp, vra, n);
    tcg_gen_bitsel_vec(vece, vrt, mask, tmp, vrt);

    tcg_temp_free_vec(n);
    tcg_temp_free_vec(tmp);
    tcg_temp_free_vec(mask);
}

static bool do_vrlmi(DisasContext *ctx, arg_VX *a, int vece)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
        INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
    };
    static const GVecGen3 ops[2] = {
        {
            .fniv = gen_vrlmi_vec,
            .fno = gen_helper_VRLWMI,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_32
        },
        {
            .fniv = gen_vrlmi_vec,
            .fno = gen_helper_VRLDMI,
            .opt_opc = vecop_list,
            .load_dest = true,
            .vece = MO_64
        }
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VSX(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);

    return true;
}

TRANS(VRLWMI, do_vrlmi, MO_32)
TRANS(VRLDMI, do_vrlmi, MO_64)

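/*
 * 128-bit shift helper for VSLQ/VSRQ/VSRAQ: if bit 6 of the shift count
 * in vrB (n & 64) is set, the two 64-bit halves swap first, then the
 * residual count (n & 0x3F) is applied to each half.  The bits crossing
 * between halves are recovered with a shift by (n ^ 63) followed by one
 * more shift by 1, avoiding an undefined shift by 64 when the residual
 * count is 0.
 */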
static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right,
                                 bool alg)
{
    TCGv_i64 hi, lo, t0, t1, n, zero = tcg_constant_i64(0);

    REQUIRE_VECTOR(ctx);

    n = tcg_temp_new_i64();
    hi = tcg_temp_new_i64();
    lo = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_const_i64(0);

    get_avr64(lo, a->vra, false);
    get_avr64(hi, a->vra, true);

    get_avr64(n, a->vrb, true);

    tcg_gen_andi_i64(t0, n, 64);
    if (right) {
        tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, hi, lo);
        if (alg) {
            tcg_gen_sari_i64(t1, lo, 63);
        }
        tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, t1, hi);
    } else {
        tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, lo, hi);
        tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, zero, lo);
    }
    tcg_gen_andi_i64(n, n, 0x3F);

    if (right) {
        if (alg) {
            tcg_gen_sar_i64(t0, hi, n);
        } else {
            tcg_gen_shr_i64(t0, hi, n);
        }
    } else {
        tcg_gen_shl_i64(t0, lo, n);
    }
    set_avr64(a->vrt, t0, right);

    if (right) {
        tcg_gen_shr_i64(lo, lo, n);
    } else {
        tcg_gen_shl_i64(hi, hi, n);
    }
    tcg_gen_xori_i64(n, n, 63);
    if (right) {
        tcg_gen_shl_i64(hi, hi, n);
        tcg_gen_shli_i64(hi, hi, 1);
    } else {
        tcg_gen_shr_i64(lo, lo, n);
        tcg_gen_shri_i64(lo, lo, 1);
    }
    tcg_gen_or_i64(hi, hi, lo);
    set_avr64(a->vrt, hi, !right);

    tcg_temp_free_i64(hi);
    tcg_temp_free_i64(lo);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(n);

    return true;
}

TRANS_FLAGS2(ISA310, VSLQ, do_vector_shift_quad, false, false);
TRANS_FLAGS2(ISA310, VSRQ, do_vector_shift_quad, true, false);
TRANS_FLAGS2(ISA310, VSRAQ, do_vector_shift_quad, true, true);

static void do_vrlq_mask(TCGv_i64 mh, TCGv_i64 ml, TCGv_i64 b, TCGv_i64 e)
{
    TCGv_i64 th, tl, t0, t1, zero = tcg_constant_i64(0),
             ones = tcg_constant_i64(-1);

    th = tcg_temp_new_i64();
    tl = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    /* m = ~0 >> b */
    tcg_gen_andi_i64(t0, b, 64);
    tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
    tcg_gen_andi_i64(t0, b, 0x3F);
    tcg_gen_shr_i64(mh, t1, t0);
    tcg_gen_shr_i64(ml, ones, t0);
    tcg_gen_xori_i64(t0, t0, 63);
    tcg_gen_shl_i64(t1, t1, t0);
    tcg_gen_shli_i64(t1, t1, 1);
    tcg_gen_or_i64(ml, t1, ml);

    /* t = ~0 >> e */
    tcg_gen_andi_i64(t0, e, 64);
    tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
    tcg_gen_andi_i64(t0, e, 0x3F);
    tcg_gen_shr_i64(th, t1, t0);
    tcg_gen_shr_i64(tl, ones, t0);
    tcg_gen_xori_i64(t0, t0, 63);
    tcg_gen_shl_i64(t1, t1, t0);
    tcg_gen_shli_i64(t1, t1, 1);
    tcg_gen_or_i64(tl, t1, tl);

    /* t = t >> 1 */
    tcg_gen_extract2_i64(tl, tl, th, 1);
    tcg_gen_shri_i64(th, th, 1);

    /* m = m ^ t */
    tcg_gen_xor_i64(mh, mh, th);
    tcg_gen_xor_i64(ml, ml, tl);

    /* Negate the mask if begin > end */
    tcg_gen_movcond_i64(TCG_COND_GT, t0, b, e, ones, zero);

    tcg_gen_xor_i64(mh, mh, t0);
    tcg_gen_xor_i64(ml, ml, t0);

    tcg_temp_free_i64(th);
    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask,
                                bool insert)
{
    TCGv_i64 ah, al, vrb, n, t0, t1, zero = tcg_constant_i64(0);

    REQUIRE_VECTOR(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);

    ah = tcg_temp_new_i64();
    al = tcg_temp_new_i64();
    vrb = tcg_temp_new_i64();
    n = tcg_temp_new_i64();
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    get_avr64(ah, a->vra, true);
    get_avr64(al, a->vra, false);
    get_avr64(vrb, a->vrb, true);

    tcg_gen_mov_i64(t0, ah);
    tcg_gen_andi_i64(t1, vrb, 64);
    tcg_gen_movcond_i64(TCG_COND_NE, ah, t1, zero, al, ah);
    tcg_gen_movcond_i64(TCG_COND_NE, al, t1, zero, t0, al);
    tcg_gen_andi_i64(n, vrb, 0x3F);

    tcg_gen_shl_i64(t0, ah, n);
    tcg_gen_shl_i64(t1, al, n);

    tcg_gen_xori_i64(n, n, 63);

    tcg_gen_shr_i64(al, al, n);
    tcg_gen_shri_i64(al, al, 1);
    tcg_gen_or_i64(t0, al, t0);

    tcg_gen_shr_i64(ah, ah, n);
    tcg_gen_shri_i64(ah, ah, 1);
    tcg_gen_or_i64(t1, ah, t1);

    if (mask || insert) {
        tcg_gen_extract_i64(n, vrb, 8, 7);
        tcg_gen_extract_i64(vrb, vrb, 16, 7);

        do_vrlq_mask(ah, al, vrb, n);

        tcg_gen_and_i64(t0, t0, ah);
        tcg_gen_and_i64(t1, t1, al);

        if (insert) {
            get_avr64(n, a->vrt, true);
            get_avr64(vrb, a->vrt, false);
            tcg_gen_andc_i64(n, n, ah);
            tcg_gen_andc_i64(vrb, vrb, al);
            tcg_gen_or_i64(t0, t0, n);
            tcg_gen_or_i64(t1, t1, vrb);
        }
    }

    set_avr64(a->vrt, t0, true);
    set_avr64(a->vrt, t1, false);

    tcg_temp_free_i64(ah);
    tcg_temp_free_i64(al);
    tcg_temp_free_i64(vrb);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    return true;
}

TRANS(VRLQ, do_vector_rotl_quad, false, false)
TRANS(VRLQNM, do_vector_rotl_quad, true, false)
TRANS(VRLQMI, do_vector_rotl_quad, false, true)

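/*
 * The saturating forms below compute each element twice: once with the
 * modular op (NORM) and once with the saturating op (SAT).  Elements
 * where the two results differ have saturated, and the comparison mask
 * is ORed into vscr_sat so that VSCR[SAT] gets set.
 */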
#define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3)               \
static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t,     \
                                         TCGv_vec sat, TCGv_vec a,      \
                                         TCGv_vec b)                    \
{                                                                       \
    TCGv_vec x = tcg_temp_new_vec_matching(t);                          \
    glue(glue(tcg_gen_, NORM), _vec)(VECE, x, a, b);                    \
    glue(glue(tcg_gen_, SAT), _vec)(VECE, t, a, b);                     \
    tcg_gen_cmp_vec(TCG_COND_NE, VECE, x, x, t);                        \
    tcg_gen_or_vec(VECE, sat, sat, x);                                  \
    tcg_temp_free_vec(x);                                               \
}                                                                       \
static void glue(gen_, NAME)(DisasContext *ctx)                         \
{                                                                       \
    static const TCGOpcode vecop_list[] = {                             \
        glue(glue(INDEX_op_, NORM), _vec),                              \
        glue(glue(INDEX_op_, SAT), _vec),                               \
        INDEX_op_cmp_vec, 0                                             \
    };                                                                  \
    static const GVecGen4 g = {                                         \
        .fniv = glue(glue(gen_, NAME), _vec),                           \
        .fno = glue(gen_helper_, NAME),                                 \
        .opt_opc = vecop_list,                                          \
        .write_aofs = true,                                             \
        .vece = VECE,                                                   \
    };                                                                  \
    if (unlikely(!ctx->altivec_enabled)) {                              \
        gen_exception(ctx, POWERPC_EXCP_VPU);                           \
        return;                                                         \
    }                                                                   \
    tcg_gen_gvec_4(avr_full_offset(rD(ctx->opcode)),                    \
                   offsetof(CPUPPCState, vscr_sat),                     \
                   avr_full_offset(rA(ctx->opcode)),                    \
                   avr_full_offset(rB(ctx->opcode)),                    \
                   16, 16, &g);                                         \
}

GEN_VXFORM_SAT(vaddubs, MO_8, add, usadd, 0, 8);
GEN_VXFORM_DUAL_EXT(vaddubs, PPC_ALTIVEC, PPC_NONE, 0,       \
                    vmul10uq, PPC_NONE, PPC2_ISA300, 0x0000F800)
GEN_VXFORM_SAT(vadduhs, MO_16, add, usadd, 0, 9);
GEN_VXFORM_DUAL(vadduhs, PPC_ALTIVEC, PPC_NONE, \
                vmul10euq, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_SAT(vadduws, MO_32, add, usadd, 0, 10);
GEN_VXFORM_SAT(vaddsbs, MO_8, add, ssadd, 0, 12);
GEN_VXFORM_SAT(vaddshs, MO_16, add, ssadd, 0, 13);
GEN_VXFORM_SAT(vaddsws, MO_32, add, ssadd, 0, 14);
GEN_VXFORM_SAT(vsububs, MO_8, sub, ussub, 0, 24);
GEN_VXFORM_SAT(vsubuhs, MO_16, sub, ussub, 0, 25);
GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26);
GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28);
GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29);
GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30);
GEN_VXFORM_TRANS(vsl, 2, 7);
GEN_VXFORM_TRANS(vsr, 2, 11);
GEN_VXFORM_ENV(vpkuhum, 7, 0);
GEN_VXFORM_ENV(vpkuwum, 7, 1);
GEN_VXFORM_ENV(vpkudum, 7, 17);
GEN_VXFORM_ENV(vpkuhus, 7, 2);
GEN_VXFORM_ENV(vpkuwus, 7, 3);
GEN_VXFORM_ENV(vpkudus, 7, 19);
GEN_VXFORM_ENV(vpkshus, 7, 4);
GEN_VXFORM_ENV(vpkswus, 7, 5);
GEN_VXFORM_ENV(vpksdus, 7, 21);
GEN_VXFORM_ENV(vpkshss, 7, 6);
GEN_VXFORM_ENV(vpkswss, 7, 7);
GEN_VXFORM_ENV(vpksdss, 7, 23);
GEN_VXFORM(vpkpx, 7, 12);
GEN_VXFORM_ENV(vsum4ubs, 4, 24);
GEN_VXFORM_ENV(vsum4sbs, 4, 28);
GEN_VXFORM_ENV(vsum4shs, 4, 25);
GEN_VXFORM_ENV(vsum2sws, 4, 26);
GEN_VXFORM_ENV(vsumsws, 4, 30);
GEN_VXFORM_ENV(vaddfp, 5, 0);
GEN_VXFORM_ENV(vsubfp, 5, 1);
GEN_VXFORM_ENV(vmaxfp, 5, 16);
GEN_VXFORM_ENV(vminfp, 5, 17);
GEN_VXFORM_HETRO(vextublx, 6, 24)
GEN_VXFORM_HETRO(vextuhlx, 6, 25)
GEN_VXFORM_HETRO(vextuwlx, 6, 26)
GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207,
                vextuwlx, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_HETRO(vextubrx, 6, 28)
GEN_VXFORM_HETRO(vextuhrx, 6, 29)
GEN_VXFORM_HETRO(vextuwrx, 6, 30)
GEN_VXFORM_TRANS(lvsl, 6, 31)
GEN_VXFORM_TRANS(lvsr, 6, 32)
GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207,
                vextuwrx, PPC_NONE, PPC2_ISA300)

1272 #define GEN_VXRFORM1(opname, name, str, opc2, opc3)                     \
1273 static void glue(gen_, name)(DisasContext *ctx)                         \
1274     {                                                                   \
1275         TCGv_ptr ra, rb, rd;                                            \
1276         if (unlikely(!ctx->altivec_enabled)) {                          \
1277             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1278             return;                                                     \
1279         }                                                               \
1280         ra = gen_avr_ptr(rA(ctx->opcode));                              \
1281         rb = gen_avr_ptr(rB(ctx->opcode));                              \
1282         rd = gen_avr_ptr(rD(ctx->opcode));                              \
1283         gen_helper_##opname(cpu_env, rd, ra, rb);                       \
1284         tcg_temp_free_ptr(ra);                                          \
1285         tcg_temp_free_ptr(rb);                                          \
1286         tcg_temp_free_ptr(rd);                                          \
1287     }
1289 #define GEN_VXRFORM(name, opc2, opc3)                                \
1290     GEN_VXRFORM1(name, name, #name, opc2, opc3)                      \
1291     GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
1293 /*
1294  * Support for Altivec instructions that use bit 31 (Rc) as an opcode
1295  * bit but also use bit 21 as an actual Rc bit.  In general, these pairs
1296  * come from different versions of the ISA, so we must also support a
1297  * pair of flags for each instruction.
1298  */
1299 #define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)     \
1300 static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
1301 {                                                                      \
1302     if ((Rc(ctx->opcode) == 0) &&                                      \
1303         ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
1304         if (Rc21(ctx->opcode) == 0) {                                  \
1305             gen_##name0(ctx);                                          \
1306         } else {                                                       \
1307             gen_##name0##_(ctx);                                       \
1308         }                                                              \
1309     } else if ((Rc(ctx->opcode) == 1) &&                               \
1310         ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
1311         if (Rc21(ctx->opcode) == 0) {                                  \
1312             gen_##name1(ctx);                                          \
1313         } else {                                                       \
1314             gen_##name1##_(ctx);                                       \
1315         }                                                              \
1316     } else {                                                           \
1317         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
1318     }                                                                  \
1319 }
1321 static void do_vcmp_rc(int vrt)
1322 {
1323     TCGv_i64 tmp, set, clr;
1325     tmp = tcg_temp_new_i64();
1326     set = tcg_temp_new_i64();
1327     clr = tcg_temp_new_i64();
1329     get_avr64(tmp, vrt, true);
1330     tcg_gen_mov_i64(set, tmp);
1331     get_avr64(tmp, vrt, false);
1332     tcg_gen_or_i64(clr, set, tmp);
1333     tcg_gen_and_i64(set, set, tmp);
1335     tcg_gen_setcondi_i64(TCG_COND_EQ, clr, clr, 0);
1336     tcg_gen_shli_i64(clr, clr, 1);
1338     tcg_gen_setcondi_i64(TCG_COND_EQ, set, set, -1);
1339     tcg_gen_shli_i64(set, set, 3);
1341     tcg_gen_or_i64(tmp, set, clr);
1342     tcg_gen_extrl_i64_i32(cpu_crf[6], tmp);
1344     tcg_temp_free_i64(tmp);
1345     tcg_temp_free_i64(set);
1346     tcg_temp_free_i64(clr);
1347 }
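/*
 * Editorial sketch: host-side reference for the CR6 value computed by
 * do_vcmp_rc above. Bit 3 (value 8) means every bit of the 128-bit compare
 * result was 1 (all elements matched); bit 1 (value 2) means every bit was
 * 0 (no element matched); mixed results leave CR6 = 0.
 */
static inline uint32_t vcmp_cr6_sketch(uint64_t hi, uint64_t lo)
{
    uint32_t all_set = (hi & lo) == UINT64_MAX ? 1 << 3 : 0;
    uint32_t all_clr = (hi | lo) == 0 ? 1 << 1 : 0;

    return all_set | all_clr;
}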
1349 static bool do_vcmp(DisasContext *ctx, arg_VC *a, TCGCond cond, int vece)
1350 {
1351     REQUIRE_VECTOR(ctx);
1353     tcg_gen_gvec_cmp(cond, vece, avr_full_offset(a->vrt),
1354                      avr_full_offset(a->vra), avr_full_offset(a->vrb), 16, 16);
1356     if (a->rc) {
1357         do_vcmp_rc(a->vrt);
1358     }
1360     return true;
1361 }
1363 TRANS_FLAGS(ALTIVEC, VCMPEQUB, do_vcmp, TCG_COND_EQ, MO_8)
1364 TRANS_FLAGS(ALTIVEC, VCMPEQUH, do_vcmp, TCG_COND_EQ, MO_16)
1365 TRANS_FLAGS(ALTIVEC, VCMPEQUW, do_vcmp, TCG_COND_EQ, MO_32)
1366 TRANS_FLAGS2(ALTIVEC_207, VCMPEQUD, do_vcmp, TCG_COND_EQ, MO_64)
1368 TRANS_FLAGS(ALTIVEC, VCMPGTSB, do_vcmp, TCG_COND_GT, MO_8)
1369 TRANS_FLAGS(ALTIVEC, VCMPGTSH, do_vcmp, TCG_COND_GT, MO_16)
1370 TRANS_FLAGS(ALTIVEC, VCMPGTSW, do_vcmp, TCG_COND_GT, MO_32)
1371 TRANS_FLAGS2(ALTIVEC_207, VCMPGTSD, do_vcmp, TCG_COND_GT, MO_64)
1372 TRANS_FLAGS(ALTIVEC, VCMPGTUB, do_vcmp, TCG_COND_GTU, MO_8)
1373 TRANS_FLAGS(ALTIVEC, VCMPGTUH, do_vcmp, TCG_COND_GTU, MO_16)
1374 TRANS_FLAGS(ALTIVEC, VCMPGTUW, do_vcmp, TCG_COND_GTU, MO_32)
1375 TRANS_FLAGS2(ALTIVEC_207, VCMPGTUD, do_vcmp, TCG_COND_GTU, MO_64)
1377 TRANS_FLAGS2(ISA300, VCMPNEB, do_vcmp, TCG_COND_NE, MO_8)
1378 TRANS_FLAGS2(ISA300, VCMPNEH, do_vcmp, TCG_COND_NE, MO_16)
1379 TRANS_FLAGS2(ISA300, VCMPNEW, do_vcmp, TCG_COND_NE, MO_32)
1381 static void gen_vcmpnez_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1382 {
1383     TCGv_vec t0, t1, zero;
1385     t0 = tcg_temp_new_vec_matching(t);
1386     t1 = tcg_temp_new_vec_matching(t);
1387     zero = tcg_constant_vec_matching(t, vece, 0);
1389     tcg_gen_cmp_vec(TCG_COND_EQ, vece, t0, a, zero);
1390     tcg_gen_cmp_vec(TCG_COND_EQ, vece, t1, b, zero);
1391     tcg_gen_cmp_vec(TCG_COND_NE, vece, t, a, b);
1393     tcg_gen_or_vec(vece, t, t, t0);
1394     tcg_gen_or_vec(vece, t, t, t1);
1396     tcg_temp_free_vec(t0);
1397     tcg_temp_free_vec(t1);
1398 }
1400 static bool do_vcmpnez(DisasContext *ctx, arg_VC *a, int vece)
1401 {
1402     static const TCGOpcode vecop_list[] = {
1403         INDEX_op_cmp_vec, 0
1404     };
1405     static const GVecGen3 ops[3] = {
1406         {
1407             .fniv = gen_vcmpnez_vec,
1408             .fno = gen_helper_VCMPNEZB,
1409             .opt_opc = vecop_list,
1410             .vece = MO_8
1411         },
1412         {
1413             .fniv = gen_vcmpnez_vec,
1414             .fno = gen_helper_VCMPNEZH,
1415             .opt_opc = vecop_list,
1416             .vece = MO_16
1417         },
1418         {
1419             .fniv = gen_vcmpnez_vec,
1420             .fno = gen_helper_VCMPNEZW,
1421             .opt_opc = vecop_list,
1422             .vece = MO_32
1423         }
1424     };
1426     REQUIRE_INSNS_FLAGS2(ctx, ISA300);
1427     REQUIRE_VECTOR(ctx);
1429     tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
1430                    avr_full_offset(a->vrb), 16, 16, &ops[vece]);
1432     if (a->rc) {
1433         do_vcmp_rc(a->vrt);
1434     }
1436     return true;
1437 }
1439 TRANS(VCMPNEZB, do_vcmpnez, MO_8)
1440 TRANS(VCMPNEZH, do_vcmpnez, MO_16)
1441 TRANS(VCMPNEZW, do_vcmpnez, MO_32)
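/*
 * Editorial sketch: the per-element predicate that gen_vcmpnez_vec builds
 * above from three vector compares and two ORs (byte case shown).
 * "Not equal or zero" is true when the elements differ or either element
 * is zero, which is what makes these compares useful on NUL-terminated
 * strings.
 */
static inline bool vcmpnez_sketch(uint8_t a, uint8_t b)
{
    return a != b || a == 0 || b == 0;
}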
1443 static bool trans_VCMPEQUQ(DisasContext *ctx, arg_VC *a)
1444 {
1445     TCGv_i64 t0, t1, t2;
1447     t0 = tcg_temp_new_i64();
1448     t1 = tcg_temp_new_i64();
1449     t2 = tcg_temp_new_i64();
1451     get_avr64(t0, a->vra, true);
1452     get_avr64(t1, a->vrb, true);
1453     tcg_gen_xor_i64(t2, t0, t1);
1455     get_avr64(t0, a->vra, false);
1456     get_avr64(t1, a->vrb, false);
1457     tcg_gen_xor_i64(t1, t0, t1);
1459     tcg_gen_or_i64(t1, t1, t2);
1460     tcg_gen_setcondi_i64(TCG_COND_EQ, t1, t1, 0);
1461     tcg_gen_neg_i64(t1, t1);
1463     set_avr64(a->vrt, t1, true);
1464     set_avr64(a->vrt, t1, false);
1466     if (a->rc) {
1467         tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
1468         tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
1469         tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
1470     }
1472     tcg_temp_free_i64(t0);
1473     tcg_temp_free_i64(t1);
1474     tcg_temp_free_i64(t2);
1476     return true;
1477 }
1479 static bool do_vcmpgtq(DisasContext *ctx, arg_VC *a, bool sign)
1480 {
1481     TCGv_i64 t0, t1, t2;
1483     t0 = tcg_temp_new_i64();
1484     t1 = tcg_temp_new_i64();
1485     t2 = tcg_temp_new_i64();
1487     get_avr64(t0, a->vra, false);
1488     get_avr64(t1, a->vrb, false);
1489     tcg_gen_setcond_i64(TCG_COND_GTU, t2, t0, t1);
1491     get_avr64(t0, a->vra, true);
1492     get_avr64(t1, a->vrb, true);
1493     tcg_gen_movcond_i64(TCG_COND_EQ, t2, t0, t1, t2, tcg_constant_i64(0));
1494     tcg_gen_setcond_i64(sign ? TCG_COND_GT : TCG_COND_GTU, t1, t0, t1);
1496     tcg_gen_or_i64(t1, t1, t2);
1497     tcg_gen_neg_i64(t1, t1);
1499     set_avr64(a->vrt, t1, true);
1500     set_avr64(a->vrt, t1, false);
1502     if (a->rc) {
1503         tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
1504         tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
1505         tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
1506     }
1508     tcg_temp_free_i64(t0);
1509     tcg_temp_free_i64(t1);
1510     tcg_temp_free_i64(t2);
1512     return true;
1513 }
1515 TRANS(VCMPGTSQ, do_vcmpgtq, true)
1516 TRANS(VCMPGTUQ, do_vcmpgtq, false)
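/*
 * Editorial sketch: the doubleword decomposition used by do_vcmpgtq above.
 * The high halves decide unless they are equal, in which case the low
 * halves are compared, always unsigned.
 */
static inline bool vcmpgtq_sketch(int64_t ah, uint64_t al,
                                  int64_t bh, uint64_t bl, bool sign)
{
    if (ah != bh) {
        return sign ? ah > bh : (uint64_t)ah > (uint64_t)bh;
    }
    return al > bl;
}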
1518 static bool do_vcmpq(DisasContext *ctx, arg_VX_bf *a, bool sign)
1519 {
1520     TCGv_i64 vra, vrb;
1521     TCGLabel *gt, *lt, *done;
1523     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1524     REQUIRE_VECTOR(ctx);
1526     vra = tcg_temp_local_new_i64();
1527     vrb = tcg_temp_local_new_i64();
1528     gt = gen_new_label();
1529     lt = gen_new_label();
1530     done = gen_new_label();
1532     get_avr64(vra, a->vra, true);
1533     get_avr64(vrb, a->vrb, true);
1534     tcg_gen_brcond_i64((sign ? TCG_COND_GT : TCG_COND_GTU), vra, vrb, gt);
1535     tcg_gen_brcond_i64((sign ? TCG_COND_LT : TCG_COND_LTU), vra, vrb, lt);
1537     get_avr64(vra, a->vra, false);
1538     get_avr64(vrb, a->vrb, false);
1539     tcg_gen_brcond_i64(TCG_COND_GTU, vra, vrb, gt);
1540     tcg_gen_brcond_i64(TCG_COND_LTU, vra, vrb, lt);
1542     tcg_gen_movi_i32(cpu_crf[a->bf], CRF_EQ);
1543     tcg_gen_br(done);
1545     gen_set_label(gt);
1546     tcg_gen_movi_i32(cpu_crf[a->bf], CRF_GT);
1547     tcg_gen_br(done);
1549     gen_set_label(lt);
1550     tcg_gen_movi_i32(cpu_crf[a->bf], CRF_LT);
1551     tcg_gen_br(done);
1553     gen_set_label(done);
1554     tcg_temp_free_i64(vra);
1555     tcg_temp_free_i64(vrb);
1557     return true;
1558 }
1560 TRANS(VCMPSQ, do_vcmpq, true)
1561 TRANS(VCMPUQ, do_vcmpq, false)
1563 GEN_VXRFORM(vcmpeqfp, 3, 3)
1564 GEN_VXRFORM(vcmpgefp, 3, 7)
1565 GEN_VXRFORM(vcmpgtfp, 3, 11)
1566 GEN_VXRFORM(vcmpbfp, 3, 15)
1568 static void gen_vsplti(DisasContext *ctx, int vece)
1569 {
1570     int simm;
1572     if (unlikely(!ctx->altivec_enabled)) {
1573         gen_exception(ctx, POWERPC_EXCP_VPU);
1574         return;
1575     }
1577     simm = SIMM5(ctx->opcode);
1578     tcg_gen_gvec_dup_imm(vece, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);
1579 }
1581 #define GEN_VXFORM_VSPLTI(name, vece, opc2, opc3) \
1582 static void glue(gen_, name)(DisasContext *ctx) { gen_vsplti(ctx, vece); }
1584 GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12);
1585 GEN_VXFORM_VSPLTI(vspltish, MO_16, 6, 13);
1586 GEN_VXFORM_VSPLTI(vspltisw, MO_32, 6, 14);
1588 #define GEN_VXFORM_NOA(name, opc2, opc3)                                \
1589 static void glue(gen_, name)(DisasContext *ctx)                         \
1590     {                                                                   \
1591         TCGv_ptr rb, rd;                                                \
1592         if (unlikely(!ctx->altivec_enabled)) {                          \
1593             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1594             return;                                                     \
1595         }                                                               \
1596         rb = gen_avr_ptr(rB(ctx->opcode));                              \
1597         rd = gen_avr_ptr(rD(ctx->opcode));                              \
1598         gen_helper_##name(rd, rb);                                      \
1599         tcg_temp_free_ptr(rb);                                          \
1600         tcg_temp_free_ptr(rd);                                          \
1601     }
1603 #define GEN_VXFORM_NOA_ENV(name, opc2, opc3)                            \
1604 static void glue(gen_, name)(DisasContext *ctx)                         \
1605     {                                                                   \
1606         TCGv_ptr rb, rd;                                                \
1607                                                                         \
1608         if (unlikely(!ctx->altivec_enabled)) {                          \
1609             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1610             return;                                                     \
1611         }                                                               \
1612         rb = gen_avr_ptr(rB(ctx->opcode));                              \
1613         rd = gen_avr_ptr(rD(ctx->opcode));                              \
1614         gen_helper_##name(cpu_env, rd, rb);                             \
1615         tcg_temp_free_ptr(rb);                                          \
1616         tcg_temp_free_ptr(rd);                                          \
1617     }
1619 #define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4)                        \
1620 static void glue(gen_, name)(DisasContext *ctx)                         \
1621     {                                                                   \
1622         TCGv_ptr rb, rd;                                                \
1623         if (unlikely(!ctx->altivec_enabled)) {                          \
1624             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1625             return;                                                     \
1626         }                                                               \
1627         rb = gen_avr_ptr(rB(ctx->opcode));                              \
1628         rd = gen_avr_ptr(rD(ctx->opcode));                              \
1629         gen_helper_##name(rd, rb);                                      \
1630         tcg_temp_free_ptr(rb);                                          \
1631         tcg_temp_free_ptr(rd);                                          \
1632     }
1634 #define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4)                        \
1635 static void glue(gen_, name)(DisasContext *ctx)                         \
1636     {                                                                   \
1637         TCGv_ptr rb;                                                    \
1638         if (unlikely(!ctx->altivec_enabled)) {                          \
1639             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1640             return;                                                     \
1641         }                                                               \
1642         rb = gen_avr_ptr(rB(ctx->opcode));                              \
1643         gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb);                \
1644         tcg_temp_free_ptr(rb);                                          \
1645     }
1646 GEN_VXFORM_NOA(vupkhsb, 7, 8);
1647 GEN_VXFORM_NOA(vupkhsh, 7, 9);
1648 GEN_VXFORM_NOA(vupkhsw, 7, 25);
1649 GEN_VXFORM_NOA(vupklsb, 7, 10);
1650 GEN_VXFORM_NOA(vupklsh, 7, 11);
1651 GEN_VXFORM_NOA(vupklsw, 7, 27);
1652 GEN_VXFORM_NOA(vupkhpx, 7, 13);
1653 GEN_VXFORM_NOA(vupklpx, 7, 15);
1654 GEN_VXFORM_NOA_ENV(vrefp, 5, 4);
1655 GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5);
1656 GEN_VXFORM_NOA_ENV(vexptefp, 5, 6);
1657 GEN_VXFORM_NOA_ENV(vlogefp, 5, 7);
1658 GEN_VXFORM_NOA_ENV(vrfim, 5, 11);
1659 GEN_VXFORM_NOA_ENV(vrfin, 5, 8);
1660 GEN_VXFORM_NOA_ENV(vrfip, 5, 10);
1661 GEN_VXFORM_NOA_ENV(vrfiz, 5, 9);
1662 GEN_VXFORM_NOA(vprtybw, 1, 24);
1663 GEN_VXFORM_NOA(vprtybd, 1, 24);
1664 GEN_VXFORM_NOA(vprtybq, 1, 24);
1666 static void gen_vsplt(DisasContext *ctx, int vece)
1667 {
1668     int uimm, dofs, bofs;
1670     if (unlikely(!ctx->altivec_enabled)) {
1671         gen_exception(ctx, POWERPC_EXCP_VPU);
1672         return;
1673     }
1675     uimm = UIMM5(ctx->opcode);
1676     bofs = avr_full_offset(rB(ctx->opcode));
1677     dofs = avr_full_offset(rD(ctx->opcode));
1679     /* Experimental testing shows that hardware masks the immediate.  */
1680     bofs += (uimm << vece) & 15;
1681 #if !HOST_BIG_ENDIAN
1682     bofs ^= 15;
1683     bofs &= ~((1 << vece) - 1);
1684 #endif
1686     tcg_gen_gvec_dup_mem(vece, dofs, bofs, 16, 16);
1687 }
1689 #define GEN_VXFORM_VSPLT(name, vece, opc2, opc3) \
1690 static void glue(gen_, name)(DisasContext *ctx) { gen_vsplt(ctx, vece); }
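/*
 * Editorial sketch: the host-endian fixup gen_vsplt applies above, shown
 * for the byte case (vece == MO_8, where the alignment mask is a no-op).
 * Guest element numbering is big-endian, so on a little-endian host the
 * element lives at the mirrored byte offset within the 16-byte register.
 */
static inline int avr_byte_offset_sketch(int guest_elem)
{
#if HOST_BIG_ENDIAN
    return guest_elem;
#else
    return guest_elem ^ 15;
#endif
}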
1692 #define GEN_VXFORM_UIMM_ENV(name, opc2, opc3)                           \
1693 static void glue(gen_, name)(DisasContext *ctx)                         \
1694     {                                                                   \
1695         TCGv_ptr rb, rd;                                                \
1696         TCGv_i32 uimm;                                                  \
1697                                                                         \
1698         if (unlikely(!ctx->altivec_enabled)) {                          \
1699             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1700             return;                                                     \
1701         }                                                               \
1702         uimm = tcg_const_i32(UIMM5(ctx->opcode));                       \
1703         rb = gen_avr_ptr(rB(ctx->opcode));                              \
1704         rd = gen_avr_ptr(rD(ctx->opcode));                              \
1705         gen_helper_##name(cpu_env, rd, rb, uimm);                       \
1706         tcg_temp_free_i32(uimm);                                        \
1707         tcg_temp_free_ptr(rb);                                          \
1708         tcg_temp_free_ptr(rd);                                          \
1709     }
1711 #define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max)              \
1712 static void glue(gen_, name)(DisasContext *ctx)                         \
1713     {                                                                   \
1714         TCGv_ptr rb, rd;                                                \
1715         uint8_t uimm = UIMM4(ctx->opcode);                              \
1716         TCGv_i32 t0;                                                    \
1717         if (unlikely(!ctx->altivec_enabled)) {                          \
1718             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1719             return;                                                     \
1720         }                                                               \
1721         if (uimm > splat_max) {                                         \
1722             uimm = 0;                                                   \
1723         }                                                               \
1724         t0 = tcg_temp_new_i32();                                        \
1725         tcg_gen_movi_i32(t0, uimm);                                     \
1726         rb = gen_avr_ptr(rB(ctx->opcode));                              \
1727         rd = gen_avr_ptr(rD(ctx->opcode));                              \
1728         gen_helper_##name(rd, rb, t0);                                  \
1729         tcg_temp_free_i32(t0);                                          \
1730         tcg_temp_free_ptr(rb);                                          \
1731         tcg_temp_free_ptr(rd);                                          \
1732     }
1734 GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8);
1735 GEN_VXFORM_VSPLT(vsplth, MO_16, 6, 9);
1736 GEN_VXFORM_VSPLT(vspltw, MO_32, 6, 10);
1737 GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
1738 GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
1739 GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
1740 GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
1741 GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
1742 GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
1743 GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
1744 GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15);
1745 GEN_VXFORM_DUAL(vspltb, PPC_ALTIVEC, PPC_NONE,
1746                 vextractub, PPC_NONE, PPC2_ISA300);
1747 GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE,
1748                 vextractuh, PPC_NONE, PPC2_ISA300);
1749 GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE,
1750                 vextractuw, PPC_NONE, PPC2_ISA300);
1752 static bool trans_VGNB(DisasContext *ctx, arg_VX_n *a)
1753 {
1754     /*
1755      * Similar to do_vextractm, we'll use a sequence of mask-shift-or operations
1756      * to gather the bits. The masks can be created with
1757      *
1758      * uint64_t mask(uint64_t n, uint64_t step)
1759      * {
1760      *     uint64_t p = ((1UL << (1UL << step)) - 1UL) << ((n - 1UL) << step),
1761      *                  plen = n << step, m = 0;
1762      *     for(int i = 0; i < 64/plen; i++) {
1763      *         m |= p;
1764      *         m = ror64(m, plen);
1765      *     }
1766      *     p >>= plen * DIV_ROUND_UP(64, plen) - 64;
1767      *     return m | p;
1768      * }
1769      *
1770      * But since there are few values of N, we'll use a lookup table to avoid
1771      * these calculations at runtime.
1772      */
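    /*
     * Editorial note, one case of the mask() pseudocode above worked by
     * hand: for N = 2, step = 0 we get p = 0b10 and plen = 2, and the 32
     * OR-and-rotate steps accumulate alternating bits, so
     * m = 0xAAAAAAAAAAAAAAAA. The final correction shift is by 0, hence
     * mask(2, 0) == mask[0][0] below.
     */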
1773     static const uint64_t mask[6][5] = {
1774         {
1775             0xAAAAAAAAAAAAAAAAULL, 0xccccccccccccccccULL, 0xf0f0f0f0f0f0f0f0ULL,
1776             0xff00ff00ff00ff00ULL, 0xffff0000ffff0000ULL
1777         },
1778         {
1779             0x9249249249249249ULL, 0xC30C30C30C30C30CULL, 0xF00F00F00F00F00FULL,
1780             0xFF0000FF0000FF00ULL, 0xFFFF00000000FFFFULL
1781         },
1782         {
1783             /* For N >= 4, some mask operations can be elided */
1784             0x8888888888888888ULL, 0, 0xf000f000f000f000ULL, 0,
1785             0xFFFF000000000000ULL
1786         },
1787         {
1788             0x8421084210842108ULL, 0, 0xF0000F0000F0000FULL, 0, 0
1789         },
1790         {
1791             0x8208208208208208ULL, 0, 0xF00000F00000F000ULL, 0, 0
1792         },
1793         {
1794             0x8102040810204081ULL, 0, 0xF000000F000000F0ULL, 0, 0
1795         }
1796     };
1797     uint64_t m;
1798     int i, sh, nbits = DIV_ROUND_UP(64, a->n);
1799     TCGv_i64 hi, lo, t0, t1;
1801     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1802     REQUIRE_VECTOR(ctx);
1804     if (a->n < 2) {
1805         /*
1806          * "N can be any value between 2 and 7, inclusive." Otherwise, the
1807          * result is undefined, so we don't need to change RT. Also, N > 7 is
1808          * impossible since the immediate field is 3 bits only.
1809          */
1810         return true;
1811     }
1813     hi = tcg_temp_new_i64();
1814     lo = tcg_temp_new_i64();
1815     t0 = tcg_temp_new_i64();
1816     t1 = tcg_temp_new_i64();
1818     get_avr64(hi, a->vrb, true);
1819     get_avr64(lo, a->vrb, false);
1821     /* Align the lower doubleword so we can use the same mask */
1822     tcg_gen_shli_i64(lo, lo, a->n * nbits - 64);
1824     /*
1825      * Starting from the most significant bit, gather every Nth bit with a
1826      * sequence of mask-shift-or operations. E.g.: for N=3
1827      * AxxBxxCxxDxxExxFxxGxxHxxIxxJxxKxxLxxMxxNxxOxxPxxQxxRxxSxxTxxUxxV
1828      *     & rep(0b100)
1829      * A..B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V
1830      *     << 2
1831      * .B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V..
1832      *     |
1833      * AB.BC.CD.DE.EF.FG.GH.HI.IJ.JK.KL.LM.MN.NO.OP.PQ.QR.RS.ST.TU.UV.V
1834      *  & rep(0b110000)
1835      * AB....CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV..
1836      *     << 4
1837      * ..CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV......
1838      *     |
1839      * ABCD..CDEF..EFGH..GHIJ..IJKL..KLMN..MNOP..OPQR..QRST..STUV..UV..
1840      *     & rep(0b111100000000)
1841      * ABCD........EFGH........IJKL........MNOP........QRST........UV..
1842      *     << 8
1843      * ....EFGH........IJKL........MNOP........QRST........UV..........
1844      *     |
1845      * ABCDEFGH....EFGHIJKL....IJKLMNOP....MNOPQRST....QRSTUV......UV..
1846      *  & rep(0b111111110000000000000000)
1847      * ABCDEFGH................IJKLMNOP................QRSTUV..........
1848      *     << 16
1849      * ........IJKLMNOP................QRSTUV..........................
1850      *     |
1851      * ABCDEFGHIJKLMNOP........IJKLMNOPQRSTUV..........QRSTUV..........
1852      *     & rep(0b111111111111111100000000000000000000000000000000)
1853      * ABCDEFGHIJKLMNOP................................QRSTUV..........
1854      *     << 32
1855      * ................QRSTUV..........................................
1856      *     |
1857      * ABCDEFGHIJKLMNOPQRSTUV..........................QRSTUV..........
1858      */
1859     for (i = 0, sh = a->n - 1; i < 5; i++, sh <<= 1) {
1860         m = mask[a->n - 2][i];
1861         if (m) {
1862             tcg_gen_andi_i64(hi, hi, m);
1863             tcg_gen_andi_i64(lo, lo, m);
1864         }
1865         if (sh < 64) {
1866             tcg_gen_shli_i64(t0, hi, sh);
1867             tcg_gen_shli_i64(t1, lo, sh);
1868             tcg_gen_or_i64(hi, t0, hi);
1869             tcg_gen_or_i64(lo, t1, lo);
1870         }
1871     }
1873     tcg_gen_andi_i64(hi, hi, ~(~0ULL >> nbits));
1874     tcg_gen_andi_i64(lo, lo, ~(~0ULL >> nbits));
1875     tcg_gen_shri_i64(lo, lo, nbits);
1876     tcg_gen_or_i64(hi, hi, lo);
1877     tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], hi);
1879     tcg_temp_free_i64(hi);
1880     tcg_temp_free_i64(lo);
1881     tcg_temp_free_i64(t0);
1882     tcg_temp_free_i64(t1);
1884     return true;
1885 }
1887 static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right,
1888                void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv))
1889 {
1890     TCGv_ptr vrt, vra, vrb;
1891     TCGv rc;
1893     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1894     REQUIRE_VECTOR(ctx);
1896     vrt = gen_avr_ptr(a->vrt);
1897     vra = gen_avr_ptr(a->vra);
1898     vrb = gen_avr_ptr(a->vrb);
1899     rc = tcg_temp_new();
1901     tcg_gen_andi_tl(rc, cpu_gpr[a->rc], 0x1F);
1902     if (right) {
1903         tcg_gen_subfi_tl(rc, 32 - size, rc);
1904     }
1905     gen_helper(cpu_env, vrt, vra, vrb, rc);
1907     tcg_temp_free_ptr(vrt);
1908     tcg_temp_free_ptr(vra);
1909     tcg_temp_free_ptr(vrb);
1910     tcg_temp_free(rc);
1911     return true;
1912 }
1914 TRANS(VEXTDUBVLX, do_vextdx, 1, false, gen_helper_VEXTDUBVLX)
1915 TRANS(VEXTDUHVLX, do_vextdx, 2, false, gen_helper_VEXTDUHVLX)
1916 TRANS(VEXTDUWVLX, do_vextdx, 4, false, gen_helper_VEXTDUWVLX)
1917 TRANS(VEXTDDVLX, do_vextdx, 8, false, gen_helper_VEXTDDVLX)
1919 TRANS(VEXTDUBVRX, do_vextdx, 1, true, gen_helper_VEXTDUBVLX)
1920 TRANS(VEXTDUHVRX, do_vextdx, 2, true, gen_helper_VEXTDUHVLX)
1921 TRANS(VEXTDUWVRX, do_vextdx, 4, true, gen_helper_VEXTDUWVLX)
1922 TRANS(VEXTDDVRX, do_vextdx, 8, true, gen_helper_VEXTDDVLX)
1924 static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
1925             TCGv_i64 rb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1926 {
1927     TCGv_ptr t;
1928     TCGv idx;
1930     t = gen_avr_ptr(vrt);
1931     idx = tcg_temp_new();
1933     tcg_gen_andi_tl(idx, ra, 0xF);
1934     if (right) {
1935         tcg_gen_subfi_tl(idx, 16 - size, idx);
1936     }
1938     gen_helper(cpu_env, t, rb, idx);
1940     tcg_temp_free_ptr(t);
1941     tcg_temp_free(idx);
1943     return true;
1944 }
1946 static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
1947                 int vrb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1948 {
1949     bool ok;
1950     TCGv_i64 val;
1952     val = tcg_temp_new_i64();
1953     get_avr64(val, vrb, true);
1954     ok = do_vinsx(ctx, vrt, size, right, ra, val, gen_helper);
1956     tcg_temp_free_i64(val);
1957     return ok;
1958 }
1960 static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
1961                         void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1962 {
1963     bool ok;
1964     TCGv_i64 val;
1966     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1967     REQUIRE_VECTOR(ctx);
1969     val = tcg_temp_new_i64();
1970     tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
1972     ok = do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper);
1974     tcg_temp_free_i64(val);
1975     return ok;
1976 }
1978 static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
1979                         void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1980 {
1981     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1982     REQUIRE_VECTOR(ctx);
1984     return do_vinsvx(ctx, a->vrt, size, right, cpu_gpr[a->vra], a->vrb,
1985                      gen_helper);
1986 }
1988 static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
1989                         void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1990 {
1991     bool ok;
1992     TCGv_i64 val;
1994     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1995     REQUIRE_VECTOR(ctx);
1997     if (a->uim > (16 - size)) {
1998         /*
1999          * PowerISA v3.1 says that the resulting value is undefined in this
2000          * case, so just log a guest error and leave VRT unchanged. The
2001          * real hardware would do a partial insert, e.g. if VRT is zeroed and
2002          * RB is 0x12345678, executing "vinsw VRT,RB,14" results in
2003          * VRT = 0x0000...00001234, but we don't bother to reproduce this
2004          * behavior as software shouldn't rely on it.
2005          */
2006         qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINS* at"
2007             " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
2008             16 - size);
2009         return true;
2010     }
2012     val = tcg_temp_new_i64();
2013     tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
2015     ok = do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val,
2016                   gen_helper);
2018     tcg_temp_free_i64(val);
2019     return ok;
2020 }
2022 static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
2023                         void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
2024 {
2025     REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2026     REQUIRE_VECTOR(ctx);
2028     if (a->uim > (16 - size)) {
2029         qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINSERT* at"
2030             " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
2031             16 - size);
2032         return true;
2033     }
2035     return do_vinsvx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), a->vrb,
2036                      gen_helper);
2037 }
2039 TRANS(VINSBLX, do_vinsx_VX, 1, false, gen_helper_VINSBLX)
2040 TRANS(VINSHLX, do_vinsx_VX, 2, false, gen_helper_VINSHLX)
2041 TRANS(VINSWLX, do_vinsx_VX, 4, false, gen_helper_VINSWLX)
2042 TRANS(VINSDLX, do_vinsx_VX, 8, false, gen_helper_VINSDLX)
2044 TRANS(VINSBRX, do_vinsx_VX, 1, true, gen_helper_VINSBLX)
2045 TRANS(VINSHRX, do_vinsx_VX, 2, true, gen_helper_VINSHLX)
2046 TRANS(VINSWRX, do_vinsx_VX, 4, true, gen_helper_VINSWLX)
2047 TRANS(VINSDRX, do_vinsx_VX, 8, true, gen_helper_VINSDLX)
2049 TRANS(VINSW, do_vins_VX_uim4, 4, gen_helper_VINSWLX)
2050 TRANS(VINSD, do_vins_VX_uim4, 8, gen_helper_VINSDLX)
2052 TRANS(VINSBVLX, do_vinsvx_VX, 1, false, gen_helper_VINSBLX)
2053 TRANS(VINSHVLX, do_vinsvx_VX, 2, false, gen_helper_VINSHLX)
2054 TRANS(VINSWVLX, do_vinsvx_VX, 4, false, gen_helper_VINSWLX)
2056 TRANS(VINSBVRX, do_vinsvx_VX, 1, true, gen_helper_VINSBLX)
2057 TRANS(VINSHVRX, do_vinsvx_VX, 2, true, gen_helper_VINSHLX)
2058 TRANS(VINSWVRX, do_vinsvx_VX, 4, true, gen_helper_VINSWLX)
2060 TRANS(VINSERTB, do_vinsert_VX_uim4, 1, gen_helper_VINSBLX)
2061 TRANS(VINSERTH, do_vinsert_VX_uim4, 2, gen_helper_VINSHLX)
2062 TRANS(VINSERTW, do_vinsert_VX_uim4, 4, gen_helper_VINSWLX)
2063 TRANS(VINSERTD, do_vinsert_VX_uim4, 8, gen_helper_VINSDLX)
2065 static void gen_vsldoi(DisasContext *ctx)
2066 {
2067     TCGv_ptr ra, rb, rd;
2068     TCGv_i32 sh;
2069     if (unlikely(!ctx->altivec_enabled)) {
2070         gen_exception(ctx, POWERPC_EXCP_VPU);
2071         return;
2072     }
2073     ra = gen_avr_ptr(rA(ctx->opcode));
2074     rb = gen_avr_ptr(rB(ctx->opcode));
2075     rd = gen_avr_ptr(rD(ctx->opcode));
2076     sh = tcg_const_i32(VSH(ctx->opcode));
2077     gen_helper_vsldoi(rd, ra, rb, sh);
2078     tcg_temp_free_ptr(ra);
2079     tcg_temp_free_ptr(rb);
2080     tcg_temp_free_ptr(rd);
2081     tcg_temp_free_i32(sh);
2082 }
2084 static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a)
2085 {
2086     TCGv_i64 t0, t1, t2;
2088     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2089     REQUIRE_VECTOR(ctx);
2091     t0 = tcg_temp_new_i64();
2092     t1 = tcg_temp_new_i64();
2094     get_avr64(t0, a->vra, true);
2095     get_avr64(t1, a->vra, false);
2097     if (a->sh != 0) {
2098         t2 = tcg_temp_new_i64();
2100         get_avr64(t2, a->vrb, true);
2102         tcg_gen_extract2_i64(t0, t1, t0, 64 - a->sh);
2103         tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh);
2105         tcg_temp_free_i64(t2);
2106     }
2108     set_avr64(a->vrt, t0, true);
2109     set_avr64(a->vrt, t1, false);
2111     tcg_temp_free_i64(t0);
2112     tcg_temp_free_i64(t1);
2114     return true;
2115 }
2117 static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a)
2118 {
2119     TCGv_i64 t2, t1, t0;
2121     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2122     REQUIRE_VECTOR(ctx);
2124     t0 = tcg_temp_new_i64();
2125     t1 = tcg_temp_new_i64();
2127     get_avr64(t0, a->vrb, false);
2128     get_avr64(t1, a->vrb, true);
2130     if (a->sh != 0) {
2131         t2 = tcg_temp_new_i64();
2133         get_avr64(t2, a->vra, false);
2135         tcg_gen_extract2_i64(t0, t0, t1, a->sh);
2136         tcg_gen_extract2_i64(t1, t1, t2, a->sh);
2138         tcg_temp_free_i64(t2);
2139     }
2141     set_avr64(a->vrt, t0, false);
2142     set_avr64(a->vrt, t1, true);
2144     tcg_temp_free_i64(t0);
2145     tcg_temp_free_i64(t1);
2147     return true;
2148 }
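/*
 * Editorial note on the tcg_gen_extract2_i64() idiom used by VSLDBI and
 * VSRDBI above: extract2(dst, lo, hi, ofs) computes the 64-bit window at
 * bit offset ofs of the 128-bit pair hi:lo, i.e. for 0 < ofs < 64
 *
 *     dst = (hi << (64 - ofs)) | (lo >> ofs)
 *
 * so two of them funnel-shift a doubleword pair by sh bits.
 */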
2150 static bool do_vexpand(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
2151 {
2152     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2153     REQUIRE_VECTOR(ctx);
2155     tcg_gen_gvec_sari(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb),
2156                       (8 << vece) - 1, 16, 16);
2158     return true;
2159 }
2161 TRANS(VEXPANDBM, do_vexpand, MO_8)
2162 TRANS(VEXPANDHM, do_vexpand, MO_16)
2163 TRANS(VEXPANDWM, do_vexpand, MO_32)
2164 TRANS(VEXPANDDM, do_vexpand, MO_64)
2166 static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a)
2167 {
2168     TCGv_i64 tmp;
2170     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2171     REQUIRE_VECTOR(ctx);
2173     tmp = tcg_temp_new_i64();
2175     get_avr64(tmp, a->vrb, true);
2176     tcg_gen_sari_i64(tmp, tmp, 63);
2177     set_avr64(a->vrt, tmp, false);
2178     set_avr64(a->vrt, tmp, true);
2180     tcg_temp_free_i64(tmp);
2181     return true;
2182 }
2184 static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
2185 {
2186     const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece,
2187                    mask = dup_const(vece, 1 << (elem_width - 1));
2188     uint64_t i, j;
2189     TCGv_i64 lo, hi, t0, t1;
2191     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2192     REQUIRE_VECTOR(ctx);
2194     hi = tcg_temp_new_i64();
2195     lo = tcg_temp_new_i64();
2196     t0 = tcg_temp_new_i64();
2197     t1 = tcg_temp_new_i64();
2199     get_avr64(lo, a->vrb, false);
2200     get_avr64(hi, a->vrb, true);
2202     tcg_gen_andi_i64(lo, lo, mask);
2203     tcg_gen_andi_i64(hi, hi, mask);
2205     /*
2206      * Gather the most significant bit of each element in the highest
2207      * element. E.g. for bytes:
2208      * aXXXXXXXbXXXXXXXcXXXXXXXdXXXXXXXeXXXXXXXfXXXXXXXgXXXXXXXhXXXXXXX
2209      *     & dup(1 << (elem_width - 1))
2210      * a0000000b0000000c0000000d0000000e0000000f0000000g0000000h0000000
2211      *     << 32 - 4
2212      * 0000e0000000f0000000g0000000h00000000000000000000000000000000000
2213      *     |
2214      * a000e000b000f000c000g000d000h000e0000000f0000000g0000000h0000000
2215      *     << 16 - 2
2216      * 00c000g000d000h000e0000000f0000000g0000000h000000000000000000000
2217      *     |
2218      * a0c0e0g0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h0000000
2219      *     << 8 - 1
2220      * 0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h00000000000000
2221      *     |
2222      * abcdefghbcdefgh0cdefgh00defgh000efgh0000fgh00000gh000000h0000000
2223      */
2224     for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
2225         tcg_gen_shli_i64(t0, hi, j - i);
2226         tcg_gen_shli_i64(t1, lo, j - i);
2227         tcg_gen_or_i64(hi, hi, t0);
2228         tcg_gen_or_i64(lo, lo, t1);
2229     }
2231     tcg_gen_shri_i64(hi, hi, 64 - elem_count_half);
2232     tcg_gen_extract2_i64(lo, lo, hi, 64 - elem_count_half);
2233     tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], lo);
2235     tcg_temp_free_i64(hi);
2236     tcg_temp_free_i64(lo);
2237     tcg_temp_free_i64(t0);
2238     tcg_temp_free_i64(t1);
2240     return true;
2241 }
2243 TRANS(VEXTRACTBM, do_vextractm, MO_8)
2244 TRANS(VEXTRACTHM, do_vextractm, MO_16)
2245 TRANS(VEXTRACTWM, do_vextractm, MO_32)
2246 TRANS(VEXTRACTDM, do_vextractm, MO_64)
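/*
 * Editorial sketch: reference semantics of the extraction above, byte
 * case. Element 0 (big-endian numbering) supplies the most significant
 * bit of the result, which is what the shift/or doubling loop in
 * do_vextractm gathers without iterating over elements.
 */
static inline uint32_t vextractbm_sketch(const uint8_t elem[16])
{
    uint32_t r = 0;

    for (int i = 0; i < 16; i++) {
        r = (r << 1) | (elem[i] >> 7);
    }
    return r;
}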
2248 static bool trans_VEXTRACTQM(DisasContext *ctx, arg_VX_tb *a)
2249 {
2250     TCGv_i64 tmp;
2252     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2253     REQUIRE_VECTOR(ctx);
2255     tmp = tcg_temp_new_i64();
2257     get_avr64(tmp, a->vrb, true);
2258     tcg_gen_shri_i64(tmp, tmp, 63);
2259     tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], tmp);
2261     tcg_temp_free_i64(tmp);
2263     return true;
2264 }
2266 static bool do_mtvsrm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
2267 {
2268     const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece;
2269     uint64_t c;
2270     int i, j;
2271     TCGv_i64 hi, lo, t0, t1;
2273     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2274     REQUIRE_VECTOR(ctx);
2276     hi = tcg_temp_new_i64();
2277     lo = tcg_temp_new_i64();
2278     t0 = tcg_temp_new_i64();
2279     t1 = tcg_temp_new_i64();
2281     tcg_gen_extu_tl_i64(t0, cpu_gpr[a->vrb]);
2282     tcg_gen_extract_i64(hi, t0, elem_count_half, elem_count_half);
2283     tcg_gen_extract_i64(lo, t0, 0, elem_count_half);
2285     /*
2286      * Spread the bits into their respective elements.
2287      * E.g. for bytes:
2288      * 00000000000000000000000000000000000000000000000000000000abcdefgh
2289      *   << 32 - 4
2290      * 0000000000000000000000000000abcdefgh0000000000000000000000000000
2291      *   |
2292      * 0000000000000000000000000000abcdefgh00000000000000000000abcdefgh
2293      *   << 16 - 2
2294      * 00000000000000abcdefgh00000000000000000000abcdefgh00000000000000
2295      *   |
2296      * 00000000000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh
2297      *   << 8 - 1
2298      * 0000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh0000000
2299      *   |
2300      * 0000000abcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgh
2301      *   & dup(1)
2302      * 0000000a0000000b0000000c0000000d0000000e0000000f0000000g0000000h
2303      *   * 0xff
2304      * aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh
2305      */
2306     for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
2307         tcg_gen_shli_i64(t0, hi, j - i);
2308         tcg_gen_shli_i64(t1, lo, j - i);
2309         tcg_gen_or_i64(hi, hi, t0);
2310         tcg_gen_or_i64(lo, lo, t1);
2311     }
2313     c = dup_const(vece, 1);
2314     tcg_gen_andi_i64(hi, hi, c);
2315     tcg_gen_andi_i64(lo, lo, c);
2317     c = MAKE_64BIT_MASK(0, elem_width);
2318     tcg_gen_muli_i64(hi, hi, c);
2319     tcg_gen_muli_i64(lo, lo, c);
2321     set_avr64(a->vrt, lo, false);
2322     set_avr64(a->vrt, hi, true);
2324     tcg_temp_free_i64(hi);
2325     tcg_temp_free_i64(lo);
2326     tcg_temp_free_i64(t0);
2327     tcg_temp_free_i64(t1);
2329     return true;
2330 }
2332 TRANS(MTVSRBM, do_mtvsrm, MO_8)
2333 TRANS(MTVSRHM, do_mtvsrm, MO_16)
2334 TRANS(MTVSRWM, do_mtvsrm, MO_32)
2335 TRANS(MTVSRDM, do_mtvsrm, MO_64)
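/*
 * Editorial sketch (assuming big-endian element numbering as elsewhere in
 * this file): reference semantics of MTVSRBM as expanded above. Bit
 * 15 - i of the GPR decides whether byte element i becomes all-ones or
 * all-zeros; the shift/or spreading loop plus the dup_const(vece, 1) mask
 * and the * 0xFF multiply implement the same thing branch-free.
 */
static inline void mtvsrbm_sketch(uint8_t elem[16], uint16_t mask)
{
    for (int i = 0; i < 16; i++) {
        elem[i] = (mask >> (15 - i)) & 1 ? 0xff : 0x00;
    }
}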
2337 static bool trans_MTVSRQM(DisasContext *ctx, arg_VX_tb *a)
2338 {
2339     TCGv_i64 tmp;
2341     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2342     REQUIRE_VECTOR(ctx);
2344     tmp = tcg_temp_new_i64();
2346     tcg_gen_ext_tl_i64(tmp, cpu_gpr[a->vrb]);
2347     tcg_gen_sextract_i64(tmp, tmp, 0, 1);
2348     set_avr64(a->vrt, tmp, false);
2349     set_avr64(a->vrt, tmp, true);
2351     tcg_temp_free_i64(tmp);
2353     return true;
2354 }
2356 static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a)
2357 {
2358     const uint64_t mask = dup_const(MO_8, 1);
2359     uint64_t hi, lo;
2361     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2362     REQUIRE_VECTOR(ctx);
2364     hi = extract16(a->b, 8, 8);
2365     lo = extract16(a->b, 0, 8);
2367     for (int i = 4, j = 32; i > 0; i >>= 1, j >>= 1) {
2368         hi |= hi << (j - i);
2369         lo |= lo << (j - i);
2370     }
2372     hi = (hi & mask) * 0xFF;
2373     lo = (lo & mask) * 0xFF;
2375     set_avr64(a->vrt, tcg_constant_i64(hi), true);
2376     set_avr64(a->vrt, tcg_constant_i64(lo), false);
2378     return true;
2379 }
2381 static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece)
2382 {
2383     TCGv_i64 rt, vrb, mask;
2384     rt = tcg_const_i64(0);
2385     vrb = tcg_temp_new_i64();
2386     mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1)));
2388     for (int i = 0; i < 2; i++) {
2389         get_avr64(vrb, a->vrb, i);
2390         if (a->mp) {
2391             tcg_gen_and_i64(vrb, mask, vrb);
2392         } else {
2393             tcg_gen_andc_i64(vrb, mask, vrb);
2394         }
2395         tcg_gen_ctpop_i64(vrb, vrb);
2396         tcg_gen_add_i64(rt, rt, vrb);
2397     }
2399     tcg_gen_shli_i64(rt, rt, TARGET_LONG_BITS - 8 + vece);
2400     tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], rt);
2402     tcg_temp_free_i64(vrb);
2403     tcg_temp_free_i64(rt);
2405     return true;
2406 }
2408 TRANS(VCNTMBB, do_vcntmb, MO_8)
2409 TRANS(VCNTMBH, do_vcntmb, MO_16)
2410 TRANS(VCNTMBW, do_vcntmb, MO_32)
2411 TRANS(VCNTMBD, do_vcntmb, MO_64)
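/*
 * Editorial sketch: reference for the byte case of do_vcntmb above.
 * Elements whose most significant bit equals MP are counted, and the
 * count is returned left-justified in the GPR, matching the
 * TARGET_LONG_BITS - 8 + vece shift (56 for bytes on a 64-bit target).
 */
static inline uint64_t vcntmbb_sketch(const uint8_t elem[16], bool mp)
{
    uint64_t cnt = 0;

    for (int i = 0; i < 16; i++) {
        cnt += (elem[i] >> 7) == mp;
    }
    return cnt << 56;
}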
2413 static bool do_vstri(DisasContext *ctx, arg_VX_tb_rc *a,
2414                      void (*gen_helper)(TCGv_i32, TCGv_ptr, TCGv_ptr))
2415 {
2416     TCGv_ptr vrt, vrb;
2418     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2419     REQUIRE_VECTOR(ctx);
2421     vrt = gen_avr_ptr(a->vrt);
2422     vrb = gen_avr_ptr(a->vrb);
2424     if (a->rc) {
2425         gen_helper(cpu_crf[6], vrt, vrb);
2426     } else {
2427         TCGv_i32 discard = tcg_temp_new_i32();
2428         gen_helper(discard, vrt, vrb);
2429         tcg_temp_free_i32(discard);
2430     }
2432     tcg_temp_free_ptr(vrt);
2433     tcg_temp_free_ptr(vrb);
2435     return true;
2436 }
2438 TRANS(VSTRIBL, do_vstri, gen_helper_VSTRIBL)
2439 TRANS(VSTRIBR, do_vstri, gen_helper_VSTRIBR)
2440 TRANS(VSTRIHL, do_vstri, gen_helper_VSTRIHL)
2441 TRANS(VSTRIHR, do_vstri, gen_helper_VSTRIHR)
2443 static bool do_vclrb(DisasContext *ctx, arg_VX *a, bool right)
2444 {
2445     TCGv_i64 rb, mh, ml, tmp,
2446              ones = tcg_constant_i64(-1),
2447              zero = tcg_constant_i64(0);
2449     rb = tcg_temp_new_i64();
2450     mh = tcg_temp_new_i64();
2451     ml = tcg_temp_new_i64();
2452     tmp = tcg_temp_new_i64();
2454     tcg_gen_extu_tl_i64(rb, cpu_gpr[a->vrb]);
2455     tcg_gen_andi_i64(tmp, rb, 7);
2456     tcg_gen_shli_i64(tmp, tmp, 3);
2457     if (right) {
2458         tcg_gen_shr_i64(tmp, ones, tmp);
2459     } else {
2460         tcg_gen_shl_i64(tmp, ones, tmp);
2461     }
2462     tcg_gen_not_i64(tmp, tmp);
2464     if (right) {
2465         tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
2466                             tmp, ones);
2467         tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
2468                             zero, tmp);
2469         tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(16),
2470                             ml, ones);
2471     } else {
2472         tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
2473                             tmp, ones);
2474         tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
2475                             zero, tmp);
2476         tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(16),
2477                             mh, ones);
2478     }
2480     get_avr64(tmp, a->vra, true);
2481     tcg_gen_and_i64(tmp, tmp, mh);
2482     set_avr64(a->vrt, tmp, true);
2484     get_avr64(tmp, a->vra, false);
2485     tcg_gen_and_i64(tmp, tmp, ml);
2486     set_avr64(a->vrt, tmp, false);
2488     tcg_temp_free_i64(rb);
2489     tcg_temp_free_i64(mh);
2490     tcg_temp_free_i64(ml);
2491     tcg_temp_free_i64(tmp);
2493     return true;
2494 }
2496 TRANS(VCLRLB, do_vclrb, false)
2497 TRANS(VCLRRB, do_vclrb, true)
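/*
 * Editorial note on do_vclrb above: tmp is first built as a mask covering
 * RB % 8 bytes within one doubleword, then the movcond network selects,
 * per doubleword, between that partial mask, all-zeros and all-ones,
 * according to whether the byte count RB falls below, inside or beyond
 * that doubleword; VRA is finally ANDed with the resulting 128-bit mask.
 */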
2499 #define GEN_VAFORM_PAIRED(name0, name1, opc2)                           \
2500 static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
2501     {                                                                   \
2502         TCGv_ptr ra, rb, rc, rd;                                        \
2503         if (unlikely(!ctx->altivec_enabled)) {                          \
2504             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
2505             return;                                                     \
2506         }                                                               \
2507         ra = gen_avr_ptr(rA(ctx->opcode));                              \
2508         rb = gen_avr_ptr(rB(ctx->opcode));                              \
2509         rc = gen_avr_ptr(rC(ctx->opcode));                              \
2510         rd = gen_avr_ptr(rD(ctx->opcode));                              \
2511         if (Rc(ctx->opcode)) {                                          \
2512             gen_helper_##name1(cpu_env, rd, ra, rb, rc);                \
2513         } else {                                                        \
2514             gen_helper_##name0(cpu_env, rd, ra, rb, rc);                \
2515         }                                                               \
2516         tcg_temp_free_ptr(ra);                                          \
2517         tcg_temp_free_ptr(rb);                                          \
2518         tcg_temp_free_ptr(rc);                                          \
2519         tcg_temp_free_ptr(rd);                                          \
2520     }
2522 GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)
2524 static bool do_va_helper(DisasContext *ctx, arg_VA *a,
2525     void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
2526 {
2527     TCGv_ptr vrt, vra, vrb, vrc;
2528     REQUIRE_VECTOR(ctx);
2530     vrt = gen_avr_ptr(a->vrt);
2531     vra = gen_avr_ptr(a->vra);
2532     vrb = gen_avr_ptr(a->vrb);
2533     vrc = gen_avr_ptr(a->rc);
2534     gen_helper(vrt, vra, vrb, vrc);
2535     tcg_temp_free_ptr(vrt);
2536     tcg_temp_free_ptr(vra);
2537     tcg_temp_free_ptr(vrb);
2538     tcg_temp_free_ptr(vrc);
2540     return true;
2541 }
2543 TRANS_FLAGS2(ALTIVEC_207, VADDECUQ, do_va_helper, gen_helper_VADDECUQ)
2544 TRANS_FLAGS2(ALTIVEC_207, VADDEUQM, do_va_helper, gen_helper_VADDEUQM)
2546 TRANS_FLAGS2(ALTIVEC_207, VSUBEUQM, do_va_helper, gen_helper_VSUBEUQM)
2547 TRANS_FLAGS2(ALTIVEC_207, VSUBECUQ, do_va_helper, gen_helper_VSUBECUQ)
2549 TRANS_FLAGS(ALTIVEC, VPERM, do_va_helper, gen_helper_VPERM)
2550 TRANS_FLAGS2(ISA300, VPERMR, do_va_helper, gen_helper_VPERMR)
2552 static void gen_vmladduhm_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
2553                               TCGv_vec c)
2554 {
2555     tcg_gen_mul_vec(vece, t, a, b);
2556     tcg_gen_add_vec(vece, t, t, c);
2557 }
2559 static bool trans_VMLADDUHM(DisasContext *ctx, arg_VA *a)
2560 {
2561     static const TCGOpcode vecop_list[] = {
2562         INDEX_op_add_vec, INDEX_op_mul_vec, 0
2563     };
2565     static const GVecGen4 op = {
2566         .fno = gen_helper_VMLADDUHM,
2567         .fniv = gen_vmladduhm_vec,
2568         .opt_opc = vecop_list,
2569         .vece = MO_16
2570     };
2572     REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
2573     REQUIRE_VECTOR(ctx);
2575     tcg_gen_gvec_4(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2576                    avr_full_offset(a->vrb), avr_full_offset(a->rc),
2577                    16, 16, &op);
2579     return true;
2580 }
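/*
 * Editorial sketch: per-element semantics of VMLADDUHM as expanded above,
 * a modulo-65536 multiply-add with no saturation.
 */
static inline uint16_t vmladduhm_sketch(uint16_t a, uint16_t b, uint16_t c)
{
    return (uint16_t)((uint32_t)a * b + c);
}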
2582 static bool trans_VSEL(DisasContext *ctx, arg_VA *a)
2583 {
2584     REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
2585     REQUIRE_VECTOR(ctx);
2587     tcg_gen_gvec_bitsel(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->rc),
2588                         avr_full_offset(a->vrb), avr_full_offset(a->vra),
2589                         16, 16);
2591     return true;
2592 }
2594 TRANS_FLAGS(ALTIVEC, VMSUMUBM, do_va_helper, gen_helper_VMSUMUBM)
2595 TRANS_FLAGS(ALTIVEC, VMSUMMBM, do_va_helper, gen_helper_VMSUMMBM)
2596 TRANS_FLAGS(ALTIVEC, VMSUMSHM, do_va_helper, gen_helper_VMSUMSHM)
2597 TRANS_FLAGS(ALTIVEC, VMSUMUHM, do_va_helper, gen_helper_VMSUMUHM)
2599 static bool do_va_env_helper(DisasContext *ctx, arg_VA *a,
2600     void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
2601 {
2602     TCGv_ptr vrt, vra, vrb, vrc;
2603     REQUIRE_VECTOR(ctx);
2605     vrt = gen_avr_ptr(a->vrt);
2606     vra = gen_avr_ptr(a->vra);
2607     vrb = gen_avr_ptr(a->vrb);
2608     vrc = gen_avr_ptr(a->rc);
2609     gen_helper(cpu_env, vrt, vra, vrb, vrc);
2610     tcg_temp_free_ptr(vrt);
2611     tcg_temp_free_ptr(vra);
2612     tcg_temp_free_ptr(vrb);
2613     tcg_temp_free_ptr(vrc);
2615     return true;
2616 }
2618 TRANS_FLAGS(ALTIVEC, VMSUMUHS, do_va_env_helper, gen_helper_VMSUMUHS)
2619 TRANS_FLAGS(ALTIVEC, VMSUMSHS, do_va_env_helper, gen_helper_VMSUMSHS)
2621 TRANS_FLAGS(ALTIVEC, VMHADDSHS, do_va_env_helper, gen_helper_VMHADDSHS)
2622 TRANS_FLAGS(ALTIVEC, VMHRADDSHS, do_va_env_helper, gen_helper_VMHRADDSHS)
2624 GEN_VXFORM_NOA(vclzb, 1, 28)
2625 GEN_VXFORM_NOA(vclzh, 1, 29)
2626 GEN_VXFORM_TRANS(vclzw, 1, 30)
2627 GEN_VXFORM_TRANS(vclzd, 1, 31)
2628 GEN_VXFORM_NOA_2(vnegw, 1, 24, 6)
2629 GEN_VXFORM_NOA_2(vnegd, 1, 24, 7)
2631 static void gen_vexts_i64(TCGv_i64 t, TCGv_i64 b, int64_t s)
2632 {
2633     tcg_gen_sextract_i64(t, b, 0, 64 - s);
2634 }
2636 static void gen_vexts_i32(TCGv_i32 t, TCGv_i32 b, int32_t s)
2637 {
2638     tcg_gen_sextract_i32(t, b, 0, 32 - s);
2639 }
2641 static void gen_vexts_vec(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t s)
2642 {
2643     tcg_gen_shli_vec(vece, t, b, s);
2644     tcg_gen_sari_vec(vece, t, t, s);
2645 }
2647 static bool do_vexts(DisasContext *ctx, arg_VX_tb *a, unsigned vece, int64_t s)
2648 {
2649     static const TCGOpcode vecop_list[] = {
2650         INDEX_op_shli_vec, INDEX_op_sari_vec, 0
2651     };
2653     static const GVecGen2i op[2] = {
2654         {
2655             .fni4 = gen_vexts_i32,
2656             .fniv = gen_vexts_vec,
2657             .opt_opc = vecop_list,
2658             .vece = MO_32
2659         },
2660         {
2661             .fni8 = gen_vexts_i64,
2662             .fniv = gen_vexts_vec,
2663             .opt_opc = vecop_list,
2664             .vece = MO_64
2665         },
2666     };
2668     REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2669     REQUIRE_VECTOR(ctx);
2671     tcg_gen_gvec_2i(avr_full_offset(a->vrt), avr_full_offset(a->vrb),
2672                     16, 16, s, &op[vece - MO_32]);
2674     return true;
2675 }
2677 TRANS(VEXTSB2W, do_vexts, MO_32, 24);
2678 TRANS(VEXTSH2W, do_vexts, MO_32, 16);
2679 TRANS(VEXTSB2D, do_vexts, MO_64, 56);
2680 TRANS(VEXTSH2D, do_vexts, MO_64, 48);
2681 TRANS(VEXTSW2D, do_vexts, MO_64, 32);
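/*
 * Editorial sketch: the integer and vector flavours above agree because
 * sign-extending the low 64 - s bits equals a left shift by s followed by
 * an arithmetic right shift by s. (The cast through uint64_t keeps the
 * left shift well defined in C; the right shift relies on the usual
 * arithmetic-shift behaviour for signed types.)
 */
static inline int64_t vexts_sketch(int64_t b, unsigned s)
{
    return (int64_t)((uint64_t)b << s) >> s;
}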
2683 static bool trans_VEXTSD2Q(DisasContext *ctx, arg_VX_tb *a)
2684 {
2685     TCGv_i64 tmp;
2687     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2688     REQUIRE_VECTOR(ctx);
2690     tmp = tcg_temp_new_i64();
2692     get_avr64(tmp, a->vrb, false);
2693     set_avr64(a->vrt, tmp, false);
2694     tcg_gen_sari_i64(tmp, tmp, 63);
2695     set_avr64(a->vrt, tmp, true);
2697     tcg_temp_free_i64(tmp);
2698     return true;
2699 }
2701 GEN_VXFORM_NOA_2(vctzb, 1, 24, 28)
2702 GEN_VXFORM_NOA_2(vctzh, 1, 24, 29)
2703 GEN_VXFORM_NOA_2(vctzw, 1, 24, 30)
2704 GEN_VXFORM_NOA_2(vctzd, 1, 24, 31)
2705 GEN_VXFORM_NOA_3(vclzlsbb, 1, 24, 0)
2706 GEN_VXFORM_NOA_3(vctzlsbb, 1, 24, 1)
2707 GEN_VXFORM_NOA(vpopcntb, 1, 28)
2708 GEN_VXFORM_NOA(vpopcnth, 1, 29)
2709 GEN_VXFORM_NOA(vpopcntw, 1, 30)
2710 GEN_VXFORM_NOA(vpopcntd, 1, 31)
2711 GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \
2712                 vpopcntb, PPC_NONE, PPC2_ALTIVEC_207)
2713 GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \
2714                 vpopcnth, PPC_NONE, PPC2_ALTIVEC_207)
2715 GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
2716                 vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
2717 GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
2718                 vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
2719 GEN_VXFORM(vbpermd, 6, 23);
2720 GEN_VXFORM(vbpermq, 6, 21);
2721 GEN_VXFORM_TRANS(vgbbd, 6, 20);
2722 GEN_VXFORM(vpmsumb, 4, 16)
2723 GEN_VXFORM(vpmsumh, 4, 17)
2724 GEN_VXFORM(vpmsumw, 4, 18)
2726 #define GEN_BCD(op)                                 \
2727 static void gen_##op(DisasContext *ctx)             \
2728 {                                                   \
2729     TCGv_ptr ra, rb, rd;                            \
2730     TCGv_i32 ps;                                    \
2731                                                     \
2732     if (unlikely(!ctx->altivec_enabled)) {          \
2733         gen_exception(ctx, POWERPC_EXCP_VPU);       \
2734         return;                                     \
2735     }                                               \
2736                                                     \
2737     ra = gen_avr_ptr(rA(ctx->opcode));              \
2738     rb = gen_avr_ptr(rB(ctx->opcode));              \
2739     rd = gen_avr_ptr(rD(ctx->opcode));              \
2740                                                     \
2741     ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
2742                                                     \
2743     gen_helper_##op(cpu_crf[6], rd, ra, rb, ps);    \
2744                                                     \
2745     tcg_temp_free_ptr(ra);                          \
2746     tcg_temp_free_ptr(rb);                          \
2747     tcg_temp_free_ptr(rd);                          \
2748     tcg_temp_free_i32(ps);                          \
2749 }
2751 #define GEN_BCD2(op)                                \
2752 static void gen_##op(DisasContext *ctx)             \
2753 {                                                   \
2754     TCGv_ptr rd, rb;                                \
2755     TCGv_i32 ps;                                    \
2756                                                     \
2757     if (unlikely(!ctx->altivec_enabled)) {          \
2758         gen_exception(ctx, POWERPC_EXCP_VPU);       \
2759         return;                                     \
2760     }                                               \
2761                                                     \
2762     rb = gen_avr_ptr(rB(ctx->opcode));              \
2763     rd = gen_avr_ptr(rD(ctx->opcode));              \
2764                                                     \
2765     ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
2766                                                     \
2767     gen_helper_##op(cpu_crf[6], rd, rb, ps);        \
2768                                                     \
2769     tcg_temp_free_ptr(rb);                          \
2770     tcg_temp_free_ptr(rd);                          \
2771     tcg_temp_free_i32(ps);                          \
2772 }
2774 GEN_BCD(bcdadd)
2775 GEN_BCD(bcdsub)
2776 GEN_BCD2(bcdcfn)
2777 GEN_BCD2(bcdctn)
2778 GEN_BCD2(bcdcfz)
2779 GEN_BCD2(bcdctz)
2780 GEN_BCD2(bcdcfsq)
2781 GEN_BCD2(bcdctsq)
2782 GEN_BCD2(bcdsetsgn)
2783 GEN_BCD(bcdcpsgn);
2784 GEN_BCD(bcds);
2785 GEN_BCD(bcdus);
2786 GEN_BCD(bcdsr);
2787 GEN_BCD(bcdtrunc);
2788 GEN_BCD(bcdutrunc);
2790 static void gen_xpnd04_1(DisasContext *ctx)
2791 {
2792     switch (opc4(ctx->opcode)) {
2793     case 0:
2794         gen_bcdctsq(ctx);
2795         break;
2796     case 2:
2797         gen_bcdcfsq(ctx);
2798         break;
2799     case 4:
2800         gen_bcdctz(ctx);
2801         break;
2802     case 5:
2803         gen_bcdctn(ctx);
2804         break;
2805     case 6:
2806         gen_bcdcfz(ctx);
2807         break;
2808     case 7:
2809         gen_bcdcfn(ctx);
2810         break;
2811     case 31:
2812         gen_bcdsetsgn(ctx);
2813         break;
2814     default:
2815         gen_invalid(ctx);
2816         break;
2817     }
2818 }
2820 static void gen_xpnd04_2(DisasContext *ctx)
2821 {
2822     switch (opc4(ctx->opcode)) {
2823     case 0:
2824         gen_bcdctsq(ctx);
2825         break;
2826     case 2:
2827         gen_bcdcfsq(ctx);
2828         break;
2829     case 4:
2830         gen_bcdctz(ctx);
2831         break;
2832     case 6:
2833         gen_bcdcfz(ctx);
2834         break;
2835     case 7:
2836         gen_bcdcfn(ctx);
2837         break;
2838     case 31:
2839         gen_bcdsetsgn(ctx);
2840         break;
2841     default:
2842         gen_invalid(ctx);
2843         break;
2844     }
GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \
                xpnd04_2, PPC_NONE, PPC2_ISA300)

GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \
                bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE, \
                bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \
                bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE, \
                bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vaddshs, PPC_ALTIVEC, PPC_NONE, \
                bcdcpsgn, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubudm, PPC2_ALTIVEC_207, PPC_NONE, \
                bcds, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \
                bcdus, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \
                bcdtrunc, PPC_NONE, PPC2_ISA300)

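/* vsbox applies the AES SubBytes (S-box) transformation to each byte. */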
static void gen_vsbox(DisasContext *ctx)
{
    TCGv_ptr ra, rd;
    if (unlikely(!ctx->altivec_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VPU);
        return;
    }
    ra = gen_avr_ptr(rA(ctx->opcode));
    rd = gen_avr_ptr(rD(ctx->opcode));
    gen_helper_vsbox(rd, ra);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rd);
}

GEN_VXFORM(vcipher, 4, 20)
GEN_VXFORM(vcipherlast, 4, 20)
GEN_VXFORM(vncipher, 4, 21)
GEN_VXFORM(vncipherlast, 4, 21)

GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207,
                vcipherlast, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207,
                vncipherlast, PPC_NONE, PPC2_ALTIVEC_207)

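/*
 * SHA-2 sigma instructions: the rB field is not a register number here;
 * it carries the st bit and six mask that select which sigma function
 * is applied to each element, so it is passed to the helper as a
 * constant.
 */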
#define VSHASIGMA(op)                         \
static void gen_##op(DisasContext *ctx)       \
{                                             \
    TCGv_ptr ra, rd;                          \
    TCGv_i32 st_six;                          \
    if (unlikely(!ctx->altivec_enabled)) {    \
        gen_exception(ctx, POWERPC_EXCP_VPU); \
        return;                               \
    }                                         \
    ra = gen_avr_ptr(rA(ctx->opcode));        \
    rd = gen_avr_ptr(rD(ctx->opcode));        \
    st_six = tcg_const_i32(rB(ctx->opcode));  \
    gen_helper_##op(rd, ra, st_six);          \
    tcg_temp_free_ptr(ra);                    \
    tcg_temp_free_ptr(rd);                    \
    tcg_temp_free_i32(st_six);                \
}

VSHASIGMA(vshasigmaw)
VSHASIGMA(vshasigmad)

GEN_VXFORM3(vpermxor, 22, 0xFF)
GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
                vpermxor, PPC_NONE, PPC2_ALTIVEC_207)

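/*
 * For the decodetree handlers below, a GVecGen3 with only .fni8 set
 * makes tcg_gen_gvec_3() emit one 64-bit callback per doubleword of
 * the 128-bit vector; here the callback is simply a helper call.
 */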
static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3 g = {
        .fni8 = gen_helper_CFUGED,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &g);

    return true;
}

static bool trans_VCLZDM(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3i g = {
        .fni8 = do_cntzdm,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                    avr_full_offset(a->vrb), 16, 16, false, &g);

    return true;
}

static bool trans_VCTZDM(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3i g = {
        .fni8 = do_cntzdm,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                    avr_full_offset(a->vrb), 16, 16, true, &g);

    return true;
}

static bool trans_VPDEPD(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3 g = {
        .fni8 = gen_helper_PDEPD,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &g);

    return true;
}

static bool trans_VPEXTD(DisasContext *ctx, arg_VX *a)
{
    static const GVecGen3 g = {
        .fni8 = gen_helper_PEXTD,
        .vece = MO_64,
    };

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &g);

    return true;
}

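/*
 * vmsumudm: accumulate both unsigned 64x64->128 products of vrA and
 * vrB into the 128-bit value from vrC, using mulu2/add2 pairs to
 * propagate the carry between the low and high halves.
 */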
static bool trans_VMSUMUDM(DisasContext *ctx, arg_VA *a)
{
    TCGv_i64 rl, rh, src1, src2;
    int dw;

    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    REQUIRE_VECTOR(ctx);

    rh = tcg_temp_new_i64();
    rl = tcg_temp_new_i64();
    src1 = tcg_temp_new_i64();
    src2 = tcg_temp_new_i64();

    get_avr64(rl, a->rc, false);
    get_avr64(rh, a->rc, true);

    for (dw = 0; dw < 2; dw++) {
        get_avr64(src1, a->vra, dw);
        get_avr64(src2, a->vrb, dw);
        tcg_gen_mulu2_i64(src1, src2, src1, src2);
        tcg_gen_add2_i64(rl, rh, rl, rh, src1, src2);
    }

    set_avr64(a->vrt, rl, false);
    set_avr64(a->vrt, rh, true);

    tcg_temp_free_i64(rl);
    tcg_temp_free_i64(rh);
    tcg_temp_free_i64(src1);
    tcg_temp_free_i64(src2);

    return true;
}

static bool trans_VMSUMCUD(DisasContext *ctx, arg_VA *a)
{
    TCGv_i64 tmp0, tmp1, prod1h, prod1l, prod0h, prod0l, zero;

    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    prod1h = tcg_temp_new_i64();
    prod1l = tcg_temp_new_i64();
    prod0h = tcg_temp_new_i64();
    prod0l = tcg_temp_new_i64();
    zero = tcg_constant_i64(0);

    /* prod1 = vsr[vra+32].dw[1] * vsr[vrb+32].dw[1] */
    get_avr64(tmp0, a->vra, false);
    get_avr64(tmp1, a->vrb, false);
    tcg_gen_mulu2_i64(prod1l, prod1h, tmp0, tmp1);

    /* prod0 = vsr[vra+32].dw[0] * vsr[vrb+32].dw[0] */
    get_avr64(tmp0, a->vra, true);
    get_avr64(tmp1, a->vrb, true);
    tcg_gen_mulu2_i64(prod0l, prod0h, tmp0, tmp1);

    /* Sum the lower 64-bit elements */
    get_avr64(tmp1, a->rc, false);
    tcg_gen_add2_i64(tmp1, tmp0, tmp1, zero, prod1l, zero);
    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0l, zero);

    /*
     * Discard the lower 64 bits, leaving the carry into bit 64.
     * Then sum the higher 64-bit elements.
     */
    get_avr64(tmp1, a->rc, true);
    tcg_gen_add2_i64(tmp1, tmp0, tmp0, zero, tmp1, zero);
    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod1h, zero);
    tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0h, zero);

    /* Discard 64 more bits to complete the CHOP128(temp >> 128) */
    set_avr64(a->vrt, tmp0, false);
    set_avr64(a->vrt, zero, true);

    tcg_temp_free_i64(tmp0);
    tcg_temp_free_i64(tmp1);
    tcg_temp_free_i64(prod1h);
    tcg_temp_free_i64(prod1l);
    tcg_temp_free_i64(prod0h);
    tcg_temp_free_i64(prod0l);

    return true;
}

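/*
 * Common translation for VX-form instructions implemented entirely by
 * an out-of-line helper operating on raw AVR pointers.
 */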
static bool do_vx_helper(DisasContext *ctx, arg_VX *a,
                         void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
    TCGv_ptr ra, rb, rd;
    REQUIRE_VECTOR(ctx);

    ra = gen_avr_ptr(a->vra);
    rb = gen_avr_ptr(a->vrb);
    rd = gen_avr_ptr(a->vrt);
    gen_helper(rd, ra, rb);
    tcg_temp_free_ptr(ra);
    tcg_temp_free_ptr(rb);
    tcg_temp_free_ptr(rd);

    return true;
}

TRANS_FLAGS2(ALTIVEC_207, VADDCUQ, do_vx_helper, gen_helper_VADDCUQ)
TRANS_FLAGS2(ALTIVEC_207, VADDUQM, do_vx_helper, gen_helper_VADDUQM)

TRANS_FLAGS2(ALTIVEC_207, VPMSUMD, do_vx_helper, gen_helper_VPMSUMD)

TRANS_FLAGS2(ALTIVEC_207, VSUBCUQ, do_vx_helper, gen_helper_VSUBCUQ)
TRANS_FLAGS2(ALTIVEC_207, VSUBUQM, do_vx_helper, gen_helper_VSUBUQM)

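/*
 * vaddcuw: the unsigned carry out of a + b is 1 iff b > ~a (i.e. the
 * sum wraps past 2^32), so the carry can be computed without
 * performing the addition itself.
 */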
static void gen_VADDCUW_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_not_vec(vece, a, a);
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, t, a, b);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(t, vece, 1));
}

static void gen_VADDCUW_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_not_i32(a, a);
    tcg_gen_setcond_i32(TCG_COND_LTU, t, a, b);
}

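/* vsubcuw: the not-borrow bit of a - b is 1 iff a >= b. */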
static void gen_VSUBCUW_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_cmp_vec(TCG_COND_GEU, vece, t, a, b);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(t, vece, 1));
}

static void gen_VSUBCUW_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, t, a, b);
}

static bool do_vx_vaddsubcuw(DisasContext *ctx, arg_VX *a, int add)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, 0
    };

    static const GVecGen3 op[] = {
        {
            .fniv = gen_VSUBCUW_vec,
            .fni4 = gen_VSUBCUW_i32,
            .opt_opc = vecop_list,
            .vece = MO_32
        },
        {
            .fniv = gen_VADDCUW_vec,
            .fni4 = gen_VADDCUW_i32,
            .opt_opc = vecop_list,
            .vece = MO_32
        },
    };

    REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &op[add]);

    return true;
}

TRANS(VSUBCUW, do_vx_vaddsubcuw, 0)
TRANS(VADDCUW, do_vx_vaddsubcuw, 1)

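/*
 * Even/odd doubleword multiplies: in the ISA's left-to-right element
 * numbering the even element is the high doubleword of each register,
 * so 'even' selects which halves of vrA/vrB feed the 128-bit product.
 */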
static bool do_vx_vmuleo(DisasContext *ctx, arg_VX *a, bool even,
                         void (*gen_mul)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 vra, vrb, vrt0, vrt1;
    REQUIRE_VECTOR(ctx);

    vra = tcg_temp_new_i64();
    vrb = tcg_temp_new_i64();
    vrt0 = tcg_temp_new_i64();
    vrt1 = tcg_temp_new_i64();

    get_avr64(vra, a->vra, even);
    get_avr64(vrb, a->vrb, even);
    gen_mul(vrt0, vrt1, vra, vrb);
    set_avr64(a->vrt, vrt0, false);
    set_avr64(a->vrt, vrt1, true);

    tcg_temp_free_i64(vra);
    tcg_temp_free_i64(vrb);
    tcg_temp_free_i64(vrt0);
    tcg_temp_free_i64(vrt1);

    return true;
}

static bool trans_VMULLD(DisasContext *ctx, arg_VX *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_mul(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->vra),
                     avr_full_offset(a->vrb), 16, 16);

    return true;
}

TRANS_FLAGS(ALTIVEC, VMULESB, do_vx_helper, gen_helper_VMULESB)
TRANS_FLAGS(ALTIVEC, VMULOSB, do_vx_helper, gen_helper_VMULOSB)
TRANS_FLAGS(ALTIVEC, VMULEUB, do_vx_helper, gen_helper_VMULEUB)
TRANS_FLAGS(ALTIVEC, VMULOUB, do_vx_helper, gen_helper_VMULOUB)
TRANS_FLAGS(ALTIVEC, VMULESH, do_vx_helper, gen_helper_VMULESH)
TRANS_FLAGS(ALTIVEC, VMULOSH, do_vx_helper, gen_helper_VMULOSH)
TRANS_FLAGS(ALTIVEC, VMULEUH, do_vx_helper, gen_helper_VMULEUH)
TRANS_FLAGS(ALTIVEC, VMULOUH, do_vx_helper, gen_helper_VMULOUH)
TRANS_FLAGS2(ALTIVEC_207, VMULESW, do_vx_helper, gen_helper_VMULESW)
TRANS_FLAGS2(ALTIVEC_207, VMULOSW, do_vx_helper, gen_helper_VMULOSW)
TRANS_FLAGS2(ALTIVEC_207, VMULEUW, do_vx_helper, gen_helper_VMULEUW)
TRANS_FLAGS2(ALTIVEC_207, VMULOUW, do_vx_helper, gen_helper_VMULOUW)
TRANS_FLAGS2(ISA310, VMULESD, do_vx_vmuleo, true , tcg_gen_muls2_i64)
TRANS_FLAGS2(ISA310, VMULOSD, do_vx_vmuleo, false, tcg_gen_muls2_i64)
TRANS_FLAGS2(ISA310, VMULEUD, do_vx_vmuleo, true , tcg_gen_mulu2_i64)
TRANS_FLAGS2(ISA310, VMULOUD, do_vx_vmuleo, false, tcg_gen_mulu2_i64)

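/*
 * Multiply-high word, two 32-bit lanes per 64-bit element: lh holds the
 * full product of the low lanes and hh that of the high lanes, whose
 * upper 32 bits are already the wanted high-lane result, so a single
 * deposit assembles the output element.
 */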
static void do_vx_vmulhw_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
{
    TCGv_i64 hh, lh, temp;

    hh = tcg_temp_new_i64();
    lh = tcg_temp_new_i64();
    temp = tcg_temp_new_i64();

    if (sign) {
        tcg_gen_ext32s_i64(lh, a);
        tcg_gen_ext32s_i64(temp, b);
    } else {
        tcg_gen_ext32u_i64(lh, a);
        tcg_gen_ext32u_i64(temp, b);
    }
    tcg_gen_mul_i64(lh, lh, temp);

    if (sign) {
        tcg_gen_sari_i64(hh, a, 32);
        tcg_gen_sari_i64(temp, b, 32);
    } else {
        tcg_gen_shri_i64(hh, a, 32);
        tcg_gen_shri_i64(temp, b, 32);
    }
    tcg_gen_mul_i64(hh, hh, temp);

    tcg_gen_shri_i64(lh, lh, 32);
    tcg_gen_deposit_i64(t, hh, lh, 0, 32);

    tcg_temp_free_i64(hh);
    tcg_temp_free_i64(lh);
    tcg_temp_free_i64(temp);
}

static void do_vx_vmulhd_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
{
    TCGv_i64 tlow;

    tlow = tcg_temp_new_i64();
    if (sign) {
        tcg_gen_muls2_i64(tlow, t, a, b);
    } else {
        tcg_gen_mulu2_i64(tlow, t, a, b);
    }

    tcg_temp_free_i64(tlow);
}

static bool do_vx_mulh(DisasContext *ctx, arg_VX *a, bool sign,
                       void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, bool))
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    REQUIRE_VECTOR(ctx);

    TCGv_i64 vra, vrb, vrt;
    int i;

    vra = tcg_temp_new_i64();
    vrb = tcg_temp_new_i64();
    vrt = tcg_temp_new_i64();

    for (i = 0; i < 2; i++) {
        get_avr64(vra, a->vra, i);
        get_avr64(vrb, a->vrb, i);
        get_avr64(vrt, a->vrt, i);

        func(vrt, vra, vrb, sign);

        set_avr64(a->vrt, vrt, i);
    }

    tcg_temp_free_i64(vra);
    tcg_temp_free_i64(vrb);
    tcg_temp_free_i64(vrt);

    return true;
}

TRANS(VMULHSW, do_vx_mulh, true , do_vx_vmulhw_i64)
TRANS(VMULHSD, do_vx_mulh, true , do_vx_vmulhd_i64)
TRANS(VMULHUW, do_vx_mulh, false, do_vx_vmulhw_i64)
TRANS(VMULHUD, do_vx_mulh, false, do_vx_vmulhd_i64)

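/*
 * Element-wise division/modulo: exactly one of func_32/func_64 is
 * non-NULL, matching vece, and gvec expands it across the vector.
 */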
static bool do_vdiv_vmod(DisasContext *ctx, arg_VX *a, const int vece,
                         void (*func_32)(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b),
                         void (*func_64)(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b))
{
    const GVecGen3 op = {
        .fni4 = func_32,
        .fni8 = func_64,
        .vece = vece
    };

    REQUIRE_VECTOR(ctx);

    tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
                   avr_full_offset(a->vrb), 16, 16, &op);

    return true;
}

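/*
 * The ISA leaves the result undefined for division by zero and for
 * signed overflow (INT_MIN / -1).  Force the divisor to 1 in those
 * cases so the host division never traps; the value produced is one
 * of the permitted undefined results.
 */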
#define DIVU32(NAME, DIV)                                               \
static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)                    \
{                                                                       \
    TCGv_i32 zero = tcg_constant_i32(0);                                \
    TCGv_i32 one = tcg_constant_i32(1);                                 \
    tcg_gen_movcond_i32(TCG_COND_EQ, b, b, zero, one, b);               \
    DIV(t, a, b);                                                       \
}

#define DIVS32(NAME, DIV)                                               \
static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)                    \
{                                                                       \
    TCGv_i32 t0 = tcg_temp_new_i32();                                   \
    TCGv_i32 t1 = tcg_temp_new_i32();                                   \
    tcg_gen_setcondi_i32(TCG_COND_EQ, t0, a, INT32_MIN);                \
    tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, -1);                       \
    tcg_gen_and_i32(t0, t0, t1);                                        \
    tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, 0);                        \
    tcg_gen_or_i32(t0, t0, t1);                                         \
    tcg_gen_movi_i32(t1, 0);                                            \
    tcg_gen_movcond_i32(TCG_COND_NE, b, t0, t1, t0, b);                 \
    DIV(t, a, b);                                                       \
    tcg_temp_free_i32(t0);                                              \
    tcg_temp_free_i32(t1);                                              \
}

#define DIVU64(NAME, DIV)                                               \
static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)                    \
{                                                                       \
    TCGv_i64 zero = tcg_constant_i64(0);                                \
    TCGv_i64 one = tcg_constant_i64(1);                                 \
    tcg_gen_movcond_i64(TCG_COND_EQ, b, b, zero, one, b);               \
    DIV(t, a, b);                                                       \
}

#define DIVS64(NAME, DIV)                                               \
static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)                    \
{                                                                       \
    TCGv_i64 t0 = tcg_temp_new_i64();                                   \
    TCGv_i64 t1 = tcg_temp_new_i64();                                   \
    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, a, INT64_MIN);                \
    tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, -1);                       \
    tcg_gen_and_i64(t0, t0, t1);                                        \
    tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, 0);                        \
    tcg_gen_or_i64(t0, t0, t1);                                         \
    tcg_gen_movi_i64(t1, 0);                                            \
    tcg_gen_movcond_i64(TCG_COND_NE, b, t0, t1, t0, b);                 \
    DIV(t, a, b);                                                       \
    tcg_temp_free_i64(t0);                                              \
    tcg_temp_free_i64(t1);                                              \
}

DIVS32(do_divsw, tcg_gen_div_i32)
DIVU32(do_divuw, tcg_gen_divu_i32)
DIVS64(do_divsd, tcg_gen_div_i64)
DIVU64(do_divud, tcg_gen_divu_i64)

TRANS_FLAGS2(ISA310, VDIVSW, do_vdiv_vmod, MO_32, do_divsw, NULL)
TRANS_FLAGS2(ISA310, VDIVUW, do_vdiv_vmod, MO_32, do_divuw, NULL)
TRANS_FLAGS2(ISA310, VDIVSD, do_vdiv_vmod, MO_64, NULL, do_divsd)
TRANS_FLAGS2(ISA310, VDIVUD, do_vdiv_vmod, MO_64, NULL, do_divud)
TRANS_FLAGS2(ISA310, VDIVSQ, do_vx_helper, gen_helper_VDIVSQ)
TRANS_FLAGS2(ISA310, VDIVUQ, do_vx_helper, gen_helper_VDIVUQ)

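/*
 * Extended divide: the quotient of (a << 32) / b, computed in 64 bits.
 * These routines are wrapped in the DIVS32/DIVU32 guards below so the
 * host division never traps.
 */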
static void do_dives_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i64 val1, val2;

    val1 = tcg_temp_new_i64();
    val2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(val1, a);
    tcg_gen_ext_i32_i64(val2, b);

    /* (a << 32) / b */
    tcg_gen_shli_i64(val1, val1, 32);
    tcg_gen_div_i64(val1, val1, val2);

    /* If the quotient doesn't fit in 32 bits the result is undefined */
    tcg_gen_extrl_i64_i32(t, val1);

    tcg_temp_free_i64(val1);
    tcg_temp_free_i64(val2);
}

static void do_diveu_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i64 val1, val2;

    val1 = tcg_temp_new_i64();
    val2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(val1, a);
    tcg_gen_extu_i32_i64(val2, b);

    /* (a << 32) / b */
    tcg_gen_shli_i64(val1, val1, 32);
    tcg_gen_divu_i64(val1, val1, val2);

    /* If the quotient doesn't fit in 32 bits the result is undefined */
    tcg_gen_extrl_i64_i32(t, val1);

    tcg_temp_free_i64(val1);
    tcg_temp_free_i64(val2);
}

DIVS32(do_divesw, do_dives_i32)
DIVU32(do_diveuw, do_diveu_i32)

DIVS32(do_modsw, tcg_gen_rem_i32)
DIVU32(do_moduw, tcg_gen_remu_i32)
DIVS64(do_modsd, tcg_gen_rem_i64)
DIVU64(do_modud, tcg_gen_remu_i64)

TRANS_FLAGS2(ISA310, VDIVESW, do_vdiv_vmod, MO_32, do_divesw, NULL)
TRANS_FLAGS2(ISA310, VDIVEUW, do_vdiv_vmod, MO_32, do_diveuw, NULL)
TRANS_FLAGS2(ISA310, VDIVESD, do_vx_helper, gen_helper_VDIVESD)
TRANS_FLAGS2(ISA310, VDIVEUD, do_vx_helper, gen_helper_VDIVEUD)
TRANS_FLAGS2(ISA310, VDIVESQ, do_vx_helper, gen_helper_VDIVESQ)
TRANS_FLAGS2(ISA310, VDIVEUQ, do_vx_helper, gen_helper_VDIVEUQ)

TRANS_FLAGS2(ISA310, VMODSW, do_vdiv_vmod, MO_32, do_modsw, NULL)
TRANS_FLAGS2(ISA310, VMODUW, do_vdiv_vmod, MO_32, do_moduw, NULL)
TRANS_FLAGS2(ISA310, VMODSD, do_vdiv_vmod, MO_64, NULL, do_modsd)
TRANS_FLAGS2(ISA310, VMODUD, do_vdiv_vmod, MO_64, NULL, do_modud)
TRANS_FLAGS2(ISA310, VMODSQ, do_vx_helper, gen_helper_VMODSQ)
TRANS_FLAGS2(ISA310, VMODUQ, do_vx_helper, gen_helper_VMODUQ)

#undef DIVS32
#undef DIVU32
#undef DIVS64
#undef DIVU64

#undef GEN_VR_LDX
#undef GEN_VR_STX
#undef GEN_VR_LVE
#undef GEN_VR_STVE

#undef GEN_VX_LOGICAL
#undef GEN_VX_LOGICAL_207
#undef GEN_VXFORM
#undef GEN_VXFORM_207
#undef GEN_VXFORM_DUAL
#undef GEN_VXRFORM_DUAL
#undef GEN_VXRFORM1
#undef GEN_VXRFORM
#undef GEN_VXFORM_VSPLTI
#undef GEN_VXFORM_NOA
#undef GEN_VXFORM_UIMM
#undef GEN_VAFORM_PAIRED

#undef GEN_BCD2