target/ppc: use int128.h methods in vsubcuq
qemu/kevin.git: target/ppc/translate/vmx-impl.c.inc
1 /*
2  * translate/vmx-impl.c
3  *
4  * Altivec/VMX translation
5  */
7 /***                      Altivec vector extension                         ***/
8 /* Altivec registers moves */
10 static inline TCGv_ptr gen_avr_ptr(int reg)
12     TCGv_ptr r = tcg_temp_new_ptr();
13     tcg_gen_addi_ptr(r, cpu_env, avr_full_offset(reg));
14     return r;
17 #define GEN_VR_LDX(name, opc2, opc3)                                          \
18 static void glue(gen_, name)(DisasContext *ctx)                               \
19 {                                                                             \
20     TCGv EA;                                                                  \
21     TCGv_i64 avr;                                                             \
22     if (unlikely(!ctx->altivec_enabled)) {                                    \
23         gen_exception(ctx, POWERPC_EXCP_VPU);                                 \
24         return;                                                               \
25     }                                                                         \
26     gen_set_access_type(ctx, ACCESS_INT);                                     \
27     avr = tcg_temp_new_i64();                                                 \
28     EA = tcg_temp_new();                                                      \
29     gen_addr_reg_index(ctx, EA);                                              \
30     tcg_gen_andi_tl(EA, EA, ~0xf);                                            \
31     /*                                                                        \
32      * We only need to swap high and low halves. gen_qemu_ld64_i64            \
33      * does necessary 64-bit byteswap already.                                \
34      */                                                                       \
35     if (ctx->le_mode) {                                                       \
36         gen_qemu_ld64_i64(ctx, avr, EA);                                      \
37         set_avr64(rD(ctx->opcode), avr, false);                               \
38         tcg_gen_addi_tl(EA, EA, 8);                                           \
39         gen_qemu_ld64_i64(ctx, avr, EA);                                      \
40         set_avr64(rD(ctx->opcode), avr, true);                                \
41     } else {                                                                  \
42         gen_qemu_ld64_i64(ctx, avr, EA);                                      \
43         set_avr64(rD(ctx->opcode), avr, true);                                \
44         tcg_gen_addi_tl(EA, EA, 8);                                           \
45         gen_qemu_ld64_i64(ctx, avr, EA);                                      \
46         set_avr64(rD(ctx->opcode), avr, false);                               \
47     }                                                                         \
48     tcg_temp_free(EA);                                                        \
49     tcg_temp_free_i64(avr);                                                   \
52 #define GEN_VR_STX(name, opc2, opc3)                                          \
53 static void gen_st##name(DisasContext *ctx)                                   \
54 {                                                                             \
55     TCGv EA;                                                                  \
56     TCGv_i64 avr;                                                             \
57     if (unlikely(!ctx->altivec_enabled)) {                                    \
58         gen_exception(ctx, POWERPC_EXCP_VPU);                                 \
59         return;                                                               \
60     }                                                                         \
61     gen_set_access_type(ctx, ACCESS_INT);                                     \
62     avr = tcg_temp_new_i64();                                                 \
63     EA = tcg_temp_new();                                                      \
64     gen_addr_reg_index(ctx, EA);                                              \
65     tcg_gen_andi_tl(EA, EA, ~0xf);                                            \
66     /*                                                                        \
67      * We only need to swap high and low halves. gen_qemu_st64_i64            \
68      * does necessary 64-bit byteswap already.                                \
69      */                                                                       \
70     if (ctx->le_mode) {                                                       \
71         get_avr64(avr, rD(ctx->opcode), false);                               \
72         gen_qemu_st64_i64(ctx, avr, EA);                                      \
73         tcg_gen_addi_tl(EA, EA, 8);                                           \
74         get_avr64(avr, rD(ctx->opcode), true);                                \
75         gen_qemu_st64_i64(ctx, avr, EA);                                      \
76     } else {                                                                  \
77         get_avr64(avr, rD(ctx->opcode), true);                                \
78         gen_qemu_st64_i64(ctx, avr, EA);                                      \
79         tcg_gen_addi_tl(EA, EA, 8);                                           \
80         get_avr64(avr, rD(ctx->opcode), false);                               \
81         gen_qemu_st64_i64(ctx, avr, EA);                                      \
82     }                                                                         \
83     tcg_temp_free(EA);                                                        \
84     tcg_temp_free_i64(avr);                                                   \
87 #define GEN_VR_LVE(name, opc2, opc3, size)                              \
88 static void gen_lve##name(DisasContext *ctx)                            \
89     {                                                                   \
90         TCGv EA;                                                        \
91         TCGv_ptr rs;                                                    \
92         if (unlikely(!ctx->altivec_enabled)) {                          \
93             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
94             return;                                                     \
95         }                                                               \
96         gen_set_access_type(ctx, ACCESS_INT);                           \
97         EA = tcg_temp_new();                                            \
98         gen_addr_reg_index(ctx, EA);                                    \
99         if (size > 1) {                                                 \
100             tcg_gen_andi_tl(EA, EA, ~(size - 1));                       \
101         }                                                               \
102         rs = gen_avr_ptr(rS(ctx->opcode));                              \
103         gen_helper_lve##name(cpu_env, rs, EA);                          \
104         tcg_temp_free(EA);                                              \
105         tcg_temp_free_ptr(rs);                                          \
106     }
108 #define GEN_VR_STVE(name, opc2, opc3, size)                             \
109 static void gen_stve##name(DisasContext *ctx)                           \
110     {                                                                   \
111         TCGv EA;                                                        \
112         TCGv_ptr rs;                                                    \
113         if (unlikely(!ctx->altivec_enabled)) {                          \
114             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
115             return;                                                     \
116         }                                                               \
117         gen_set_access_type(ctx, ACCESS_INT);                           \
118         EA = tcg_temp_new();                                            \
119         gen_addr_reg_index(ctx, EA);                                    \
120         if (size > 1) {                                                 \
121             tcg_gen_andi_tl(EA, EA, ~(size - 1));                       \
122         }                                                               \
123         rs = gen_avr_ptr(rS(ctx->opcode));                              \
124         gen_helper_stve##name(cpu_env, rs, EA);                         \
125         tcg_temp_free(EA);                                              \
126         tcg_temp_free_ptr(rs);                                          \
127     }
129 GEN_VR_LDX(lvx, 0x07, 0x03);
130 /* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
131 GEN_VR_LDX(lvxl, 0x07, 0x0B);
133 GEN_VR_LVE(bx, 0x07, 0x00, 1);
134 GEN_VR_LVE(hx, 0x07, 0x01, 2);
135 GEN_VR_LVE(wx, 0x07, 0x02, 4);
137 GEN_VR_STX(svx, 0x07, 0x07);
138 /* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
139 GEN_VR_STX(svxl, 0x07, 0x0F);
141 GEN_VR_STVE(bx, 0x07, 0x04, 1);
142 GEN_VR_STVE(hx, 0x07, 0x05, 2);
143 GEN_VR_STVE(wx, 0x07, 0x06, 4);
145 static void gen_mfvscr(DisasContext *ctx)
147     TCGv_i32 t;
148     TCGv_i64 avr;
149     if (unlikely(!ctx->altivec_enabled)) {
150         gen_exception(ctx, POWERPC_EXCP_VPU);
151         return;
152     }
153     avr = tcg_temp_new_i64();
154     tcg_gen_movi_i64(avr, 0);
155     set_avr64(rD(ctx->opcode), avr, true);
156     t = tcg_temp_new_i32();
157     gen_helper_mfvscr(t, cpu_env);
158     tcg_gen_extu_i32_i64(avr, t);
159     set_avr64(rD(ctx->opcode), avr, false);
160     tcg_temp_free_i32(t);
161     tcg_temp_free_i64(avr);
164 static void gen_mtvscr(DisasContext *ctx)
166     TCGv_i32 val;
167     int bofs;
169     if (unlikely(!ctx->altivec_enabled)) {
170         gen_exception(ctx, POWERPC_EXCP_VPU);
171         return;
172     }
174     val = tcg_temp_new_i32();
175     bofs = avr_full_offset(rB(ctx->opcode));
176 #if HOST_BIG_ENDIAN
177     bofs += 3 * 4;
178 #endif
180     tcg_gen_ld_i32(val, cpu_env, bofs);
181     gen_helper_mtvscr(cpu_env, val);
182     tcg_temp_free_i32(val);
185 #define GEN_VX_VMUL10(name, add_cin, ret_carry)                         \
186 static void glue(gen_, name)(DisasContext *ctx)                         \
187 {                                                                       \
188     TCGv_i64 t0;                                                        \
189     TCGv_i64 t1;                                                        \
190     TCGv_i64 t2;                                                        \
191     TCGv_i64 avr;                                                       \
192     TCGv_i64 ten, z;                                                    \
193                                                                         \
194     if (unlikely(!ctx->altivec_enabled)) {                              \
195         gen_exception(ctx, POWERPC_EXCP_VPU);                           \
196         return;                                                         \
197     }                                                                   \
198                                                                         \
199     t0 = tcg_temp_new_i64();                                            \
200     t1 = tcg_temp_new_i64();                                            \
201     t2 = tcg_temp_new_i64();                                            \
202     avr = tcg_temp_new_i64();                                           \
203     ten = tcg_const_i64(10);                                            \
204     z = tcg_const_i64(0);                                               \
205                                                                         \
206     if (add_cin) {                                                      \
207         get_avr64(avr, rA(ctx->opcode), false);                         \
208         tcg_gen_mulu2_i64(t0, t1, avr, ten);                            \
209         get_avr64(avr, rB(ctx->opcode), false);                         \
210         tcg_gen_andi_i64(t2, avr, 0xF);                                 \
211         tcg_gen_add2_i64(avr, t2, t0, t1, t2, z);                       \
212         set_avr64(rD(ctx->opcode), avr, false);                         \
213     } else {                                                            \
214         get_avr64(avr, rA(ctx->opcode), false);                         \
215         tcg_gen_mulu2_i64(avr, t2, avr, ten);                           \
216         set_avr64(rD(ctx->opcode), avr, false);                         \
217     }                                                                   \
218                                                                         \
219     if (ret_carry) {                                                    \
220         get_avr64(avr, rA(ctx->opcode), true);                          \
221         tcg_gen_mulu2_i64(t0, t1, avr, ten);                            \
222         tcg_gen_add2_i64(t0, avr, t0, t1, t2, z);                       \
223         set_avr64(rD(ctx->opcode), avr, false);                         \
224         set_avr64(rD(ctx->opcode), z, true);                            \
225     } else {                                                            \
226         get_avr64(avr, rA(ctx->opcode), true);                          \
227         tcg_gen_mul_i64(t0, avr, ten);                                  \
228         tcg_gen_add_i64(avr, t0, t2);                                   \
229         set_avr64(rD(ctx->opcode), avr, true);                          \
230     }                                                                   \
231                                                                         \
232     tcg_temp_free_i64(t0);                                              \
233     tcg_temp_free_i64(t1);                                              \
234     tcg_temp_free_i64(t2);                                              \
235     tcg_temp_free_i64(avr);                                             \
236     tcg_temp_free_i64(ten);                                             \
237     tcg_temp_free_i64(z);                                               \
238 }                                                                       \
240 GEN_VX_VMUL10(vmul10uq, 0, 0);
241 GEN_VX_VMUL10(vmul10euq, 1, 0);
242 GEN_VX_VMUL10(vmul10cuq, 0, 1);
243 GEN_VX_VMUL10(vmul10ecuq, 1, 1);
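/*
 * A minimal host-side sketch of the multiply-by-ten decomposition used by the
 * GEN_VX_VMUL10 macro above, assuming <stdint.h> and a compiler that provides
 * unsigned __int128; the helper name is illustrative and the block is not
 * part of the translator.
 */
#if 0 /* illustrative sketch only, not compiled */
#include <stdint.h>

static void mul10_128(uint64_t hi, uint64_t lo, uint64_t cin,
                      uint64_t *rhi, uint64_t *rlo, uint64_t *cout)
{
    /* low doubleword * 10, plus the optional decimal carry-in nibble */
    unsigned __int128 p_lo = (unsigned __int128)lo * 10 + (cin & 0xf);
    uint64_t carry_mid = (uint64_t)(p_lo >> 64);          /* t2 in the macro */
    /* high doubleword * 10, plus the carry out of the low half */
    unsigned __int128 p_hi = (unsigned __int128)hi * 10 + carry_mid;

    *rlo  = (uint64_t)p_lo;           /* vmul10uq / vmul10euq low result  */
    *rhi  = (uint64_t)p_hi;           /* vmul10uq / vmul10euq high result */
    *cout = (uint64_t)(p_hi >> 64);   /* vmul10cuq / vmul10ecuq result    */
}
#endif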
245 #define GEN_VXFORM_V(name, vece, tcg_op, opc2, opc3)                    \
246 static void glue(gen_, name)(DisasContext *ctx)                         \
247 {                                                                       \
248     if (unlikely(!ctx->altivec_enabled)) {                              \
249         gen_exception(ctx, POWERPC_EXCP_VPU);                           \
250         return;                                                         \
251     }                                                                   \
252                                                                         \
253     tcg_op(vece,                                                        \
254            avr_full_offset(rD(ctx->opcode)),                            \
255            avr_full_offset(rA(ctx->opcode)),                            \
256            avr_full_offset(rB(ctx->opcode)),                            \
257            16, 16);                                                     \
260 /* Logical operations */
261 GEN_VXFORM_V(vand, MO_64, tcg_gen_gvec_and, 2, 16);
262 GEN_VXFORM_V(vandc, MO_64, tcg_gen_gvec_andc, 2, 17);
263 GEN_VXFORM_V(vor, MO_64, tcg_gen_gvec_or, 2, 18);
264 GEN_VXFORM_V(vxor, MO_64, tcg_gen_gvec_xor, 2, 19);
265 GEN_VXFORM_V(vnor, MO_64, tcg_gen_gvec_nor, 2, 20);
266 GEN_VXFORM_V(veqv, MO_64, tcg_gen_gvec_eqv, 2, 26);
267 GEN_VXFORM_V(vnand, MO_64, tcg_gen_gvec_nand, 2, 22);
268 GEN_VXFORM_V(vorc, MO_64, tcg_gen_gvec_orc, 2, 21);
270 #define GEN_VXFORM(name, opc2, opc3)                                    \
271 static void glue(gen_, name)(DisasContext *ctx)                         \
272 {                                                                       \
273     TCGv_ptr ra, rb, rd;                                                \
274     if (unlikely(!ctx->altivec_enabled)) {                              \
275         gen_exception(ctx, POWERPC_EXCP_VPU);                           \
276         return;                                                         \
277     }                                                                   \
278     ra = gen_avr_ptr(rA(ctx->opcode));                                  \
279     rb = gen_avr_ptr(rB(ctx->opcode));                                  \
280     rd = gen_avr_ptr(rD(ctx->opcode));                                  \
281     gen_helper_##name(rd, ra, rb);                                      \
282     tcg_temp_free_ptr(ra);                                              \
283     tcg_temp_free_ptr(rb);                                              \
284     tcg_temp_free_ptr(rd);                                              \
287 #define GEN_VXFORM_TRANS(name, opc2, opc3)                              \
288 static void glue(gen_, name)(DisasContext *ctx)                         \
289 {                                                                       \
290     if (unlikely(!ctx->altivec_enabled)) {                              \
291         gen_exception(ctx, POWERPC_EXCP_VPU);                           \
292         return;                                                         \
293     }                                                                   \
294     trans_##name(ctx);                                                  \
297 #define GEN_VXFORM_ENV(name, opc2, opc3)                                \
298 static void glue(gen_, name)(DisasContext *ctx)                         \
299 {                                                                       \
300     TCGv_ptr ra, rb, rd;                                                \
301     if (unlikely(!ctx->altivec_enabled)) {                              \
302         gen_exception(ctx, POWERPC_EXCP_VPU);                           \
303         return;                                                         \
304     }                                                                   \
305     ra = gen_avr_ptr(rA(ctx->opcode));                                  \
306     rb = gen_avr_ptr(rB(ctx->opcode));                                  \
307     rd = gen_avr_ptr(rD(ctx->opcode));                                  \
308     gen_helper_##name(cpu_env, rd, ra, rb);                             \
309     tcg_temp_free_ptr(ra);                                              \
310     tcg_temp_free_ptr(rb);                                              \
311     tcg_temp_free_ptr(rd);                                              \
314 #define GEN_VXFORM3(name, opc2, opc3)                                   \
315 static void glue(gen_, name)(DisasContext *ctx)                         \
316 {                                                                       \
317     TCGv_ptr ra, rb, rc, rd;                                            \
318     if (unlikely(!ctx->altivec_enabled)) {                              \
319         gen_exception(ctx, POWERPC_EXCP_VPU);                           \
320         return;                                                         \
321     }                                                                   \
322     ra = gen_avr_ptr(rA(ctx->opcode));                                  \
323     rb = gen_avr_ptr(rB(ctx->opcode));                                  \
324     rc = gen_avr_ptr(rC(ctx->opcode));                                  \
325     rd = gen_avr_ptr(rD(ctx->opcode));                                  \
326     gen_helper_##name(rd, ra, rb, rc);                                  \
327     tcg_temp_free_ptr(ra);                                              \
328     tcg_temp_free_ptr(rb);                                              \
329     tcg_temp_free_ptr(rc);                                              \
330     tcg_temp_free_ptr(rd);                                              \
334  * Support for Altivec instruction pairs that use bit 31 (Rc) as
335  * an opcode bit.  In general, these pairs come from different
336  * versions of the ISA, so we must also support a pair of flags for
337  * each instruction.
338  */
339 #define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)          \
340 static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
341 {                                                                      \
342     if ((Rc(ctx->opcode) == 0) &&                                      \
343         ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
344         gen_##name0(ctx);                                              \
345     } else if ((Rc(ctx->opcode) == 1) &&                               \
346         ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
347         gen_##name1(ctx);                                              \
348     } else {                                                           \
349         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
350     }                                                                  \
354  * We use this macro if one instruction is realized with direct
355  * translation, and the second one with a helper.
356  */
357 #define GEN_VXFORM_TRANS_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)\
358 static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
359 {                                                                      \
360     if ((Rc(ctx->opcode) == 0) &&                                      \
361         ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
362         if (unlikely(!ctx->altivec_enabled)) {                         \
363             gen_exception(ctx, POWERPC_EXCP_VPU);                      \
364             return;                                                    \
365         }                                                              \
366         trans_##name0(ctx);                                            \
367     } else if ((Rc(ctx->opcode) == 1) &&                               \
368         ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
369         gen_##name1(ctx);                                              \
370     } else {                                                           \
371         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
372     }                                                                  \
375 /* Like GEN_VXFORM_DUAL, but also checks an invalid-form mask for each insn */
376 #define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0,                \
377                             name1, flg1, flg2_1, inval1)                \
378 static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
379 {                                                                       \
380     if ((Rc(ctx->opcode) == 0) &&                                       \
381         ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0)) &&  \
382         !(ctx->opcode & inval0)) {                                      \
383         gen_##name0(ctx);                                               \
384     } else if ((Rc(ctx->opcode) == 1) &&                                \
385                ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1)) && \
386                !(ctx->opcode & inval1)) {                               \
387         gen_##name1(ctx);                                               \
388     } else {                                                            \
389         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);             \
390     }                                                                   \
393 #define GEN_VXFORM_HETRO(name, opc2, opc3)                              \
394 static void glue(gen_, name)(DisasContext *ctx)                         \
395 {                                                                       \
396     TCGv_ptr rb;                                                        \
397     if (unlikely(!ctx->altivec_enabled)) {                              \
398         gen_exception(ctx, POWERPC_EXCP_VPU);                           \
399         return;                                                         \
400     }                                                                   \
401     rb = gen_avr_ptr(rB(ctx->opcode));                                  \
402     gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], rb); \
403     tcg_temp_free_ptr(rb);                                              \
406 GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0);
407 GEN_VXFORM_DUAL_EXT(vaddubm, PPC_ALTIVEC, PPC_NONE, 0,       \
408                     vmul10cuq, PPC_NONE, PPC2_ISA300, 0x0000F800)
409 GEN_VXFORM_V(vadduhm, MO_16, tcg_gen_gvec_add, 0, 1);
410 GEN_VXFORM_DUAL(vadduhm, PPC_ALTIVEC, PPC_NONE,  \
411                 vmul10ecuq, PPC_NONE, PPC2_ISA300)
412 GEN_VXFORM_V(vadduwm, MO_32, tcg_gen_gvec_add, 0, 2);
413 GEN_VXFORM_V(vaddudm, MO_64, tcg_gen_gvec_add, 0, 3);
414 GEN_VXFORM_V(vsububm, MO_8, tcg_gen_gvec_sub, 0, 16);
415 GEN_VXFORM_V(vsubuhm, MO_16, tcg_gen_gvec_sub, 0, 17);
416 GEN_VXFORM_V(vsubuwm, MO_32, tcg_gen_gvec_sub, 0, 18);
417 GEN_VXFORM_V(vsubudm, MO_64, tcg_gen_gvec_sub, 0, 19);
418 GEN_VXFORM_V(vmaxub, MO_8, tcg_gen_gvec_umax, 1, 0);
419 GEN_VXFORM_V(vmaxuh, MO_16, tcg_gen_gvec_umax, 1, 1);
420 GEN_VXFORM_V(vmaxuw, MO_32, tcg_gen_gvec_umax, 1, 2);
421 GEN_VXFORM_V(vmaxud, MO_64, tcg_gen_gvec_umax, 1, 3);
422 GEN_VXFORM_V(vmaxsb, MO_8, tcg_gen_gvec_smax, 1, 4);
423 GEN_VXFORM_V(vmaxsh, MO_16, tcg_gen_gvec_smax, 1, 5);
424 GEN_VXFORM_V(vmaxsw, MO_32, tcg_gen_gvec_smax, 1, 6);
425 GEN_VXFORM_V(vmaxsd, MO_64, tcg_gen_gvec_smax, 1, 7);
426 GEN_VXFORM_V(vminub, MO_8, tcg_gen_gvec_umin, 1, 8);
427 GEN_VXFORM_V(vminuh, MO_16, tcg_gen_gvec_umin, 1, 9);
428 GEN_VXFORM_V(vminuw, MO_32, tcg_gen_gvec_umin, 1, 10);
429 GEN_VXFORM_V(vminud, MO_64, tcg_gen_gvec_umin, 1, 11);
430 GEN_VXFORM_V(vminsb, MO_8, tcg_gen_gvec_smin, 1, 12);
431 GEN_VXFORM_V(vminsh, MO_16, tcg_gen_gvec_smin, 1, 13);
432 GEN_VXFORM_V(vminsw, MO_32, tcg_gen_gvec_smin, 1, 14);
433 GEN_VXFORM_V(vminsd, MO_64, tcg_gen_gvec_smin, 1, 15);
434 GEN_VXFORM(vavgub, 1, 16);
435 GEN_VXFORM(vabsdub, 1, 16);
436 GEN_VXFORM_DUAL(vavgub, PPC_ALTIVEC, PPC_NONE, \
437                 vabsdub, PPC_NONE, PPC2_ISA300)
438 GEN_VXFORM(vavguh, 1, 17);
439 GEN_VXFORM(vabsduh, 1, 17);
440 GEN_VXFORM_DUAL(vavguh, PPC_ALTIVEC, PPC_NONE, \
441                 vabsduh, PPC_NONE, PPC2_ISA300)
442 GEN_VXFORM(vavguw, 1, 18);
443 GEN_VXFORM(vabsduw, 1, 18);
444 GEN_VXFORM_DUAL(vavguw, PPC_ALTIVEC, PPC_NONE, \
445                 vabsduw, PPC_NONE, PPC2_ISA300)
446 GEN_VXFORM(vavgsb, 1, 20);
447 GEN_VXFORM(vavgsh, 1, 21);
448 GEN_VXFORM(vavgsw, 1, 22);
449 GEN_VXFORM(vmrghb, 6, 0);
450 GEN_VXFORM(vmrghh, 6, 1);
451 GEN_VXFORM(vmrghw, 6, 2);
452 GEN_VXFORM(vmrglb, 6, 4);
453 GEN_VXFORM(vmrglh, 6, 5);
454 GEN_VXFORM(vmrglw, 6, 6);
456 static void trans_vmrgew(DisasContext *ctx)
458     int VT = rD(ctx->opcode);
459     int VA = rA(ctx->opcode);
460     int VB = rB(ctx->opcode);
461     TCGv_i64 tmp = tcg_temp_new_i64();
462     TCGv_i64 avr = tcg_temp_new_i64();
464     get_avr64(avr, VB, true);
465     tcg_gen_shri_i64(tmp, avr, 32);
466     get_avr64(avr, VA, true);
467     tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
468     set_avr64(VT, avr, true);
470     get_avr64(avr, VB, false);
471     tcg_gen_shri_i64(tmp, avr, 32);
472     get_avr64(avr, VA, false);
473     tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
474     set_avr64(VT, avr, false);
476     tcg_temp_free_i64(tmp);
477     tcg_temp_free_i64(avr);
480 static void trans_vmrgow(DisasContext *ctx)
482     int VT = rD(ctx->opcode);
483     int VA = rA(ctx->opcode);
484     int VB = rB(ctx->opcode);
485     TCGv_i64 t0 = tcg_temp_new_i64();
486     TCGv_i64 t1 = tcg_temp_new_i64();
487     TCGv_i64 avr = tcg_temp_new_i64();
489     get_avr64(t0, VB, true);
490     get_avr64(t1, VA, true);
491     tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
492     set_avr64(VT, avr, true);
494     get_avr64(t0, VB, false);
495     get_avr64(t1, VA, false);
496     tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
497     set_avr64(VT, avr, false);
499     tcg_temp_free_i64(t0);
500     tcg_temp_free_i64(t1);
501     tcg_temp_free_i64(avr);
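/*
 * A per-doubleword sketch of the shift/deposit sequences used by trans_vmrgew
 * and trans_vmrgow above, assuming <stdint.h>; the helper names are
 * illustrative and the block is not compiled.
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>

/* Even (upper) word of a over the even word of b, as in trans_vmrgew. */
static uint64_t mrgew_dword(uint64_t a, uint64_t b)
{
    return (a & 0xffffffff00000000ull) | (b >> 32);
}

/* Odd (lower) word of a over the odd word of b, as in trans_vmrgow. */
static uint64_t mrgow_dword(uint64_t a, uint64_t b)
{
    return ((a & 0xffffffffull) << 32) | (b & 0xffffffffull);
}
#endif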
505  * lvsl VRT,RA,RB - Load Vector for Shift Left
507  * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28–31].
508  * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
509  * Bytes sh:sh+15 of X are placed into vD.
510  */
511 static void trans_lvsl(DisasContext *ctx)
513     int VT = rD(ctx->opcode);
514     TCGv_i64 result = tcg_temp_new_i64();
515     TCGv_i64 sh = tcg_temp_new_i64();
516     TCGv EA = tcg_temp_new();
518     /* Get sh (from the description) by ANDing EA with 0xf. */
519     gen_addr_reg_index(ctx, EA);
520     tcg_gen_extu_tl_i64(sh, EA);
521     tcg_gen_andi_i64(sh, sh, 0xfULL);
523     /*
524      * Create bytes sh:sh+7 of X (from the description) and place them in
525      * higher doubleword of vD.
526      */
527     tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
528     tcg_gen_addi_i64(result, sh, 0x0001020304050607ull);
529     set_avr64(VT, result, true);
530     /*
531      * Create bytes sh+8:sh+15 of X (from the description) and place them in
532      * lower doubleword of vD.
533      */
534     tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL);
535     set_avr64(VT, result, false);
537     tcg_temp_free_i64(result);
538     tcg_temp_free_i64(sh);
539     tcg_temp_free(EA);
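/*
 * A host-side sketch of the multiply/add trick used by trans_lvsl above,
 * assuming <stdint.h>: multiplying sh by 0x0101010101010101 replicates it into
 * every byte lane, and adding the 00..07 byte pattern then yields bytes
 * sh..sh+7 with no inter-lane carries (sh <= 15, so each byte stays <= 0x16).
 * The helper name is illustrative and the block is not compiled.
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>

static uint64_t lvsl_hi_dword(uint64_t ea)
{
    uint64_t sh = ea & 0xf;

    return sh * 0x0101010101010101ull + 0x0001020304050607ull;
}

/* e.g. sh = 3 gives 0x030405060708090a, i.e. bytes 3..10 of X */
#endif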
543  * lvsr VRT,RA,RB - Load Vector for Shift Right
545  * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28–31].
546  * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
547  * Bytes (16-sh):(31-sh) of X are placed into vD.
548  */
549 static void trans_lvsr(DisasContext *ctx)
551     int VT = rD(ctx->opcode);
552     TCGv_i64 result = tcg_temp_new_i64();
553     TCGv_i64 sh = tcg_temp_new_i64();
554     TCGv EA = tcg_temp_new();
557     /* Get sh (from the description) by ANDing EA with 0xf. */
558     gen_addr_reg_index(ctx, EA);
559     tcg_gen_extu_tl_i64(sh, EA);
560     tcg_gen_andi_i64(sh, sh, 0xfULL);
562     /*
563      * Create bytes (16-sh):(23-sh) of X (from the description) and place them in
564      * higher doubleword of vD.
565      */
566     tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
567     tcg_gen_subfi_i64(result, 0x1011121314151617ULL, sh);
568     set_avr64(VT, result, true);
569     /*
570      * Create bytes (24-sh):(31-sh) of X (from the description) and place them in
571      * lower doubleword of vD.
572      */
573     tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh);
574     set_avr64(VT, result, false);
576     tcg_temp_free_i64(result);
577     tcg_temp_free_i64(sh);
578     tcg_temp_free(EA);
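/*
 * The matching sketch for trans_lvsr above, assuming <stdint.h>: subtracting
 * the replicated sh from the 0x10..0x17 byte pattern yields bytes
 * (16-sh)..(23-sh) with no inter-lane borrows, since every byte of the
 * minuend is at least 0x10 and sh <= 0xf. The helper name is illustrative and
 * the block is not compiled.
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>

static uint64_t lvsr_hi_dword(uint64_t ea)
{
    uint64_t sh = ea & 0xf;

    return 0x1011121314151617ull - sh * 0x0101010101010101ull;
}

/* e.g. sh = 3 gives 0x0d0e0f1011121314, i.e. bytes 13..20 of X */
#endif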
582  * vsl VRT,VRA,VRB - Vector Shift Left
584  * Shift the 128-bit value of vA left by the amount specified in bits 125-127 of vB.
585  * The lowest 3 bits of each byte element of register vB must be identical,
586  * otherwise the result is undefined.
587  */
588 static void trans_vsl(DisasContext *ctx)
590     int VT = rD(ctx->opcode);
591     int VA = rA(ctx->opcode);
592     int VB = rB(ctx->opcode);
593     TCGv_i64 avr = tcg_temp_new_i64();
594     TCGv_i64 sh = tcg_temp_new_i64();
595     TCGv_i64 carry = tcg_temp_new_i64();
596     TCGv_i64 tmp = tcg_temp_new_i64();
598     /* Place bits 125-127 of vB in 'sh'. */
599     get_avr64(avr, VB, false);
600     tcg_gen_andi_i64(sh, avr, 0x07ULL);
602     /*
603      * Save highest 'sh' bits of lower doubleword element of vA in variable
604      * 'carry' and perform shift on lower doubleword.
605      */
606     get_avr64(avr, VA, false);
607     tcg_gen_subfi_i64(tmp, 32, sh);
608     tcg_gen_shri_i64(carry, avr, 32);
609     tcg_gen_shr_i64(carry, carry, tmp);
610     tcg_gen_shl_i64(avr, avr, sh);
611     set_avr64(VT, avr, false);
613     /*
614      * Perform shift on higher doubleword element of vA and replace lowest
615      * 'sh' bits with 'carry'.
616      */
617     get_avr64(avr, VA, true);
618     tcg_gen_shl_i64(avr, avr, sh);
619     tcg_gen_or_i64(avr, avr, carry);
620     set_avr64(VT, avr, true);
622     tcg_temp_free_i64(avr);
623     tcg_temp_free_i64(sh);
624     tcg_temp_free_i64(carry);
625     tcg_temp_free_i64(tmp);
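/*
 * A two-halves sketch of the 0..7 bit left shift performed by trans_vsl above,
 * assuming <stdint.h>; the (lo >> 32) >> (32 - sh) form matches the code's way
 * of computing lo >> (64 - sh) without an undefined 64-bit shift when sh == 0.
 * The helper name is illustrative and the block is not compiled.
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>

static void vsl128(uint64_t hi, uint64_t lo, unsigned sh,
                   uint64_t *rhi, uint64_t *rlo)
{
    uint64_t carry = (lo >> 32) >> (32 - sh);   /* top sh bits of lo */

    *rlo = lo << sh;
    *rhi = (hi << sh) | carry;
}
#endif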
629  * vsr VRT,VRA,VRB - Vector Shift Right
631  * Shift the 128-bit value of vA right by the amount specified in bits 125-127 of vB.
632  * The lowest 3 bits of each byte element of register vB must be identical,
633  * otherwise the result is undefined.
634  */
635 static void trans_vsr(DisasContext *ctx)
637     int VT = rD(ctx->opcode);
638     int VA = rA(ctx->opcode);
639     int VB = rB(ctx->opcode);
640     TCGv_i64 avr = tcg_temp_new_i64();
641     TCGv_i64 sh = tcg_temp_new_i64();
642     TCGv_i64 carry = tcg_temp_new_i64();
643     TCGv_i64 tmp = tcg_temp_new_i64();
645     /* Place bits 125-127 of vB in 'sh'. */
646     get_avr64(avr, VB, false);
647     tcg_gen_andi_i64(sh, avr, 0x07ULL);
649     /*
650      * Save lowest 'sh' bits of higher doubleword element of vA in variable
651      * 'carry' and perform shift on higher doubleword.
652      */
653     get_avr64(avr, VA, true);
654     tcg_gen_subfi_i64(tmp, 32, sh);
655     tcg_gen_shli_i64(carry, avr, 32);
656     tcg_gen_shl_i64(carry, carry, tmp);
657     tcg_gen_shr_i64(avr, avr, sh);
658     set_avr64(VT, avr, true);
659     /*
660      * Perform shift on lower doubleword element of vA and replace highest
661      * 'sh' bits with 'carry'.
662      */
663     get_avr64(avr, VA, false);
664     tcg_gen_shr_i64(avr, avr, sh);
665     tcg_gen_or_i64(avr, avr, carry);
666     set_avr64(VT, avr, false);
668     tcg_temp_free_i64(avr);
669     tcg_temp_free_i64(sh);
670     tcg_temp_free_i64(carry);
671     tcg_temp_free_i64(tmp);
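/*
 * The mirror-image sketch for trans_vsr above, assuming <stdint.h>: the bits
 * shifted out of the high half are carried into the low half, and
 * (hi << 32) << (32 - sh) computes hi << (64 - sh) without an undefined shift
 * when sh == 0. The helper name is illustrative and the block is not compiled.
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>

static void vsr128(uint64_t hi, uint64_t lo, unsigned sh,
                   uint64_t *rhi, uint64_t *rlo)
{
    uint64_t carry = (hi << 32) << (32 - sh);   /* bottom sh bits of hi */

    *rhi = hi >> sh;
    *rlo = (lo >> sh) | carry;
}
#endif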
675  * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
677  * The i-th bits (i in range 1 to 8) of each byte of a doubleword element of
678  * the source register are concatenated and placed into the i-th byte of the
679  * corresponding doubleword element of the destination register.
681  * The solution below handles both doubleword elements of the source register
682  * in parallel, in order to reduce the number of instructions needed (that is
683  * why arrays are used):
684  * First, both doubleword elements of source register vB are placed in the
685  * corresponding elements of the array avr. Bits are gathered in 2x8 iterations
686  * (two for loops). In the first iteration, bit 1 of byte 1, bit 2 of byte 2,
687  * ... bit 8 of byte 8 are already in their final spots, so avr[i], i={0,1},
688  * can be AND-ed with tcg_mask. In every following iteration, avr[i] and
689  * tcg_mask have to be shifted right by a further 7 and 8 places respectively,
690  * so that bit 1 of byte 2, bit 2 of byte 3, ... bit 7 of byte 8 land in their
691  * final spots and the shifted avr values (saved in tmp) can be AND-ed with the
692  * new value of tcg_mask. After the first 8 iterations (the first loop), all
693  * the first bits are in their final places, all the second bits except one
694  * are in their places, and so on, until only a single eighth bit is in its
695  * place. The second loop performs the same operations symmetrically, to get
696  * the other half of the bits into their final spots. The results for the
697  * first and second doubleword elements, result[0] and result[1], are
698  * finally stored into the corresponding doubleword elements of vD.
699  */
700 static void trans_vgbbd(DisasContext *ctx)
702     int VT = rD(ctx->opcode);
703     int VB = rB(ctx->opcode);
704     TCGv_i64 tmp = tcg_temp_new_i64();
705     uint64_t mask = 0x8040201008040201ULL;
706     int i, j;
708     TCGv_i64 result[2];
709     result[0] = tcg_temp_new_i64();
710     result[1] = tcg_temp_new_i64();
711     TCGv_i64 avr[2];
712     avr[0] = tcg_temp_new_i64();
713     avr[1] = tcg_temp_new_i64();
714     TCGv_i64 tcg_mask = tcg_temp_new_i64();
716     tcg_gen_movi_i64(tcg_mask, mask);
717     for (j = 0; j < 2; j++) {
718         get_avr64(avr[j], VB, j);
719         tcg_gen_and_i64(result[j], avr[j], tcg_mask);
720     }
721     for (i = 1; i < 8; i++) {
722         tcg_gen_movi_i64(tcg_mask, mask >> (i * 8));
723         for (j = 0; j < 2; j++) {
724             tcg_gen_shri_i64(tmp, avr[j], i * 7);
725             tcg_gen_and_i64(tmp, tmp, tcg_mask);
726             tcg_gen_or_i64(result[j], result[j], tmp);
727         }
728     }
729     for (i = 1; i < 8; i++) {
730         tcg_gen_movi_i64(tcg_mask, mask << (i * 8));
731         for (j = 0; j < 2; j++) {
732             tcg_gen_shli_i64(tmp, avr[j], i * 7);
733             tcg_gen_and_i64(tmp, tmp, tcg_mask);
734             tcg_gen_or_i64(result[j], result[j], tmp);
735         }
736     }
737     for (j = 0; j < 2; j++) {
738         set_avr64(VT, result[j], j);
739     }
741     tcg_temp_free_i64(tmp);
742     tcg_temp_free_i64(tcg_mask);
743     tcg_temp_free_i64(result[0]);
744     tcg_temp_free_i64(result[1]);
745     tcg_temp_free_i64(avr[0]);
746     tcg_temp_free_i64(avr[1]);
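/*
 * A single-doubleword host sketch of the gather loops in trans_vgbbd above,
 * assuming <stdint.h>; it uses the same diagonal mask and the same
 * shift-by-7/shift-by-8 stepping, just on a plain uint64_t. The helper name
 * is illustrative and the block is not compiled.
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>

static uint64_t gbbd_dword(uint64_t v)
{
    const uint64_t mask = 0x8040201008040201ull;
    uint64_t r = v & mask;
    int i;

    for (i = 1; i < 8; i++) {
        r |= (v >> (i * 7)) & (mask >> (i * 8));
    }
    for (i = 1; i < 8; i++) {
        r |= (v << (i * 7)) & (mask << (i * 8));
    }
    return r;
}

/* e.g. gbbd_dword(0x0101010101010101ull) == 0x00000000000000ffull */
#endif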
750  * vclzw VRT,VRB - Vector Count Leading Zeros Word
752  * Counting the number of leading zero bits of each word element in source
753  * register and placing result in appropriate word element of destination
754  * register.
755  */
756 static void trans_vclzw(DisasContext *ctx)
758     int VT = rD(ctx->opcode);
759     int VB = rB(ctx->opcode);
760     TCGv_i32 tmp = tcg_temp_new_i32();
761     int i;
763     /* Perform count for every word element using tcg_gen_clzi_i32. */
764     for (i = 0; i < 4; i++) {
765         tcg_gen_ld_i32(tmp, cpu_env,
766             offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4);
767         tcg_gen_clzi_i32(tmp, tmp, 32);
768         tcg_gen_st_i32(tmp, cpu_env,
769             offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4);
770     }
772     tcg_temp_free_i32(tmp);
776  * vclzd VRT,VRB - Vector Count Leading Zeros Doubleword
778  * Counting the number of leading zero bits of each doubleword element in source
779  * register and placing result in appropriate doubleword element of destination
780  * register.
781  */
782 static void trans_vclzd(DisasContext *ctx)
784     int VT = rD(ctx->opcode);
785     int VB = rB(ctx->opcode);
786     TCGv_i64 avr = tcg_temp_new_i64();
788     /* high doubleword */
789     get_avr64(avr, VB, true);
790     tcg_gen_clzi_i64(avr, avr, 64);
791     set_avr64(VT, avr, true);
793     /* low doubleword */
794     get_avr64(avr, VB, false);
795     tcg_gen_clzi_i64(avr, avr, 64);
796     set_avr64(VT, avr, false);
798     tcg_temp_free_i64(avr);
801 GEN_VXFORM_V(vmuluwm, MO_32, tcg_gen_gvec_mul, 4, 2);
802 GEN_VXFORM(vsrv, 2, 28);
803 GEN_VXFORM(vslv, 2, 29);
804 GEN_VXFORM(vslo, 6, 16);
805 GEN_VXFORM(vsro, 6, 17);
806 GEN_VXFORM(vaddcuw, 0, 6);
807 GEN_VXFORM(vsubcuw, 0, 22);
809 static bool do_vector_gvec3_VX(DisasContext *ctx, arg_VX *a, int vece,
810                                void (*gen_gvec)(unsigned, uint32_t, uint32_t,
811                                                 uint32_t, uint32_t, uint32_t))
813     REQUIRE_VECTOR(ctx);
815     gen_gvec(vece, avr_full_offset(a->vrt), avr_full_offset(a->vra),
816              avr_full_offset(a->vrb), 16, 16);
818     return true;
821 TRANS_FLAGS(ALTIVEC, VSLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shlv);
822 TRANS_FLAGS(ALTIVEC, VSLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shlv);
823 TRANS_FLAGS(ALTIVEC, VSLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shlv);
824 TRANS_FLAGS2(ALTIVEC_207, VSLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shlv);
826 TRANS_FLAGS(ALTIVEC, VSRB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shrv);
827 TRANS_FLAGS(ALTIVEC, VSRH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shrv);
828 TRANS_FLAGS(ALTIVEC, VSRW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shrv);
829 TRANS_FLAGS2(ALTIVEC_207, VSRD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shrv);
831 TRANS_FLAGS(ALTIVEC, VSRAB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_sarv);
832 TRANS_FLAGS(ALTIVEC, VSRAH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_sarv);
833 TRANS_FLAGS(ALTIVEC, VSRAW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_sarv);
834 TRANS_FLAGS2(ALTIVEC_207, VSRAD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_sarv);
836 TRANS_FLAGS(ALTIVEC, VRLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_rotlv)
837 TRANS_FLAGS(ALTIVEC, VRLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_rotlv)
838 TRANS_FLAGS(ALTIVEC, VRLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_rotlv)
839 TRANS_FLAGS2(ALTIVEC_207, VRLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_rotlv)
841 static TCGv_vec do_vrl_mask_vec(unsigned vece, TCGv_vec vrb)
843     TCGv_vec t0 = tcg_temp_new_vec_matching(vrb),
844              t1 = tcg_temp_new_vec_matching(vrb),
845              t2 = tcg_temp_new_vec_matching(vrb),
846              ones = tcg_constant_vec_matching(vrb, vece, -1);
848     /* Extract b and e */
849     tcg_gen_dupi_vec(vece, t2, (8 << vece) - 1);
851     tcg_gen_shri_vec(vece, t0, vrb, 16);
852     tcg_gen_and_vec(vece, t0, t0, t2);
854     tcg_gen_shri_vec(vece, t1, vrb, 8);
855     tcg_gen_and_vec(vece, t1, t1, t2);
857     /* Compare b and e to negate the mask where begin > end */
858     tcg_gen_cmp_vec(TCG_COND_GT, vece, t2, t0, t1);
860     /* Create the mask with (~0 >> b) ^ ((~0 >> e) >> 1) */
861     tcg_gen_shrv_vec(vece, t0, ones, t0);
862     tcg_gen_shrv_vec(vece, t1, ones, t1);
863     tcg_gen_shri_vec(vece, t1, t1, 1);
864     tcg_gen_xor_vec(vece, t0, t0, t1);
866     /* negate the mask */
867     tcg_gen_xor_vec(vece, t0, t0, t2);
869     tcg_temp_free_vec(t1);
870     tcg_temp_free_vec(t2);
872     return t0;
875 static void gen_vrlnm_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
876                           TCGv_vec vrb)
878     TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt);
880     /* Create the mask */
881     mask = do_vrl_mask_vec(vece, vrb);
883     /* Extract n */
884     tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
885     tcg_gen_and_vec(vece, n, vrb, n);
887     /* Rotate and mask */
888     tcg_gen_rotlv_vec(vece, vrt, vra, n);
889     tcg_gen_and_vec(vece, vrt, vrt, mask);
891     tcg_temp_free_vec(n);
892     tcg_temp_free_vec(mask);
895 static bool do_vrlnm(DisasContext *ctx, arg_VX *a, int vece)
897     static const TCGOpcode vecop_list[] = {
898         INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
899         INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
900     };
901     static const GVecGen3 ops[2] = {
902         {
903             .fniv = gen_vrlnm_vec,
904             .fno = gen_helper_VRLWNM,
905             .opt_opc = vecop_list,
906             .load_dest = true,
907             .vece = MO_32
908         },
909         {
910             .fniv = gen_vrlnm_vec,
911             .fno = gen_helper_VRLDNM,
912             .opt_opc = vecop_list,
913             .load_dest = true,
914             .vece = MO_64
915         }
916     };
918     REQUIRE_INSNS_FLAGS2(ctx, ISA300);
919     REQUIRE_VSX(ctx);
921     tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
922                    avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);
924     return true;
927 TRANS(VRLWNM, do_vrlnm, MO_32)
928 TRANS(VRLDNM, do_vrlnm, MO_64)
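/*
 * A scalar, one-element sketch of the VRLDNM mask construction used by
 * do_vrl_mask_vec() and gen_vrlnm_vec() above, assuming <stdint.h>: b and e
 * are the mask begin/end fields taken from the same bit positions of the vrb
 * element, and (~0 >> b) ^ ((~0 >> e) >> 1) sets bits b..e, negated when
 * b > e. The helper name is illustrative and the block is not compiled.
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>

static uint64_t vrldnm_elem(uint64_t a, uint64_t vrb)
{
    unsigned n = vrb & 63;           /* rotate amount */
    unsigned e = (vrb >> 8) & 63;    /* mask end      */
    unsigned b = (vrb >> 16) & 63;   /* mask begin    */
    uint64_t rot = (a << n) | (n ? a >> (64 - n) : 0);
    uint64_t mask = (~0ull >> b) ^ ((~0ull >> e) >> 1);

    if (b > e) {
        mask = ~mask;
    }
    return rot & mask;
}
#endif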
930 static void gen_vrlmi_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
931                           TCGv_vec vrb)
933     TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt),
934              tmp = tcg_temp_new_vec_matching(vrt);
936     /* Create the mask */
937     mask = do_vrl_mask_vec(vece, vrb);
939     /* Extract n */
940     tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
941     tcg_gen_and_vec(vece, n, vrb, n);
943     /* Rotate and insert */
944     tcg_gen_rotlv_vec(vece, tmp, vra, n);
945     tcg_gen_bitsel_vec(vece, vrt, mask, tmp, vrt);
947     tcg_temp_free_vec(n);
948     tcg_temp_free_vec(tmp);
949     tcg_temp_free_vec(mask);
952 static bool do_vrlmi(DisasContext *ctx, arg_VX *a, int vece)
954     static const TCGOpcode vecop_list[] = {
955         INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
956         INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
957     };
958     static const GVecGen3 ops[2] = {
959         {
960             .fniv = gen_vrlmi_vec,
961             .fno = gen_helper_VRLWMI,
962             .opt_opc = vecop_list,
963             .load_dest = true,
964             .vece = MO_32
965         },
966         {
967             .fniv = gen_vrlmi_vec,
968             .fno = gen_helper_VRLDMI,
969             .opt_opc = vecop_list,
970             .load_dest = true,
971             .vece = MO_64
972         }
973     };
975     REQUIRE_INSNS_FLAGS2(ctx, ISA300);
976     REQUIRE_VSX(ctx);
978     tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
979                    avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);
981     return true;
984 TRANS(VRLWMI, do_vrlmi, MO_32)
985 TRANS(VRLDMI, do_vrlmi, MO_64)
987 static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right,
988                                  bool alg)
990     TCGv_i64 hi, lo, t0, t1, n, zero = tcg_constant_i64(0);
992     REQUIRE_VECTOR(ctx);
994     n = tcg_temp_new_i64();
995     hi = tcg_temp_new_i64();
996     lo = tcg_temp_new_i64();
997     t0 = tcg_temp_new_i64();
998     t1 = tcg_const_i64(0);
1000     get_avr64(lo, a->vra, false);
1001     get_avr64(hi, a->vra, true);
1003     get_avr64(n, a->vrb, true);
1005     tcg_gen_andi_i64(t0, n, 64);
1006     if (right) {
1007         tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, hi, lo);
1008         if (alg) {
1009             tcg_gen_sari_i64(t1, lo, 63);
1010         }
1011         tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, t1, hi);
1012     } else {
1013         tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, lo, hi);
1014         tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, zero, lo);
1015     }
1016     tcg_gen_andi_i64(n, n, 0x3F);
1018     if (right) {
1019         if (alg) {
1020             tcg_gen_sar_i64(t0, hi, n);
1021         } else {
1022             tcg_gen_shr_i64(t0, hi, n);
1023         }
1024     } else {
1025         tcg_gen_shl_i64(t0, lo, n);
1026     }
1027     set_avr64(a->vrt, t0, right);
1029     if (right) {
1030         tcg_gen_shr_i64(lo, lo, n);
1031     } else {
1032         tcg_gen_shl_i64(hi, hi, n);
1033     }
1034     tcg_gen_xori_i64(n, n, 63);
1035     if (right) {
1036         tcg_gen_shl_i64(hi, hi, n);
1037         tcg_gen_shli_i64(hi, hi, 1);
1038     } else {
1039         tcg_gen_shr_i64(lo, lo, n);
1040         tcg_gen_shri_i64(lo, lo, 1);
1041     }
1042     tcg_gen_or_i64(hi, hi, lo);
1043     set_avr64(a->vrt, hi, !right);
1045     tcg_temp_free_i64(hi);
1046     tcg_temp_free_i64(lo);
1047     tcg_temp_free_i64(t0);
1048     tcg_temp_free_i64(t1);
1049     tcg_temp_free_i64(n);
1051     return true;
1054 TRANS_FLAGS2(ISA310, VSLQ, do_vector_shift_quad, false, false);
1055 TRANS_FLAGS2(ISA310, VSRQ, do_vector_shift_quad, true, false);
1056 TRANS_FLAGS2(ISA310, VSRAQ, do_vector_shift_quad, true, true);
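/*
 * A two-halves sketch of the quadword left shift in do_vector_shift_quad
 * above, assuming <stdint.h>: bit 6 of the shift amount moves the low half
 * into the high half, and the remaining 0..63-bit shift carries bits between
 * the halves with the (x >> (n ^ 63)) >> 1 idiom, which stays well defined
 * for n == 0. The helper name is illustrative and the block is not compiled.
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>

static void vslq_sketch(uint64_t hi, uint64_t lo, unsigned sh,
                        uint64_t *rhi, uint64_t *rlo)
{
    unsigned n = sh & 0x3f;

    if (sh & 0x40) {       /* shifting by 64..127 */
        hi = lo;
        lo = 0;
    }
    *rlo = lo << n;
    *rhi = (hi << n) | ((lo >> (n ^ 63)) >> 1);
}
#endif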
1058 static void do_vrlq_mask(TCGv_i64 mh, TCGv_i64 ml, TCGv_i64 b, TCGv_i64 e)
1060     TCGv_i64 th, tl, t0, t1, zero = tcg_constant_i64(0),
1061              ones = tcg_constant_i64(-1);
1063     th = tcg_temp_new_i64();
1064     tl = tcg_temp_new_i64();
1065     t0 = tcg_temp_new_i64();
1066     t1 = tcg_temp_new_i64();
1068     /* m = ~0 >> b */
1069     tcg_gen_andi_i64(t0, b, 64);
1070     tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
1071     tcg_gen_andi_i64(t0, b, 0x3F);
1072     tcg_gen_shr_i64(mh, t1, t0);
1073     tcg_gen_shr_i64(ml, ones, t0);
1074     tcg_gen_xori_i64(t0, t0, 63);
1075     tcg_gen_shl_i64(t1, t1, t0);
1076     tcg_gen_shli_i64(t1, t1, 1);
1077     tcg_gen_or_i64(ml, t1, ml);
1079     /* t = ~0 >> e */
1080     tcg_gen_andi_i64(t0, e, 64);
1081     tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
1082     tcg_gen_andi_i64(t0, e, 0x3F);
1083     tcg_gen_shr_i64(th, t1, t0);
1084     tcg_gen_shr_i64(tl, ones, t0);
1085     tcg_gen_xori_i64(t0, t0, 63);
1086     tcg_gen_shl_i64(t1, t1, t0);
1087     tcg_gen_shli_i64(t1, t1, 1);
1088     tcg_gen_or_i64(tl, t1, tl);
1090     /* t = t >> 1 */
1091     tcg_gen_extract2_i64(tl, tl, th, 1);
1092     tcg_gen_shri_i64(th, th, 1);
1094     /* m = m ^ t */
1095     tcg_gen_xor_i64(mh, mh, th);
1096     tcg_gen_xor_i64(ml, ml, tl);
1098     /* Negate the mask if begin > end */
1099     tcg_gen_movcond_i64(TCG_COND_GT, t0, b, e, ones, zero);
1101     tcg_gen_xor_i64(mh, mh, t0);
1102     tcg_gen_xor_i64(ml, ml, t0);
1104     tcg_temp_free_i64(th);
1105     tcg_temp_free_i64(tl);
1106     tcg_temp_free_i64(t0);
1107     tcg_temp_free_i64(t1);
1110 static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask,
1111                                 bool insert)
1113     TCGv_i64 ah, al, vrb, n, t0, t1, zero = tcg_constant_i64(0);
1115     REQUIRE_VECTOR(ctx);
1116     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1118     ah = tcg_temp_new_i64();
1119     al = tcg_temp_new_i64();
1120     vrb = tcg_temp_new_i64();
1121     n = tcg_temp_new_i64();
1122     t0 = tcg_temp_new_i64();
1123     t1 = tcg_temp_new_i64();
1125     get_avr64(ah, a->vra, true);
1126     get_avr64(al, a->vra, false);
1127     get_avr64(vrb, a->vrb, true);
1129     tcg_gen_mov_i64(t0, ah);
1130     tcg_gen_andi_i64(t1, vrb, 64);
1131     tcg_gen_movcond_i64(TCG_COND_NE, ah, t1, zero, al, ah);
1132     tcg_gen_movcond_i64(TCG_COND_NE, al, t1, zero, t0, al);
1133     tcg_gen_andi_i64(n, vrb, 0x3F);
1135     tcg_gen_shl_i64(t0, ah, n);
1136     tcg_gen_shl_i64(t1, al, n);
1138     tcg_gen_xori_i64(n, n, 63);
1140     tcg_gen_shr_i64(al, al, n);
1141     tcg_gen_shri_i64(al, al, 1);
1142     tcg_gen_or_i64(t0, al, t0);
1144     tcg_gen_shr_i64(ah, ah, n);
1145     tcg_gen_shri_i64(ah, ah, 1);
1146     tcg_gen_or_i64(t1, ah, t1);
1148     if (mask || insert) {
1149         tcg_gen_extract_i64(n, vrb, 8, 7);
1150         tcg_gen_extract_i64(vrb, vrb, 16, 7);
1152         do_vrlq_mask(ah, al, vrb, n);
1154         tcg_gen_and_i64(t0, t0, ah);
1155         tcg_gen_and_i64(t1, t1, al);
1157         if (insert) {
1158             get_avr64(n, a->vrt, true);
1159             get_avr64(vrb, a->vrt, false);
1160             tcg_gen_andc_i64(n, n, ah);
1161             tcg_gen_andc_i64(vrb, vrb, al);
1162             tcg_gen_or_i64(t0, t0, n);
1163             tcg_gen_or_i64(t1, t1, vrb);
1164         }
1165     }
1167     set_avr64(a->vrt, t0, true);
1168     set_avr64(a->vrt, t1, false);
1170     tcg_temp_free_i64(ah);
1171     tcg_temp_free_i64(al);
1172     tcg_temp_free_i64(vrb);
1173     tcg_temp_free_i64(n);
1174     tcg_temp_free_i64(t0);
1175     tcg_temp_free_i64(t1);
1177     return true;
1180 TRANS(VRLQ, do_vector_rotl_quad, false, false)
1181 TRANS(VRLQNM, do_vector_rotl_quad, true, false)
1182 TRANS(VRLQMI, do_vector_rotl_quad, false, true)
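/*
 * A two-halves sketch of the quadword rotate in do_vector_rotl_quad above,
 * assuming <stdint.h>: bit 6 of the rotate amount swaps the halves, and the
 * remaining 0..63-bit rotate feeds the bits falling off each half into the
 * other, again via the (x >> (n ^ 63)) >> 1 idiom. VRLQNM/VRLQMI then AND or
 * bit-select this result with the mask built by do_vrlq_mask(). The helper
 * name is illustrative and the block is not compiled.
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>

static void vrlq_sketch(uint64_t hi, uint64_t lo, unsigned r,
                        uint64_t *rhi, uint64_t *rlo)
{
    unsigned n = r & 0x3f;

    if (r & 0x40) {
        uint64_t t = hi;
        hi = lo;
        lo = t;
    }
    *rhi = (hi << n) | ((lo >> (n ^ 63)) >> 1);
    *rlo = (lo << n) | ((hi >> (n ^ 63)) >> 1);
}
#endif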
1184 #define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3)               \
1185 static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t,     \
1186                                          TCGv_vec sat, TCGv_vec a,      \
1187                                          TCGv_vec b)                    \
1188 {                                                                       \
1189     TCGv_vec x = tcg_temp_new_vec_matching(t);                          \
1190     glue(glue(tcg_gen_, NORM), _vec)(VECE, x, a, b);                    \
1191     glue(glue(tcg_gen_, SAT), _vec)(VECE, t, a, b);                     \
1192     tcg_gen_cmp_vec(TCG_COND_NE, VECE, x, x, t);                        \
1193     tcg_gen_or_vec(VECE, sat, sat, x);                                  \
1194     tcg_temp_free_vec(x);                                               \
1195 }                                                                       \
1196 static void glue(gen_, NAME)(DisasContext *ctx)                         \
1197 {                                                                       \
1198     static const TCGOpcode vecop_list[] = {                             \
1199         glue(glue(INDEX_op_, NORM), _vec),                              \
1200         glue(glue(INDEX_op_, SAT), _vec),                               \
1201         INDEX_op_cmp_vec, 0                                             \
1202     };                                                                  \
1203     static const GVecGen4 g = {                                         \
1204         .fniv = glue(glue(gen_, NAME), _vec),                           \
1205         .fno = glue(gen_helper_, NAME),                                 \
1206         .opt_opc = vecop_list,                                          \
1207         .write_aofs = true,                                             \
1208         .vece = VECE,                                                   \
1209     };                                                                  \
1210     if (unlikely(!ctx->altivec_enabled)) {                              \
1211         gen_exception(ctx, POWERPC_EXCP_VPU);                           \
1212         return;                                                         \
1213     }                                                                   \
1214     tcg_gen_gvec_4(avr_full_offset(rD(ctx->opcode)),                    \
1215                    offsetof(CPUPPCState, vscr_sat),                     \
1216                    avr_full_offset(rA(ctx->opcode)),                    \
1217                    avr_full_offset(rB(ctx->opcode)),                    \
1218                    16, 16, &g);                                         \
1221 GEN_VXFORM_SAT(vaddubs, MO_8, add, usadd, 0, 8);
1222 GEN_VXFORM_DUAL_EXT(vaddubs, PPC_ALTIVEC, PPC_NONE, 0,       \
1223                     vmul10uq, PPC_NONE, PPC2_ISA300, 0x0000F800)
1224 GEN_VXFORM_SAT(vadduhs, MO_16, add, usadd, 0, 9);
1225 GEN_VXFORM_DUAL(vadduhs, PPC_ALTIVEC, PPC_NONE, \
1226                 vmul10euq, PPC_NONE, PPC2_ISA300)
1227 GEN_VXFORM_SAT(vadduws, MO_32, add, usadd, 0, 10);
1228 GEN_VXFORM_SAT(vaddsbs, MO_8, add, ssadd, 0, 12);
1229 GEN_VXFORM_SAT(vaddshs, MO_16, add, ssadd, 0, 13);
1230 GEN_VXFORM_SAT(vaddsws, MO_32, add, ssadd, 0, 14);
1231 GEN_VXFORM_SAT(vsububs, MO_8, sub, ussub, 0, 24);
1232 GEN_VXFORM_SAT(vsubuhs, MO_16, sub, ussub, 0, 25);
1233 GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26);
1234 GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28);
1235 GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29);
1236 GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30);
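/*
 * A per-element sketch of how GEN_VXFORM_SAT above detects saturation,
 * assuming <stdint.h> and <stdbool.h>: compute both the modular (NORM) and
 * the saturating (SAT) result and make the SAT flag sticky whenever the two
 * differ (unsigned byte add shown). The helper name is illustrative and the
 * block is not compiled.
 */
#if 0 /* illustrative sketch only */
#include <stdbool.h>
#include <stdint.h>

static uint8_t vaddubs_elem(uint8_t a, uint8_t b, bool *sat)
{
    uint8_t norm = a + b;                     /* wrapping result    */
    uint8_t res = (norm < a) ? 0xff : norm;   /* saturating result  */

    *sat |= (res != norm);                    /* sticky VSCR.SAT    */
    return res;
}
#endif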
1237 GEN_VXFORM_TRANS(vsl, 2, 7);
1238 GEN_VXFORM_TRANS(vsr, 2, 11);
1239 GEN_VXFORM_ENV(vpkuhum, 7, 0);
1240 GEN_VXFORM_ENV(vpkuwum, 7, 1);
1241 GEN_VXFORM_ENV(vpkudum, 7, 17);
1242 GEN_VXFORM_ENV(vpkuhus, 7, 2);
1243 GEN_VXFORM_ENV(vpkuwus, 7, 3);
1244 GEN_VXFORM_ENV(vpkudus, 7, 19);
1245 GEN_VXFORM_ENV(vpkshus, 7, 4);
1246 GEN_VXFORM_ENV(vpkswus, 7, 5);
1247 GEN_VXFORM_ENV(vpksdus, 7, 21);
1248 GEN_VXFORM_ENV(vpkshss, 7, 6);
1249 GEN_VXFORM_ENV(vpkswss, 7, 7);
1250 GEN_VXFORM_ENV(vpksdss, 7, 23);
1251 GEN_VXFORM(vpkpx, 7, 12);
1252 GEN_VXFORM_ENV(vsum4ubs, 4, 24);
1253 GEN_VXFORM_ENV(vsum4sbs, 4, 28);
1254 GEN_VXFORM_ENV(vsum4shs, 4, 25);
1255 GEN_VXFORM_ENV(vsum2sws, 4, 26);
1256 GEN_VXFORM_ENV(vsumsws, 4, 30);
1257 GEN_VXFORM_ENV(vaddfp, 5, 0);
1258 GEN_VXFORM_ENV(vsubfp, 5, 1);
1259 GEN_VXFORM_ENV(vmaxfp, 5, 16);
1260 GEN_VXFORM_ENV(vminfp, 5, 17);
1261 GEN_VXFORM_HETRO(vextublx, 6, 24)
1262 GEN_VXFORM_HETRO(vextuhlx, 6, 25)
1263 GEN_VXFORM_HETRO(vextuwlx, 6, 26)
1264 GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207,
1265                 vextuwlx, PPC_NONE, PPC2_ISA300)
1266 GEN_VXFORM_HETRO(vextubrx, 6, 28)
1267 GEN_VXFORM_HETRO(vextuhrx, 6, 29)
1268 GEN_VXFORM_HETRO(vextuwrx, 6, 30)
1269 GEN_VXFORM_TRANS(lvsl, 6, 31)
1270 GEN_VXFORM_TRANS(lvsr, 6, 32)
1271 GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207,
1272                 vextuwrx, PPC_NONE, PPC2_ISA300)
1274 #define GEN_VXRFORM1(opname, name, str, opc2, opc3)                     \
1275 static void glue(gen_, name)(DisasContext *ctx)                         \
1276     {                                                                   \
1277         TCGv_ptr ra, rb, rd;                                            \
1278         if (unlikely(!ctx->altivec_enabled)) {                          \
1279             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1280             return;                                                     \
1281         }                                                               \
1282         ra = gen_avr_ptr(rA(ctx->opcode));                              \
1283         rb = gen_avr_ptr(rB(ctx->opcode));                              \
1284         rd = gen_avr_ptr(rD(ctx->opcode));                              \
1285         gen_helper_##opname(cpu_env, rd, ra, rb);                       \
1286         tcg_temp_free_ptr(ra);                                          \
1287         tcg_temp_free_ptr(rb);                                          \
1288         tcg_temp_free_ptr(rd);                                          \
1289     }
1291 #define GEN_VXRFORM(name, opc2, opc3)                                \
1292     GEN_VXRFORM1(name, name, #name, opc2, opc3)                      \
1293     GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
1295 /*
1296  * Support for Altivec instructions that use bit 31 (Rc) as an opcode
1297  * bit but also use bit 21 as an actual Rc bit.  In general, these pairs
1298  * come from different versions of the ISA, so we must also support a
1299  * pair of flags for each instruction.
1300  */
1301 #define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)     \
1302 static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
1303 {                                                                      \
1304     if ((Rc(ctx->opcode) == 0) &&                                      \
1305         ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
1306         if (Rc21(ctx->opcode) == 0) {                                  \
1307             gen_##name0(ctx);                                          \
1308         } else {                                                       \
1309             gen_##name0##_(ctx);                                       \
1310         }                                                              \
1311     } else if ((Rc(ctx->opcode) == 1) &&                               \
1312         ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
1313         if (Rc21(ctx->opcode) == 0) {                                  \
1314             gen_##name1(ctx);                                          \
1315         } else {                                                       \
1316             gen_##name1##_(ctx);                                       \
1317         }                                                              \
1318     } else {                                                           \
1319         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
1320     }                                                                  \
1323 static void do_vcmp_rc(int vrt)
1325     TCGv_i64 tmp, set, clr;
1327     tmp = tcg_temp_new_i64();
1328     set = tcg_temp_new_i64();
1329     clr = tcg_temp_new_i64();
1331     get_avr64(tmp, vrt, true);
1332     tcg_gen_mov_i64(set, tmp);
1333     get_avr64(tmp, vrt, false);
1334     tcg_gen_or_i64(clr, set, tmp);
1335     tcg_gen_and_i64(set, set, tmp);
1337     tcg_gen_setcondi_i64(TCG_COND_EQ, clr, clr, 0);
1338     tcg_gen_shli_i64(clr, clr, 1);
1340     tcg_gen_setcondi_i64(TCG_COND_EQ, set, set, -1);
1341     tcg_gen_shli_i64(set, set, 3);
1343     tcg_gen_or_i64(tmp, set, clr);
1344     tcg_gen_extrl_i64_i32(cpu_crf[6], tmp);
1346     tcg_temp_free_i64(tmp);
1347     tcg_temp_free_i64(set);
1348     tcg_temp_free_i64(clr);
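/*
 * For illustration, the CR6 value produced above for the vector compare
 * record forms comes out as follows (assuming the usual "all true" /
 * "all false" convention for CR field 6):
 *
 *   every element matched:  set == -1, clr != 0  ->  CR6 = 0b1000
 *   no element matched:     set != -1, clr == 0  ->  CR6 = 0b0010
 *   mixed result:                                ->  CR6 = 0b0000
 */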
1351 static bool do_vcmp(DisasContext *ctx, arg_VC *a, TCGCond cond, int vece)
1353     REQUIRE_VECTOR(ctx);
1355     tcg_gen_gvec_cmp(cond, vece, avr_full_offset(a->vrt),
1356                      avr_full_offset(a->vra), avr_full_offset(a->vrb), 16, 16);
1358     if (a->rc) {
1359         do_vcmp_rc(a->vrt);
1360     }
1362     return true;
1365 TRANS_FLAGS(ALTIVEC, VCMPEQUB, do_vcmp, TCG_COND_EQ, MO_8)
1366 TRANS_FLAGS(ALTIVEC, VCMPEQUH, do_vcmp, TCG_COND_EQ, MO_16)
1367 TRANS_FLAGS(ALTIVEC, VCMPEQUW, do_vcmp, TCG_COND_EQ, MO_32)
1368 TRANS_FLAGS2(ALTIVEC_207, VCMPEQUD, do_vcmp, TCG_COND_EQ, MO_64)
1370 TRANS_FLAGS(ALTIVEC, VCMPGTSB, do_vcmp, TCG_COND_GT, MO_8)
1371 TRANS_FLAGS(ALTIVEC, VCMPGTSH, do_vcmp, TCG_COND_GT, MO_16)
1372 TRANS_FLAGS(ALTIVEC, VCMPGTSW, do_vcmp, TCG_COND_GT, MO_32)
1373 TRANS_FLAGS2(ALTIVEC_207, VCMPGTSD, do_vcmp, TCG_COND_GT, MO_64)
1374 TRANS_FLAGS(ALTIVEC, VCMPGTUB, do_vcmp, TCG_COND_GTU, MO_8)
1375 TRANS_FLAGS(ALTIVEC, VCMPGTUH, do_vcmp, TCG_COND_GTU, MO_16)
1376 TRANS_FLAGS(ALTIVEC, VCMPGTUW, do_vcmp, TCG_COND_GTU, MO_32)
1377 TRANS_FLAGS2(ALTIVEC_207, VCMPGTUD, do_vcmp, TCG_COND_GTU, MO_64)
1379 TRANS_FLAGS2(ISA300, VCMPNEB, do_vcmp, TCG_COND_NE, MO_8)
1380 TRANS_FLAGS2(ISA300, VCMPNEH, do_vcmp, TCG_COND_NE, MO_16)
1381 TRANS_FLAGS2(ISA300, VCMPNEW, do_vcmp, TCG_COND_NE, MO_32)
1383 static void gen_vcmpnez_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
1385     TCGv_vec t0, t1, zero;
1387     t0 = tcg_temp_new_vec_matching(t);
1388     t1 = tcg_temp_new_vec_matching(t);
1389     zero = tcg_constant_vec_matching(t, vece, 0);
1391     tcg_gen_cmp_vec(TCG_COND_EQ, vece, t0, a, zero);
1392     tcg_gen_cmp_vec(TCG_COND_EQ, vece, t1, b, zero);
1393     tcg_gen_cmp_vec(TCG_COND_NE, vece, t, a, b);
1395     tcg_gen_or_vec(vece, t, t, t0);
1396     tcg_gen_or_vec(vece, t, t, t1);
1398     tcg_temp_free_vec(t0);
1399     tcg_temp_free_vec(t1);
1402 static bool do_vcmpnez(DisasContext *ctx, arg_VC *a, int vece)
1404     static const TCGOpcode vecop_list[] = {
1405         INDEX_op_cmp_vec, 0
1406     };
1407     static const GVecGen3 ops[3] = {
1408         {
1409             .fniv = gen_vcmpnez_vec,
1410             .fno = gen_helper_VCMPNEZB,
1411             .opt_opc = vecop_list,
1412             .vece = MO_8
1413         },
1414         {
1415             .fniv = gen_vcmpnez_vec,
1416             .fno = gen_helper_VCMPNEZH,
1417             .opt_opc = vecop_list,
1418             .vece = MO_16
1419         },
1420         {
1421             .fniv = gen_vcmpnez_vec,
1422             .fno = gen_helper_VCMPNEZW,
1423             .opt_opc = vecop_list,
1424             .vece = MO_32
1425         }
1426     };
1428     REQUIRE_INSNS_FLAGS2(ctx, ISA300);
1429     REQUIRE_VECTOR(ctx);
1431     tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
1432                    avr_full_offset(a->vrb), 16, 16, &ops[vece]);
1434     if (a->rc) {
1435         do_vcmp_rc(a->vrt);
1436     }
1438     return true;
1441 TRANS(VCMPNEZB, do_vcmpnez, MO_8)
1442 TRANS(VCMPNEZH, do_vcmpnez, MO_16)
1443 TRANS(VCMPNEZW, do_vcmpnez, MO_32)
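/*
 * For reference, a rough scalar model of the "not equal or zero" predicate
 * that gen_vcmpnez_vec expands above (an illustrative sketch, not the
 * out-of-line helper QEMU actually calls):
 *
 *     per element:  result = (a != b) || (a == 0) || (b == 0) ? ~0 : 0
 */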
1445 static bool trans_VCMPEQUQ(DisasContext *ctx, arg_VC *a)
1447     TCGv_i64 t0, t1, t2;
1449     t0 = tcg_temp_new_i64();
1450     t1 = tcg_temp_new_i64();
1451     t2 = tcg_temp_new_i64();
1453     get_avr64(t0, a->vra, true);
1454     get_avr64(t1, a->vrb, true);
1455     tcg_gen_xor_i64(t2, t0, t1);
1457     get_avr64(t0, a->vra, false);
1458     get_avr64(t1, a->vrb, false);
1459     tcg_gen_xor_i64(t1, t0, t1);
1461     tcg_gen_or_i64(t1, t1, t2);
1462     tcg_gen_setcondi_i64(TCG_COND_EQ, t1, t1, 0);
1463     tcg_gen_neg_i64(t1, t1);
1465     set_avr64(a->vrt, t1, true);
1466     set_avr64(a->vrt, t1, false);
1468     if (a->rc) {
1469         tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
1470         tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
1471         tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
1472     }
1474     tcg_temp_free_i64(t0);
1475     tcg_temp_free_i64(t1);
1476     tcg_temp_free_i64(t2);
1478     return true;
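/*
 * The CR6 update above relies on t1 being either all zeroes or all ones at
 * that point (the quadwords either compare equal or they do not). As a
 * worked example:
 *
 *     t1 = -1:  low 32 bits & 0xa = 0xa, ^ 0x2 = 0x8  ("all true")
 *     t1 =  0:  low 32 bits & 0xa = 0x0, ^ 0x2 = 0x2  ("all false")
 *
 * which matches what do_vcmp_rc computes for the element-wise compares.
 */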
1481 static bool do_vcmpgtq(DisasContext *ctx, arg_VC *a, bool sign)
1483     TCGv_i64 t0, t1, t2;
1485     t0 = tcg_temp_new_i64();
1486     t1 = tcg_temp_new_i64();
1487     t2 = tcg_temp_new_i64();
1489     get_avr64(t0, a->vra, false);
1490     get_avr64(t1, a->vrb, false);
1491     tcg_gen_setcond_i64(TCG_COND_GTU, t2, t0, t1);
1493     get_avr64(t0, a->vra, true);
1494     get_avr64(t1, a->vrb, true);
1495     tcg_gen_movcond_i64(TCG_COND_EQ, t2, t0, t1, t2, tcg_constant_i64(0));
1496     tcg_gen_setcond_i64(sign ? TCG_COND_GT : TCG_COND_GTU, t1, t0, t1);
1498     tcg_gen_or_i64(t1, t1, t2);
1499     tcg_gen_neg_i64(t1, t1);
1501     set_avr64(a->vrt, t1, true);
1502     set_avr64(a->vrt, t1, false);
1504     if (a->rc) {
1505         tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
1506         tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
1507         tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
1508     }
1510     tcg_temp_free_i64(t0);
1511     tcg_temp_free_i64(t1);
1512     tcg_temp_free_i64(t2);
1514     return true;
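/*
 * Roughly, the 128-bit comparison above is composed from two 64-bit ones:
 *
 *     gt(a, b) = (a.hi > b.hi) || (a.hi == b.hi && a.lo >u b.lo)
 *
 * where the high-doubleword compare is signed for VCMPGTSQ and unsigned
 * for VCMPGTUQ, and the low-doubleword compare is always unsigned.
 */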
1517 TRANS(VCMPGTSQ, do_vcmpgtq, true)
1518 TRANS(VCMPGTUQ, do_vcmpgtq, false)
1520 static bool do_vcmpq(DisasContext *ctx, arg_VX_bf *a, bool sign)
1522     TCGv_i64 vra, vrb;
1523     TCGLabel *gt, *lt, *done;
1525     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1526     REQUIRE_VECTOR(ctx);
1528     vra = tcg_temp_local_new_i64();
1529     vrb = tcg_temp_local_new_i64();
1530     gt = gen_new_label();
1531     lt = gen_new_label();
1532     done = gen_new_label();
1534     get_avr64(vra, a->vra, true);
1535     get_avr64(vrb, a->vrb, true);
1536     tcg_gen_brcond_i64((sign ? TCG_COND_GT : TCG_COND_GTU), vra, vrb, gt);
1537     tcg_gen_brcond_i64((sign ? TCG_COND_LT : TCG_COND_LTU), vra, vrb, lt);
1539     get_avr64(vra, a->vra, false);
1540     get_avr64(vrb, a->vrb, false);
1541     tcg_gen_brcond_i64(TCG_COND_GTU, vra, vrb, gt);
1542     tcg_gen_brcond_i64(TCG_COND_LTU, vra, vrb, lt);
1544     tcg_gen_movi_i32(cpu_crf[a->bf], CRF_EQ);
1545     tcg_gen_br(done);
1547     gen_set_label(gt);
1548     tcg_gen_movi_i32(cpu_crf[a->bf], CRF_GT);
1549     tcg_gen_br(done);
1551     gen_set_label(lt);
1552     tcg_gen_movi_i32(cpu_crf[a->bf], CRF_LT);
1553     tcg_gen_br(done);
1555     gen_set_label(done);
1556     tcg_temp_free_i64(vra);
1557     tcg_temp_free_i64(vrb);
1559     return true;
1562 TRANS(VCMPSQ, do_vcmpq, true)
1563 TRANS(VCMPUQ, do_vcmpq, false)
1565 GEN_VXRFORM(vcmpeqfp, 3, 3)
1566 GEN_VXRFORM(vcmpgefp, 3, 7)
1567 GEN_VXRFORM(vcmpgtfp, 3, 11)
1568 GEN_VXRFORM(vcmpbfp, 3, 15)
1570 static void gen_vsplti(DisasContext *ctx, int vece)
1572     int simm;
1574     if (unlikely(!ctx->altivec_enabled)) {
1575         gen_exception(ctx, POWERPC_EXCP_VPU);
1576         return;
1577     }
1579     simm = SIMM5(ctx->opcode);
1580     tcg_gen_gvec_dup_imm(vece, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);
1583 #define GEN_VXFORM_VSPLTI(name, vece, opc2, opc3) \
1584 static void glue(gen_, name)(DisasContext *ctx) { gen_vsplti(ctx, vece); }
1586 GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12);
1587 GEN_VXFORM_VSPLTI(vspltish, MO_16, 6, 13);
1588 GEN_VXFORM_VSPLTI(vspltisw, MO_32, 6, 14);
1590 #define GEN_VXFORM_NOA(name, opc2, opc3)                                \
1591 static void glue(gen_, name)(DisasContext *ctx)                         \
1592     {                                                                   \
1593         TCGv_ptr rb, rd;                                                \
1594         if (unlikely(!ctx->altivec_enabled)) {                          \
1595             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1596             return;                                                     \
1597         }                                                               \
1598         rb = gen_avr_ptr(rB(ctx->opcode));                              \
1599         rd = gen_avr_ptr(rD(ctx->opcode));                              \
1600         gen_helper_##name(rd, rb);                                      \
1601         tcg_temp_free_ptr(rb);                                          \
1602         tcg_temp_free_ptr(rd);                                          \
1603     }
1605 #define GEN_VXFORM_NOA_ENV(name, opc2, opc3)                            \
1606 static void glue(gen_, name)(DisasContext *ctx)                         \
1607     {                                                                   \
1608         TCGv_ptr rb, rd;                                                \
1609                                                                         \
1610         if (unlikely(!ctx->altivec_enabled)) {                          \
1611             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1612             return;                                                     \
1613         }                                                               \
1614         rb = gen_avr_ptr(rB(ctx->opcode));                              \
1615         rd = gen_avr_ptr(rD(ctx->opcode));                              \
1616         gen_helper_##name(cpu_env, rd, rb);                             \
1617         tcg_temp_free_ptr(rb);                                          \
1618         tcg_temp_free_ptr(rd);                                          \
1619     }
1621 #define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4)                        \
1622 static void glue(gen_, name)(DisasContext *ctx)                         \
1623     {                                                                   \
1624         TCGv_ptr rb, rd;                                                \
1625         if (unlikely(!ctx->altivec_enabled)) {                          \
1626             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1627             return;                                                     \
1628         }                                                               \
1629         rb = gen_avr_ptr(rB(ctx->opcode));                              \
1630         rd = gen_avr_ptr(rD(ctx->opcode));                              \
1631         gen_helper_##name(rd, rb);                                      \
1632         tcg_temp_free_ptr(rb);                                          \
1633         tcg_temp_free_ptr(rd);                                          \
1634     }
1636 #define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4)                        \
1637 static void glue(gen_, name)(DisasContext *ctx)                         \
1638     {                                                                   \
1639         TCGv_ptr rb;                                                    \
1640         if (unlikely(!ctx->altivec_enabled)) {                          \
1641             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1642             return;                                                     \
1643         }                                                               \
1644         rb = gen_avr_ptr(rB(ctx->opcode));                              \
1645         gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb);                \
1646         tcg_temp_free_ptr(rb);                                          \
1647     }
1648 GEN_VXFORM_NOA(vupkhsb, 7, 8);
1649 GEN_VXFORM_NOA(vupkhsh, 7, 9);
1650 GEN_VXFORM_NOA(vupkhsw, 7, 25);
1651 GEN_VXFORM_NOA(vupklsb, 7, 10);
1652 GEN_VXFORM_NOA(vupklsh, 7, 11);
1653 GEN_VXFORM_NOA(vupklsw, 7, 27);
1654 GEN_VXFORM_NOA(vupkhpx, 7, 13);
1655 GEN_VXFORM_NOA(vupklpx, 7, 15);
1656 GEN_VXFORM_NOA_ENV(vrefp, 5, 4);
1657 GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5);
1658 GEN_VXFORM_NOA_ENV(vexptefp, 5, 6);
1659 GEN_VXFORM_NOA_ENV(vlogefp, 5, 7);
1660 GEN_VXFORM_NOA_ENV(vrfim, 5, 11);
1661 GEN_VXFORM_NOA_ENV(vrfin, 5, 8);
1662 GEN_VXFORM_NOA_ENV(vrfip, 5, 10);
1663 GEN_VXFORM_NOA_ENV(vrfiz, 5, 9);
1664 GEN_VXFORM_NOA(vprtybw, 1, 24);
1665 GEN_VXFORM_NOA(vprtybd, 1, 24);
1666 GEN_VXFORM_NOA(vprtybq, 1, 24);
1668 static void gen_vsplt(DisasContext *ctx, int vece)
1670     int uimm, dofs, bofs;
1672     if (unlikely(!ctx->altivec_enabled)) {
1673         gen_exception(ctx, POWERPC_EXCP_VPU);
1674         return;
1675     }
1677     uimm = UIMM5(ctx->opcode);
1678     bofs = avr_full_offset(rB(ctx->opcode));
1679     dofs = avr_full_offset(rD(ctx->opcode));
1681     /* Experimental testing shows that hardware masks the immediate.  */
1682     bofs += (uimm << vece) & 15;
1683 #if !HOST_BIG_ENDIAN
1684     bofs ^= 15;
1685     bofs &= ~((1 << vece) - 1);
1686 #endif
1688     tcg_gen_gvec_dup_mem(vece, dofs, bofs, 16, 16);
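/*
 * A worked example of the offset computation above, assuming a little-endian
 * host and 16-byte aligned AVR offsets (which the ^= 15 trick relies on):
 * for vspltb with UIMM = 3, the masked immediate selects byte 3 in
 * big-endian element order; bofs ^= 15 turns that into host byte offset 12
 * within the register, and tcg_gen_gvec_dup_mem() then splats that byte to
 * all 16 bytes of VRT.
 */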
1691 #define GEN_VXFORM_VSPLT(name, vece, opc2, opc3) \
1692 static void glue(gen_, name)(DisasContext *ctx) { gen_vsplt(ctx, vece); }
1694 #define GEN_VXFORM_UIMM_ENV(name, opc2, opc3)                           \
1695 static void glue(gen_, name)(DisasContext *ctx)                         \
1696     {                                                                   \
1697         TCGv_ptr rb, rd;                                                \
1698         TCGv_i32 uimm;                                                  \
1699                                                                         \
1700         if (unlikely(!ctx->altivec_enabled)) {                          \
1701             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1702             return;                                                     \
1703         }                                                               \
1704         uimm = tcg_const_i32(UIMM5(ctx->opcode));                       \
1705         rb = gen_avr_ptr(rB(ctx->opcode));                              \
1706         rd = gen_avr_ptr(rD(ctx->opcode));                              \
1707         gen_helper_##name(cpu_env, rd, rb, uimm);                       \
1708         tcg_temp_free_i32(uimm);                                        \
1709         tcg_temp_free_ptr(rb);                                          \
1710         tcg_temp_free_ptr(rd);                                          \
1711     }
1713 #define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max)              \
1714 static void glue(gen_, name)(DisasContext *ctx)                         \
1715     {                                                                   \
1716         TCGv_ptr rb, rd;                                                \
1717         uint8_t uimm = UIMM4(ctx->opcode);                              \
1718         TCGv_i32 t0;                                                    \
1719         if (unlikely(!ctx->altivec_enabled)) {                          \
1720             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
1721             return;                                                     \
1722         }                                                               \
1723         if (uimm > splat_max) {                                         \
1724             uimm = 0;                                                   \
1725         }                                                               \
1726         t0 = tcg_temp_new_i32();                                        \
1727         tcg_gen_movi_i32(t0, uimm);                                     \
1728         rb = gen_avr_ptr(rB(ctx->opcode));                              \
1729         rd = gen_avr_ptr(rD(ctx->opcode));                              \
1730         gen_helper_##name(rd, rb, t0);                                  \
1731         tcg_temp_free_i32(t0);                                          \
1732         tcg_temp_free_ptr(rb);                                          \
1733         tcg_temp_free_ptr(rd);                                          \
1734     }
1736 GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8);
1737 GEN_VXFORM_VSPLT(vsplth, MO_16, 6, 9);
1738 GEN_VXFORM_VSPLT(vspltw, MO_32, 6, 10);
1739 GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
1740 GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
1741 GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
1742 GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
1743 GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
1744 GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
1745 GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
1746 GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15);
1747 GEN_VXFORM_DUAL(vspltb, PPC_ALTIVEC, PPC_NONE,
1748                 vextractub, PPC_NONE, PPC2_ISA300);
1749 GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE,
1750                 vextractuh, PPC_NONE, PPC2_ISA300);
1751 GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE,
1752                 vextractuw, PPC_NONE, PPC2_ISA300);
1754 static bool trans_VGNB(DisasContext *ctx, arg_VX_n *a)
1756     /*
1757      * Similar to do_vextractm, we'll use a sequence of mask-shift-or operations
1758      * to gather the bits. The masks can be created with
1759      *
1760      * uint64_t mask(uint64_t n, uint64_t step)
1761      * {
1762      *     uint64_t p = ((1UL << (1UL << step)) - 1UL) << ((n - 1UL) << step),
1763      *                  plen = n << step, m = 0;
1764      *     for(int i = 0; i < 64/plen; i++) {
1765      *         m |= p;
1766      *         m = ror64(m, plen);
1767      *     }
1768      *     p >>= plen * DIV_ROUND_UP(64, plen) - 64;
1769      *     return m | p;
1770      * }
1771      *
1772      * But since there are few values of N, we'll use a lookup table to avoid
1773      * these calculations at runtime.
1774      */
1775     static const uint64_t mask[6][5] = {
1776         {
1777             0xAAAAAAAAAAAAAAAAULL, 0xCCCCCCCCCCCCCCCCULL, 0xF0F0F0F0F0F0F0F0ULL,
1778             0xFF00FF00FF00FF00ULL, 0xFFFF0000FFFF0000ULL
1779         },
1780         {
1781             0x9249249249249249ULL, 0xC30C30C30C30C30CULL, 0xF00F00F00F00F00FULL,
1782             0xFF0000FF0000FF00ULL, 0xFFFF00000000FFFFULL
1783         },
1784         {
1785             /* For N >= 4, some mask operations can be elided */
1786             0x8888888888888888ULL, 0, 0xf000f000f000f000ULL, 0,
1787             0xFFFF000000000000ULL
1788         },
1789         {
1790             0x8421084210842108ULL, 0, 0xF0000F0000F0000FULL, 0, 0
1791         },
1792         {
1793             0x8208208208208208ULL, 0, 0xF00000F00000F000ULL, 0, 0
1794         },
1795         {
1796             0x8102040810204081ULL, 0, 0xF000000F000000F0ULL, 0, 0
1797         }
1798     };
1799     uint64_t m;
1800     int i, sh, nbits = DIV_ROUND_UP(64, a->n);
1801     TCGv_i64 hi, lo, t0, t1;
1803     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1804     REQUIRE_VECTOR(ctx);
1806     if (a->n < 2) {
1807         /*
1808          * "N can be any value between 2 and 7, inclusive." Otherwise, the
1809          * result is undefined, so we don't need to change RT. Also, N > 7 is
1810          * impossible since the immediate field is only 3 bits wide.
1811          */
1812         return true;
1813     }
1815     hi = tcg_temp_new_i64();
1816     lo = tcg_temp_new_i64();
1817     t0 = tcg_temp_new_i64();
1818     t1 = tcg_temp_new_i64();
1820     get_avr64(hi, a->vrb, true);
1821     get_avr64(lo, a->vrb, false);
1823     /* Align the lower doubleword so we can use the same mask */
1824     tcg_gen_shli_i64(lo, lo, a->n * nbits - 64);
1826     /*
1827      * Starting from the most significant bit, gather every Nth bit with a
1828      * sequence of mask-shift-or operations. E.g.: for N=3
1829      * AxxBxxCxxDxxExxFxxGxxHxxIxxJxxKxxLxxMxxNxxOxxPxxQxxRxxSxxTxxUxxV
1830      *     & rep(0b100)
1831      * A..B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V
1832      *     << 2
1833      * .B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V..
1834      *     |
1835      * AB.BC.CD.DE.EF.FG.GH.HI.IJ.JK.KL.LM.MN.NO.OP.PQ.QR.RS.ST.TU.UV.V
1836      *  & rep(0b110000)
1837      * AB....CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV..
1838      *     << 4
1839      * ..CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV......
1840      *     |
1841      * ABCD..CDEF..EFGH..GHIJ..IJKL..KLMN..MNOP..OPQR..QRST..STUV..UV..
1842      *     & rep(0b111100000000)
1843      * ABCD........EFGH........IJKL........MNOP........QRST........UV..
1844      *     << 8
1845      * ....EFGH........IJKL........MNOP........QRST........UV..........
1846      *     |
1847      * ABCDEFGH....EFGHIJKL....IJKLMNOP....MNOPQRST....QRSTUV......UV..
1848      *  & rep(0b111111110000000000000000)
1849      * ABCDEFGH................IJKLMNOP................QRSTUV..........
1850      *     << 16
1851      * ........IJKLMNOP................QRSTUV..........................
1852      *     |
1853      * ABCDEFGHIJKLMNOP........IJKLMNOPQRSTUV..........QRSTUV..........
1854      *     & rep(0b111111111111111100000000000000000000000000000000)
1855      * ABCDEFGHIJKLMNOP................................QRSTUV..........
1856      *     << 32
1857      * ................QRSTUV..........................................
1858      *     |
1859      * ABCDEFGHIJKLMNOPQRSTUV..........................QRSTUV..........
1860      */
1861     for (i = 0, sh = a->n - 1; i < 5; i++, sh <<= 1) {
1862         m = mask[a->n - 2][i];
1863         if (m) {
1864             tcg_gen_andi_i64(hi, hi, m);
1865             tcg_gen_andi_i64(lo, lo, m);
1866         }
1867         if (sh < 64) {
1868             tcg_gen_shli_i64(t0, hi, sh);
1869             tcg_gen_shli_i64(t1, lo, sh);
1870             tcg_gen_or_i64(hi, t0, hi);
1871             tcg_gen_or_i64(lo, t1, lo);
1872         }
1873     }
1875     tcg_gen_andi_i64(hi, hi, ~(~0ULL >> nbits));
1876     tcg_gen_andi_i64(lo, lo, ~(~0ULL >> nbits));
1877     tcg_gen_shri_i64(lo, lo, nbits);
1878     tcg_gen_or_i64(hi, hi, lo);
1879     tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], hi);
1881     tcg_temp_free_i64(hi);
1882     tcg_temp_free_i64(lo);
1883     tcg_temp_free_i64(t0);
1884     tcg_temp_free_i64(t1);
1886     return true;
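/*
 * For reference, the nonzero entries of the table above can be cross-checked
 * offline with a host-side version of the mask() routine quoted in the
 * comment (a sketch only, not part of the build; entries that the
 * translation loop skips anyway, i.e. m == 0 or sh >= 64, are simply stored
 * as 0 in the table):
 *
 *     #include <stdint.h>
 *     #include <stdio.h>
 *     #include <inttypes.h>
 *
 *     static uint64_t ror64(uint64_t x, unsigned r)
 *     {
 *         return (x >> (r & 63)) | (x << (-r & 63));
 *     }
 *
 *     static uint64_t mask(uint64_t n, uint64_t step)
 *     {
 *         uint64_t p = ((1ULL << (1ULL << step)) - 1ULL) << ((n - 1ULL) << step);
 *         uint64_t plen = n << step, m = 0;
 *
 *         for (uint64_t i = 0; i < 64 / plen; i++) {
 *             m |= p;
 *             m = ror64(m, plen);
 *         }
 *         p >>= plen * ((64 + plen - 1) / plen) - 64;
 *         return m | p;
 *     }
 *
 *     int main(void)
 *     {
 *         printf("0x%016" PRIx64 "\n", mask(3, 0));  // 0x9249249249249249
 *         return 0;
 *     }
 */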
1889 static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right,
1890                void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv))
1892     TCGv_ptr vrt, vra, vrb;
1893     TCGv rc;
1895     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1896     REQUIRE_VECTOR(ctx);
1898     vrt = gen_avr_ptr(a->vrt);
1899     vra = gen_avr_ptr(a->vra);
1900     vrb = gen_avr_ptr(a->vrb);
1901     rc = tcg_temp_new();
1903     tcg_gen_andi_tl(rc, cpu_gpr[a->rc], 0x1F);
1904     if (right) {
1905         tcg_gen_subfi_tl(rc, 32 - size, rc);
1906     }
1907     gen_helper(cpu_env, vrt, vra, vrb, rc);
1909     tcg_temp_free_ptr(vrt);
1910     tcg_temp_free_ptr(vra);
1911     tcg_temp_free_ptr(vrb);
1912     tcg_temp_free(rc);
1913     return true;
1916 TRANS(VEXTDUBVLX, do_vextdx, 1, false, gen_helper_VEXTDUBVLX)
1917 TRANS(VEXTDUHVLX, do_vextdx, 2, false, gen_helper_VEXTDUHVLX)
1918 TRANS(VEXTDUWVLX, do_vextdx, 4, false, gen_helper_VEXTDUWVLX)
1919 TRANS(VEXTDDVLX, do_vextdx, 8, false, gen_helper_VEXTDDVLX)
1921 TRANS(VEXTDUBVRX, do_vextdx, 1, true, gen_helper_VEXTDUBVLX)
1922 TRANS(VEXTDUHVRX, do_vextdx, 2, true, gen_helper_VEXTDUHVLX)
1923 TRANS(VEXTDUWVRX, do_vextdx, 4, true, gen_helper_VEXTDUWVLX)
1924 TRANS(VEXTDDVRX, do_vextdx, 8, true, gen_helper_VEXTDDVLX)
1926 static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
1927             TCGv_i64 rb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1929     TCGv_ptr t;
1930     TCGv idx;
1932     t = gen_avr_ptr(vrt);
1933     idx = tcg_temp_new();
1935     tcg_gen_andi_tl(idx, ra, 0xF);
1936     if (right) {
1937         tcg_gen_subfi_tl(idx, 16 - size, idx);
1938     }
1940     gen_helper(cpu_env, t, rb, idx);
1942     tcg_temp_free_ptr(t);
1943     tcg_temp_free(idx);
1945     return true;
1948 static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
1949                 int vrb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1951     bool ok;
1952     TCGv_i64 val;
1954     val = tcg_temp_new_i64();
1955     get_avr64(val, vrb, true);
1956     ok = do_vinsx(ctx, vrt, size, right, ra, val, gen_helper);
1958     tcg_temp_free_i64(val);
1959     return ok;
1962 static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
1963                         void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1965     bool ok;
1966     TCGv_i64 val;
1968     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1969     REQUIRE_VECTOR(ctx);
1971     val = tcg_temp_new_i64();
1972     tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
1974     ok = do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper);
1976     tcg_temp_free_i64(val);
1977     return ok;
1980 static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
1981                         void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1983     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1984     REQUIRE_VECTOR(ctx);
1986     return do_vinsvx(ctx, a->vrt, size, right, cpu_gpr[a->vra], a->vrb,
1987                      gen_helper);
1990 static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
1991                         void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
1993     bool ok;
1994     TCGv_i64 val;
1996     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
1997     REQUIRE_VECTOR(ctx);
1999     if (a->uim > (16 - size)) {
2000         /*
2001          * PowerISA v3.1 says that the resulting value is undefined in this
2002          * case, so just log a guest error and leave VRT unchanged. The
2003          * real hardware would do a partial insert, e.g. if VRT is zeroed and
2004          * RB is 0x12345678, executing "vinsw VRT,RB,14" results in
2005          * VRT = 0x0000...00001234, but we don't bother to reproduce this
2006          * behavior as software shouldn't rely on it.
2007          */
2008         qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINS* at"
2009             " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
2010             16 - size);
2011         return true;
2012     }
2014     val = tcg_temp_new_i64();
2015     tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
2017     ok = do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val,
2018                   gen_helper);
2020     tcg_temp_free_i64(val);
2021     return ok;
2024 static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
2025                         void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
2027     REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2028     REQUIRE_VECTOR(ctx);
2030     if (a->uim > (16 - size)) {
2031         qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINSERT* at"
2032             " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
2033             16 - size);
2034         return true;
2035     }
2037     return do_vinsvx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), a->vrb,
2038                      gen_helper);
2041 TRANS(VINSBLX, do_vinsx_VX, 1, false, gen_helper_VINSBLX)
2042 TRANS(VINSHLX, do_vinsx_VX, 2, false, gen_helper_VINSHLX)
2043 TRANS(VINSWLX, do_vinsx_VX, 4, false, gen_helper_VINSWLX)
2044 TRANS(VINSDLX, do_vinsx_VX, 8, false, gen_helper_VINSDLX)
2046 TRANS(VINSBRX, do_vinsx_VX, 1, true, gen_helper_VINSBLX)
2047 TRANS(VINSHRX, do_vinsx_VX, 2, true, gen_helper_VINSHLX)
2048 TRANS(VINSWRX, do_vinsx_VX, 4, true, gen_helper_VINSWLX)
2049 TRANS(VINSDRX, do_vinsx_VX, 8, true, gen_helper_VINSDLX)
2051 TRANS(VINSW, do_vins_VX_uim4, 4, gen_helper_VINSWLX)
2052 TRANS(VINSD, do_vins_VX_uim4, 8, gen_helper_VINSDLX)
2054 TRANS(VINSBVLX, do_vinsvx_VX, 1, false, gen_helper_VINSBLX)
2055 TRANS(VINSHVLX, do_vinsvx_VX, 2, false, gen_helper_VINSHLX)
2056 TRANS(VINSWVLX, do_vinsvx_VX, 4, false, gen_helper_VINSWLX)
2058 TRANS(VINSBVRX, do_vinsvx_VX, 1, true, gen_helper_VINSBLX)
2059 TRANS(VINSHVRX, do_vinsvx_VX, 2, true, gen_helper_VINSHLX)
2060 TRANS(VINSWVRX, do_vinsvx_VX, 4, true, gen_helper_VINSWLX)
2062 TRANS(VINSERTB, do_vinsert_VX_uim4, 1, gen_helper_VINSBLX)
2063 TRANS(VINSERTH, do_vinsert_VX_uim4, 2, gen_helper_VINSHLX)
2064 TRANS(VINSERTW, do_vinsert_VX_uim4, 4, gen_helper_VINSWLX)
2065 TRANS(VINSERTD, do_vinsert_VX_uim4, 8, gen_helper_VINSDLX)
2067 static void gen_vsldoi(DisasContext *ctx)
2069     TCGv_ptr ra, rb, rd;
2070     TCGv_i32 sh;
2071     if (unlikely(!ctx->altivec_enabled)) {
2072         gen_exception(ctx, POWERPC_EXCP_VPU);
2073         return;
2074     }
2075     ra = gen_avr_ptr(rA(ctx->opcode));
2076     rb = gen_avr_ptr(rB(ctx->opcode));
2077     rd = gen_avr_ptr(rD(ctx->opcode));
2078     sh = tcg_const_i32(VSH(ctx->opcode));
2079     gen_helper_vsldoi(rd, ra, rb, sh);
2080     tcg_temp_free_ptr(ra);
2081     tcg_temp_free_ptr(rb);
2082     tcg_temp_free_ptr(rd);
2083     tcg_temp_free_i32(sh);
2086 static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a)
2088     TCGv_i64 t0, t1, t2;
2090     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2091     REQUIRE_VECTOR(ctx);
2093     t0 = tcg_temp_new_i64();
2094     t1 = tcg_temp_new_i64();
2096     get_avr64(t0, a->vra, true);
2097     get_avr64(t1, a->vra, false);
2099     if (a->sh != 0) {
2100         t2 = tcg_temp_new_i64();
2102         get_avr64(t2, a->vrb, true);
2104         tcg_gen_extract2_i64(t0, t1, t0, 64 - a->sh);
2105         tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh);
2107         tcg_temp_free_i64(t2);
2108     }
2110     set_avr64(a->vrt, t0, true);
2111     set_avr64(a->vrt, t1, false);
2113     tcg_temp_free_i64(t0);
2114     tcg_temp_free_i64(t1);
2116     return true;
2119 static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a)
2121     TCGv_i64 t2, t1, t0;
2123     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2124     REQUIRE_VECTOR(ctx);
2126     t0 = tcg_temp_new_i64();
2127     t1 = tcg_temp_new_i64();
2129     get_avr64(t0, a->vrb, false);
2130     get_avr64(t1, a->vrb, true);
2132     if (a->sh != 0) {
2133         t2 = tcg_temp_new_i64();
2135         get_avr64(t2, a->vra, false);
2137         tcg_gen_extract2_i64(t0, t0, t1, a->sh);
2138         tcg_gen_extract2_i64(t1, t1, t2, a->sh);
2140         tcg_temp_free_i64(t2);
2141     }
2143     set_avr64(a->vrt, t0, false);
2144     set_avr64(a->vrt, t1, true);
2146     tcg_temp_free_i64(t0);
2147     tcg_temp_free_i64(t1);
2149     return true;
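/*
 * Both VSLDBI and VSRDBI above build the 128-bit funnel shift out of two
 * tcg_gen_extract2_i64() calls. As a rough illustration for VSLDBI with
 * sh = 8: extract2(t0, t1, t0, 56) yields (vra.hi << 8) | (vra.lo >> 56)
 * and extract2(t1, t2, t1, 56) yields (vra.lo << 8) | (vrb.hi >> 56),
 * i.e. the leftmost 128 bits of (VRA || VRB) << 8.
 */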
2152 static bool do_vexpand(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
2154     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2155     REQUIRE_VECTOR(ctx);
2157     tcg_gen_gvec_sari(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb),
2158                       (8 << vece) - 1, 16, 16);
2160     return true;
2163 TRANS(VEXPANDBM, do_vexpand, MO_8)
2164 TRANS(VEXPANDHM, do_vexpand, MO_16)
2165 TRANS(VEXPANDWM, do_vexpand, MO_32)
2166 TRANS(VEXPANDDM, do_vexpand, MO_64)
2168 static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a)
2170     TCGv_i64 tmp;
2172     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2173     REQUIRE_VECTOR(ctx);
2175     tmp = tcg_temp_new_i64();
2177     get_avr64(tmp, a->vrb, true);
2178     tcg_gen_sari_i64(tmp, tmp, 63);
2179     set_avr64(a->vrt, tmp, false);
2180     set_avr64(a->vrt, tmp, true);
2182     tcg_temp_free_i64(tmp);
2183     return true;
2186 static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
2188     const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece,
2189                    mask = dup_const(vece, 1 << (elem_width - 1));
2190     uint64_t i, j;
2191     TCGv_i64 lo, hi, t0, t1;
2193     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2194     REQUIRE_VECTOR(ctx);
2196     hi = tcg_temp_new_i64();
2197     lo = tcg_temp_new_i64();
2198     t0 = tcg_temp_new_i64();
2199     t1 = tcg_temp_new_i64();
2201     get_avr64(lo, a->vrb, false);
2202     get_avr64(hi, a->vrb, true);
2204     tcg_gen_andi_i64(lo, lo, mask);
2205     tcg_gen_andi_i64(hi, hi, mask);
2207     /*
2208      * Gather the most significant bit of each element into the highest
2209      * bits of each doubleword. E.g. for bytes:
2210      * aXXXXXXXbXXXXXXXcXXXXXXXdXXXXXXXeXXXXXXXfXXXXXXXgXXXXXXXhXXXXXXX
2211      *     & dup(1 << (elem_width - 1))
2212      * a0000000b0000000c0000000d0000000e0000000f0000000g0000000h0000000
2213      *     << 32 - 4
2214      * 0000e0000000f0000000g0000000h00000000000000000000000000000000000
2215      *     |
2216      * a000e000b000f000c000g000d000h000e0000000f0000000g0000000h0000000
2217      *     << 16 - 2
2218      * 00c000g000d000h000e0000000f0000000g0000000h000000000000000000000
2219      *     |
2220      * a0c0e0g0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h0000000
2221      *     << 8 - 1
2222      * 0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h00000000000000
2223      *     |
2224      * abcdefghbcdefgh0cdefgh00defgh000efgh0000fgh00000gh000000h0000000
2225      */
2226     for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
2227         tcg_gen_shli_i64(t0, hi, j - i);
2228         tcg_gen_shli_i64(t1, lo, j - i);
2229         tcg_gen_or_i64(hi, hi, t0);
2230         tcg_gen_or_i64(lo, lo, t1);
2231     }
2233     tcg_gen_shri_i64(hi, hi, 64 - elem_count_half);
2234     tcg_gen_extract2_i64(lo, lo, hi, 64 - elem_count_half);
2235     tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], lo);
2237     tcg_temp_free_i64(hi);
2238     tcg_temp_free_i64(lo);
2239     tcg_temp_free_i64(t0);
2240     tcg_temp_free_i64(t1);
2242     return true;
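/*
 * A worked example of the gathering above for VEXTRACTWM (vece = MO_32),
 * for illustration: after masking, each doubleword keeps the MSBs of its
 * two words at bits 63 and 31. The single loop iteration (shift by
 * 32 - 1 = 31) moves them up to bits 63..62, the shift right by 64 - 2 = 62
 * brings them down to bits 1..0, and tcg_gen_extract2_i64() packs the low
 * doubleword's pair below the high doubleword's pair, leaving a 4-bit
 * element mask in the target GPR.
 */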
2245 TRANS(VEXTRACTBM, do_vextractm, MO_8)
2246 TRANS(VEXTRACTHM, do_vextractm, MO_16)
2247 TRANS(VEXTRACTWM, do_vextractm, MO_32)
2248 TRANS(VEXTRACTDM, do_vextractm, MO_64)
2250 static bool trans_VEXTRACTQM(DisasContext *ctx, arg_VX_tb *a)
2252     TCGv_i64 tmp;
2254     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2255     REQUIRE_VECTOR(ctx);
2257     tmp = tcg_temp_new_i64();
2259     get_avr64(tmp, a->vrb, true);
2260     tcg_gen_shri_i64(tmp, tmp, 63);
2261     tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], tmp);
2263     tcg_temp_free_i64(tmp);
2265     return true;
2268 static bool do_mtvsrm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
2270     const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece;
2271     uint64_t c;
2272     int i, j;
2273     TCGv_i64 hi, lo, t0, t1;
2275     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2276     REQUIRE_VECTOR(ctx);
2278     hi = tcg_temp_new_i64();
2279     lo = tcg_temp_new_i64();
2280     t0 = tcg_temp_new_i64();
2281     t1 = tcg_temp_new_i64();
2283     tcg_gen_extu_tl_i64(t0, cpu_gpr[a->vrb]);
2284     tcg_gen_extract_i64(hi, t0, elem_count_half, elem_count_half);
2285     tcg_gen_extract_i64(lo, t0, 0, elem_count_half);
2287     /*
2288      * Spread the bits into their respective elements.
2289      * E.g. for bytes:
2290      * 00000000000000000000000000000000000000000000000000000000abcdefgh
2291      *   << 32 - 4
2292      * 0000000000000000000000000000abcdefgh0000000000000000000000000000
2293      *   |
2294      * 0000000000000000000000000000abcdefgh00000000000000000000abcdefgh
2295      *   << 16 - 2
2296      * 00000000000000abcdefgh00000000000000000000abcdefgh00000000000000
2297      *   |
2298      * 00000000000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh
2299      *   << 8 - 1
2300      * 0000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh0000000
2301      *   |
2302      * 0000000abcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgh
2303      *   & dup(1)
2304      * 0000000a0000000b0000000c0000000d0000000e0000000f0000000g0000000h
2305      *   * 0xff
2306      * aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh
2307      */
2308     for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
2309         tcg_gen_shli_i64(t0, hi, j - i);
2310         tcg_gen_shli_i64(t1, lo, j - i);
2311         tcg_gen_or_i64(hi, hi, t0);
2312         tcg_gen_or_i64(lo, lo, t1);
2313     }
2315     c = dup_const(vece, 1);
2316     tcg_gen_andi_i64(hi, hi, c);
2317     tcg_gen_andi_i64(lo, lo, c);
2319     c = MAKE_64BIT_MASK(0, elem_width);
2320     tcg_gen_muli_i64(hi, hi, c);
2321     tcg_gen_muli_i64(lo, lo, c);
2323     set_avr64(a->vrt, lo, false);
2324     set_avr64(a->vrt, hi, true);
2326     tcg_temp_free_i64(hi);
2327     tcg_temp_free_i64(lo);
2328     tcg_temp_free_i64(t0);
2329     tcg_temp_free_i64(t1);
2331     return true;
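/*
 * A worked example of the expansion above for MTVSRHM (vece = MO_16), for
 * illustration: elem_count_half = 4, so bits 7..4 of the GPR feed the high
 * doubleword and bits 3..0 the low one. After the spreading loop and the
 * dup_const(MO_16, 1) mask, each surviving bit sits at the bottom of its
 * halfword, and multiplying by MAKE_64BIT_MASK(0, 16) == 0xffff turns every
 * set bit into an all-ones halfword element.
 */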
2334 TRANS(MTVSRBM, do_mtvsrm, MO_8)
2335 TRANS(MTVSRHM, do_mtvsrm, MO_16)
2336 TRANS(MTVSRWM, do_mtvsrm, MO_32)
2337 TRANS(MTVSRDM, do_mtvsrm, MO_64)
2339 static bool trans_MTVSRQM(DisasContext *ctx, arg_VX_tb *a)
2341     TCGv_i64 tmp;
2343     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2344     REQUIRE_VECTOR(ctx);
2346     tmp = tcg_temp_new_i64();
2348     tcg_gen_ext_tl_i64(tmp, cpu_gpr[a->vrb]);
2349     tcg_gen_sextract_i64(tmp, tmp, 0, 1);
2350     set_avr64(a->vrt, tmp, false);
2351     set_avr64(a->vrt, tmp, true);
2353     tcg_temp_free_i64(tmp);
2355     return true;
2358 static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a)
2360     const uint64_t mask = dup_const(MO_8, 1);
2361     uint64_t hi, lo;
2363     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2364     REQUIRE_VECTOR(ctx);
2366     hi = extract16(a->b, 8, 8);
2367     lo = extract16(a->b, 0, 8);
2369     for (int i = 4, j = 32; i > 0; i >>= 1, j >>= 1) {
2370         hi |= hi << (j - i);
2371         lo |= lo << (j - i);
2372     }
2374     hi = (hi & mask) * 0xFF;
2375     lo = (lo & mask) * 0xFF;
2377     set_avr64(a->vrt, tcg_constant_i64(hi), true);
2378     set_avr64(a->vrt, tcg_constant_i64(lo), false);
2380     return true;
2383 static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece)
2385     TCGv_i64 rt, vrb, mask;
2386     rt = tcg_const_i64(0);
2387     vrb = tcg_temp_new_i64();
2388     mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1)));
2390     for (int i = 0; i < 2; i++) {
2391         get_avr64(vrb, a->vrb, i);
2392         if (a->mp) {
2393             tcg_gen_and_i64(vrb, mask, vrb);
2394         } else {
2395             tcg_gen_andc_i64(vrb, mask, vrb);
2396         }
2397         tcg_gen_ctpop_i64(vrb, vrb);
2398         tcg_gen_add_i64(rt, rt, vrb);
2399     }
2401     tcg_gen_shli_i64(rt, rt, TARGET_LONG_BITS - 8 + vece);
2402     tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], rt);
2404     tcg_temp_free_i64(vrb);
2405     tcg_temp_free_i64(rt);
2407     return true;
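/*
 * Note that the final shift above left-justifies the count in RT: on a
 * 64-bit target a count of n matching bytes yields RT = n << 56 for
 * VCNTMBB, and each doubling of the element size adds one to the shift
 * (<< 57 for halfwords, << 58 for words, << 59 for doublewords).
 */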
2410 TRANS(VCNTMBB, do_vcntmb, MO_8)
2411 TRANS(VCNTMBH, do_vcntmb, MO_16)
2412 TRANS(VCNTMBW, do_vcntmb, MO_32)
2413 TRANS(VCNTMBD, do_vcntmb, MO_64)
2415 static bool do_vstri(DisasContext *ctx, arg_VX_tb_rc *a,
2416                      void (*gen_helper)(TCGv_i32, TCGv_ptr, TCGv_ptr))
2418     TCGv_ptr vrt, vrb;
2420     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2421     REQUIRE_VECTOR(ctx);
2423     vrt = gen_avr_ptr(a->vrt);
2424     vrb = gen_avr_ptr(a->vrb);
2426     if (a->rc) {
2427         gen_helper(cpu_crf[6], vrt, vrb);
2428     } else {
2429         TCGv_i32 discard = tcg_temp_new_i32();
2430         gen_helper(discard, vrt, vrb);
2431         tcg_temp_free_i32(discard);
2432     }
2434     tcg_temp_free_ptr(vrt);
2435     tcg_temp_free_ptr(vrb);
2437     return true;
2440 TRANS(VSTRIBL, do_vstri, gen_helper_VSTRIBL)
2441 TRANS(VSTRIBR, do_vstri, gen_helper_VSTRIBR)
2442 TRANS(VSTRIHL, do_vstri, gen_helper_VSTRIHL)
2443 TRANS(VSTRIHR, do_vstri, gen_helper_VSTRIHR)
2445 static bool do_vclrb(DisasContext *ctx, arg_VX *a, bool right)
2447     TCGv_i64 rb, mh, ml, tmp,
2448              ones = tcg_constant_i64(-1),
2449              zero = tcg_constant_i64(0);
2451     rb = tcg_temp_new_i64();
2452     mh = tcg_temp_new_i64();
2453     ml = tcg_temp_new_i64();
2454     tmp = tcg_temp_new_i64();
2456     tcg_gen_extu_tl_i64(rb, cpu_gpr[a->vrb]);
2457     tcg_gen_andi_i64(tmp, rb, 7);
2458     tcg_gen_shli_i64(tmp, tmp, 3);
2459     if (right) {
2460         tcg_gen_shr_i64(tmp, ones, tmp);
2461     } else {
2462         tcg_gen_shl_i64(tmp, ones, tmp);
2463     }
2464     tcg_gen_not_i64(tmp, tmp);
2466     if (right) {
2467         tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
2468                             tmp, ones);
2469         tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
2470                             zero, tmp);
2471         tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(16),
2472                             ml, ones);
2473     } else {
2474         tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
2475                             tmp, ones);
2476         tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
2477                             zero, tmp);
2478         tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(16),
2479                             mh, ones);
2480     }
2482     get_avr64(tmp, a->vra, true);
2483     tcg_gen_and_i64(tmp, tmp, mh);
2484     set_avr64(a->vrt, tmp, true);
2486     get_avr64(tmp, a->vra, false);
2487     tcg_gen_and_i64(tmp, tmp, ml);
2488     set_avr64(a->vrt, tmp, false);
2490     tcg_temp_free_i64(rb);
2491     tcg_temp_free_i64(mh);
2492     tcg_temp_free_i64(ml);
2493     tcg_temp_free_i64(tmp);
2495     return true;
2498 TRANS(VCLRLB, do_vclrb, false)
2499 TRANS(VCLRRB, do_vclrb, true)
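/*
 * A worked example of the masks built in do_vclrb, for illustration:
 * VCLRLB with GPR[RB] = 3 gives tmp = ~(-1 << 24) = 0x0000000000ffffff;
 * rb < 8, so ml = tmp and mh = 0, i.e. only the three least-significant
 * bytes of VRA survive and the leftmost thirteen are cleared. VCLRRB with
 * the same value mirrors this and keeps the three most-significant bytes.
 */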
2501 #define GEN_VAFORM_PAIRED(name0, name1, opc2)                           \
2502 static void glue(gen_, name0##_##name1)(DisasContext *ctx)              \
2503     {                                                                   \
2504         TCGv_ptr ra, rb, rc, rd;                                        \
2505         if (unlikely(!ctx->altivec_enabled)) {                          \
2506             gen_exception(ctx, POWERPC_EXCP_VPU);                       \
2507             return;                                                     \
2508         }                                                               \
2509         ra = gen_avr_ptr(rA(ctx->opcode));                              \
2510         rb = gen_avr_ptr(rB(ctx->opcode));                              \
2511         rc = gen_avr_ptr(rC(ctx->opcode));                              \
2512         rd = gen_avr_ptr(rD(ctx->opcode));                              \
2513         if (Rc(ctx->opcode)) {                                          \
2514             gen_helper_##name1(cpu_env, rd, ra, rb, rc);                \
2515         } else {                                                        \
2516             gen_helper_##name0(cpu_env, rd, ra, rb, rc);                \
2517         }                                                               \
2518         tcg_temp_free_ptr(ra);                                          \
2519         tcg_temp_free_ptr(rb);                                          \
2520         tcg_temp_free_ptr(rc);                                          \
2521         tcg_temp_free_ptr(rd);                                          \
2522     }
2524 GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16)
2526 static void gen_vmladduhm(DisasContext *ctx)
2528     TCGv_ptr ra, rb, rc, rd;
2529     if (unlikely(!ctx->altivec_enabled)) {
2530         gen_exception(ctx, POWERPC_EXCP_VPU);
2531         return;
2532     }
2533     ra = gen_avr_ptr(rA(ctx->opcode));
2534     rb = gen_avr_ptr(rB(ctx->opcode));
2535     rc = gen_avr_ptr(rC(ctx->opcode));
2536     rd = gen_avr_ptr(rD(ctx->opcode));
2537     gen_helper_vmladduhm(rd, ra, rb, rc);
2538     tcg_temp_free_ptr(ra);
2539     tcg_temp_free_ptr(rb);
2540     tcg_temp_free_ptr(rc);
2541     tcg_temp_free_ptr(rd);
2544 static bool do_va_helper(DisasContext *ctx, arg_VA *a,
2545     void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
2547     TCGv_ptr vrt, vra, vrb, vrc;
2548     REQUIRE_VECTOR(ctx);
2550     vrt = gen_avr_ptr(a->vrt);
2551     vra = gen_avr_ptr(a->vra);
2552     vrb = gen_avr_ptr(a->vrb);
2553     vrc = gen_avr_ptr(a->rc);
2554     gen_helper(vrt, vra, vrb, vrc);
2555     tcg_temp_free_ptr(vrt);
2556     tcg_temp_free_ptr(vra);
2557     tcg_temp_free_ptr(vrb);
2558     tcg_temp_free_ptr(vrc);
2560     return true;
2563 TRANS_FLAGS2(ALTIVEC_207, VADDECUQ, do_va_helper, gen_helper_VADDECUQ)
2564 TRANS_FLAGS2(ALTIVEC_207, VADDEUQM, do_va_helper, gen_helper_VADDEUQM)
2566 TRANS_FLAGS2(ALTIVEC_207, VSUBEUQM, do_va_helper, gen_helper_VSUBEUQM)
2567 TRANS_FLAGS2(ALTIVEC_207, VSUBECUQ, do_va_helper, gen_helper_VSUBECUQ)
2569 TRANS_FLAGS(ALTIVEC, VPERM, do_va_helper, gen_helper_VPERM)
2570 TRANS_FLAGS2(ISA300, VPERMR, do_va_helper, gen_helper_VPERMR)
2572 static bool trans_VSEL(DisasContext *ctx, arg_VA *a)
2574     REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
2575     REQUIRE_VECTOR(ctx);
2577     tcg_gen_gvec_bitsel(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->rc),
2578                         avr_full_offset(a->vrb), avr_full_offset(a->vra),
2579                         16, 16);
2581     return true;
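/*
 * Note the operand order above: tcg_gen_gvec_bitsel(vece, d, a, b, c)
 * computes d = (b & a) | (c & ~a), so passing VRC as the selector with VRB
 * and VRA as the other two operands yields the VSEL result
 * VRT = (VRB & VRC) | (VRA & ~VRC).
 */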
2584 TRANS_FLAGS(ALTIVEC, VMSUMUBM, do_va_helper, gen_helper_VMSUMUBM)
2585 TRANS_FLAGS(ALTIVEC, VMSUMMBM, do_va_helper, gen_helper_VMSUMMBM)
2586 TRANS_FLAGS(ALTIVEC, VMSUMSHM, do_va_helper, gen_helper_VMSUMSHM)
2587 TRANS_FLAGS(ALTIVEC, VMSUMUHM, do_va_helper, gen_helper_VMSUMUHM)
2589 static bool do_va_env_helper(DisasContext *ctx, arg_VA *a,
2590     void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
2592     TCGv_ptr vrt, vra, vrb, vrc;
2593     REQUIRE_VECTOR(ctx);
2595     vrt = gen_avr_ptr(a->vrt);
2596     vra = gen_avr_ptr(a->vra);
2597     vrb = gen_avr_ptr(a->vrb);
2598     vrc = gen_avr_ptr(a->rc);
2599     gen_helper(cpu_env, vrt, vra, vrb, vrc);
2600     tcg_temp_free_ptr(vrt);
2601     tcg_temp_free_ptr(vra);
2602     tcg_temp_free_ptr(vrb);
2603     tcg_temp_free_ptr(vrc);
2605     return true;
2608 TRANS_FLAGS(ALTIVEC, VMSUMUHS, do_va_env_helper, gen_helper_VMSUMUHS)
2609 TRANS_FLAGS(ALTIVEC, VMSUMSHS, do_va_env_helper, gen_helper_VMSUMSHS)
2611 GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)
2613 GEN_VXFORM_NOA(vclzb, 1, 28)
2614 GEN_VXFORM_NOA(vclzh, 1, 29)
2615 GEN_VXFORM_TRANS(vclzw, 1, 30)
2616 GEN_VXFORM_TRANS(vclzd, 1, 31)
2617 GEN_VXFORM_NOA_2(vnegw, 1, 24, 6)
2618 GEN_VXFORM_NOA_2(vnegd, 1, 24, 7)
2620 static void gen_vexts_i64(TCGv_i64 t, TCGv_i64 b, int64_t s)
2622     tcg_gen_sextract_i64(t, b, 0, 64 - s);
2625 static void gen_vexts_i32(TCGv_i32 t, TCGv_i32 b, int32_t s)
2627     tcg_gen_sextract_i32(t, b, 0, 32 - s);
2630 static void gen_vexts_vec(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t s)
2632     tcg_gen_shli_vec(vece, t, b, s);
2633     tcg_gen_sari_vec(vece, t, t, s);
2636 static bool do_vexts(DisasContext *ctx, arg_VX_tb *a, unsigned vece, int64_t s)
2638     static const TCGOpcode vecop_list[] = {
2639         INDEX_op_shli_vec, INDEX_op_sari_vec, 0
2640     };
2642     static const GVecGen2i op[2] = {
2643         {
2644             .fni4 = gen_vexts_i32,
2645             .fniv = gen_vexts_vec,
2646             .opt_opc = vecop_list,
2647             .vece = MO_32
2648         },
2649         {
2650             .fni8 = gen_vexts_i64,
2651             .fniv = gen_vexts_vec,
2652             .opt_opc = vecop_list,
2653             .vece = MO_64
2654         },
2655     };
2657     REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2658     REQUIRE_VECTOR(ctx);
2660     tcg_gen_gvec_2i(avr_full_offset(a->vrt), avr_full_offset(a->vrb),
2661                     16, 16, s, &op[vece - MO_32]);
2663     return true;
2666 TRANS(VEXTSB2W, do_vexts, MO_32, 24);
2667 TRANS(VEXTSH2W, do_vexts, MO_32, 16);
2668 TRANS(VEXTSB2D, do_vexts, MO_64, 56);
2669 TRANS(VEXTSH2D, do_vexts, MO_64, 48);
2670 TRANS(VEXTSW2D, do_vexts, MO_64, 32);
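/*
 * The s argument above is the number of high bits discarded by the sign
 * extension: e.g. VEXTSB2D passes 56, so the integer expansion sign-extracts
 * the low 64 - 56 = 8 bits of each doubleword, while the vector expansion
 * gets the same result from a shift left by 56 followed by an arithmetic
 * shift right by 56.
 */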
2672 static bool trans_VEXTSD2Q(DisasContext *ctx, arg_VX_tb *a)
2674     TCGv_i64 tmp;
2676     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2677     REQUIRE_VECTOR(ctx);
2679     tmp = tcg_temp_new_i64();
2681     get_avr64(tmp, a->vrb, false);
2682     set_avr64(a->vrt, tmp, false);
2683     tcg_gen_sari_i64(tmp, tmp, 63);
2684     set_avr64(a->vrt, tmp, true);
2686     tcg_temp_free_i64(tmp);
2687     return true;
2690 GEN_VXFORM_NOA_2(vctzb, 1, 24, 28)
2691 GEN_VXFORM_NOA_2(vctzh, 1, 24, 29)
2692 GEN_VXFORM_NOA_2(vctzw, 1, 24, 30)
2693 GEN_VXFORM_NOA_2(vctzd, 1, 24, 31)
2694 GEN_VXFORM_NOA_3(vclzlsbb, 1, 24, 0)
2695 GEN_VXFORM_NOA_3(vctzlsbb, 1, 24, 1)
2696 GEN_VXFORM_NOA(vpopcntb, 1, 28)
2697 GEN_VXFORM_NOA(vpopcnth, 1, 29)
2698 GEN_VXFORM_NOA(vpopcntw, 1, 30)
2699 GEN_VXFORM_NOA(vpopcntd, 1, 31)
2700 GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \
2701                 vpopcntb, PPC_NONE, PPC2_ALTIVEC_207)
2702 GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \
2703                 vpopcnth, PPC_NONE, PPC2_ALTIVEC_207)
2704 GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
2705                 vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
2706 GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
2707                 vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
2708 GEN_VXFORM(vbpermd, 6, 23);
2709 GEN_VXFORM(vbpermq, 6, 21);
2710 GEN_VXFORM_TRANS(vgbbd, 6, 20);
2711 GEN_VXFORM(vpmsumb, 4, 16)
2712 GEN_VXFORM(vpmsumh, 4, 17)
2713 GEN_VXFORM(vpmsumw, 4, 18)
2715 #define GEN_BCD(op)                                 \
2716 static void gen_##op(DisasContext *ctx)             \
2717 {                                                   \
2718     TCGv_ptr ra, rb, rd;                            \
2719     TCGv_i32 ps;                                    \
2720                                                     \
2721     if (unlikely(!ctx->altivec_enabled)) {          \
2722         gen_exception(ctx, POWERPC_EXCP_VPU);       \
2723         return;                                     \
2724     }                                               \
2725                                                     \
2726     ra = gen_avr_ptr(rA(ctx->opcode));              \
2727     rb = gen_avr_ptr(rB(ctx->opcode));              \
2728     rd = gen_avr_ptr(rD(ctx->opcode));              \
2729                                                     \
2730     ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
2731                                                     \
2732     gen_helper_##op(cpu_crf[6], rd, ra, rb, ps);    \
2733                                                     \
2734     tcg_temp_free_ptr(ra);                          \
2735     tcg_temp_free_ptr(rb);                          \
2736     tcg_temp_free_ptr(rd);                          \
2737     tcg_temp_free_i32(ps);                          \
2740 #define GEN_BCD2(op)                                \
2741 static void gen_##op(DisasContext *ctx)             \
2742 {                                                   \
2743     TCGv_ptr rd, rb;                                \
2744     TCGv_i32 ps;                                    \
2745                                                     \
2746     if (unlikely(!ctx->altivec_enabled)) {          \
2747         gen_exception(ctx, POWERPC_EXCP_VPU);       \
2748         return;                                     \
2749     }                                               \
2750                                                     \
2751     rb = gen_avr_ptr(rB(ctx->opcode));              \
2752     rd = gen_avr_ptr(rD(ctx->opcode));              \
2753                                                     \
2754     ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
2755                                                     \
2756     gen_helper_##op(cpu_crf[6], rd, rb, ps);        \
2757                                                     \
2758     tcg_temp_free_ptr(rb);                          \
2759     tcg_temp_free_ptr(rd);                          \
2760     tcg_temp_free_i32(ps);                          \
2763 GEN_BCD(bcdadd)
2764 GEN_BCD(bcdsub)
2765 GEN_BCD2(bcdcfn)
2766 GEN_BCD2(bcdctn)
2767 GEN_BCD2(bcdcfz)
2768 GEN_BCD2(bcdctz)
2769 GEN_BCD2(bcdcfsq)
2770 GEN_BCD2(bcdctsq)
2771 GEN_BCD2(bcdsetsgn)
2772 GEN_BCD(bcdcpsgn);
2773 GEN_BCD(bcds);
2774 GEN_BCD(bcdus);
2775 GEN_BCD(bcdsr);
2776 GEN_BCD(bcdtrunc);
2777 GEN_BCD(bcdutrunc);
2779 static void gen_xpnd04_1(DisasContext *ctx)
2781     switch (opc4(ctx->opcode)) {
2782     case 0:
2783         gen_bcdctsq(ctx);
2784         break;
2785     case 2:
2786         gen_bcdcfsq(ctx);
2787         break;
2788     case 4:
2789         gen_bcdctz(ctx);
2790         break;
2791     case 5:
2792         gen_bcdctn(ctx);
2793         break;
2794     case 6:
2795         gen_bcdcfz(ctx);
2796         break;
2797     case 7:
2798         gen_bcdcfn(ctx);
2799         break;
2800     case 31:
2801         gen_bcdsetsgn(ctx);
2802         break;
2803     default:
2804         gen_invalid(ctx);
2805         break;
2806     }
2809 static void gen_xpnd04_2(DisasContext *ctx)
2811     switch (opc4(ctx->opcode)) {
2812     case 0:
2813         gen_bcdctsq(ctx);
2814         break;
2815     case 2:
2816         gen_bcdcfsq(ctx);
2817         break;
2818     case 4:
2819         gen_bcdctz(ctx);
2820         break;
2821     case 6:
2822         gen_bcdcfz(ctx);
2823         break;
2824     case 7:
2825         gen_bcdcfn(ctx);
2826         break;
2827     case 31:
2828         gen_bcdsetsgn(ctx);
2829         break;
2830     default:
2831         gen_invalid(ctx);
2832         break;
2833     }
2837 GEN_VXFORM_DUAL(vsubcuw, PPC_ALTIVEC, PPC_NONE, \
2838                 xpnd04_1, PPC_NONE, PPC2_ISA300)
2839 GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \
2840                 xpnd04_2, PPC_NONE, PPC2_ISA300)
2842 GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \
2843                 bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
2844 GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE, \
2845                 bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
2846 GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \
2847                 bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
2848 GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE, \
2849                 bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
2850 GEN_VXFORM_DUAL(vaddshs, PPC_ALTIVEC, PPC_NONE, \
2851                 bcdcpsgn, PPC_NONE, PPC2_ISA300)
2852 GEN_VXFORM_DUAL(vsubudm, PPC2_ALTIVEC_207, PPC_NONE, \
2853                 bcds, PPC_NONE, PPC2_ISA300)
2854 GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \
2855                 bcdus, PPC_NONE, PPC2_ISA300)
2856 GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \
2857                 bcdtrunc, PPC_NONE, PPC2_ISA300)
2859 static void gen_vsbox(DisasContext *ctx)
2860 {
2861     TCGv_ptr ra, rd;
2862     if (unlikely(!ctx->altivec_enabled)) {
2863         gen_exception(ctx, POWERPC_EXCP_VPU);
2864         return;
2865     }
2866     ra = gen_avr_ptr(rA(ctx->opcode));
2867     rd = gen_avr_ptr(rD(ctx->opcode));
2868     gen_helper_vsbox(rd, ra);
2869     tcg_temp_free_ptr(ra);
2870     tcg_temp_free_ptr(rd);
2871 }
2873 GEN_VXFORM(vcipher, 4, 20)
2874 GEN_VXFORM(vcipherlast, 4, 20)
2875 GEN_VXFORM(vncipher, 4, 21)
2876 GEN_VXFORM(vncipherlast, 4, 21)
2878 GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207,
2879                 vcipherlast, PPC_NONE, PPC2_ALTIVEC_207)
2880 GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207,
2881                 vncipherlast, PPC_NONE, PPC2_ALTIVEC_207)
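/*
 * SHA-2 sigma generators: the helpers receive the raw rB field of the
 * opcode ("st_six"), which packs the ST bit and the SIX selector defined
 * by the vshasigmaw/vshasigmad encoding, along with pointers to the full
 * source and destination vectors.
 */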
2883 #define VSHASIGMA(op)                         \
2884 static void gen_##op(DisasContext *ctx)       \
2885 {                                             \
2886     TCGv_ptr ra, rd;                          \
2887     TCGv_i32 st_six;                          \
2888     if (unlikely(!ctx->altivec_enabled)) {    \
2889         gen_exception(ctx, POWERPC_EXCP_VPU); \
2890         return;                               \
2891     }                                         \
2892     ra = gen_avr_ptr(rA(ctx->opcode));        \
2893     rd = gen_avr_ptr(rD(ctx->opcode));        \
2894     st_six = tcg_const_i32(rB(ctx->opcode));  \
2895     gen_helper_##op(rd, ra, st_six);          \
2896     tcg_temp_free_ptr(ra);                    \
2897     tcg_temp_free_ptr(rd);                    \
2898     tcg_temp_free_i32(st_six);                \
2899 }
2901 VSHASIGMA(vshasigmaw)
2902 VSHASIGMA(vshasigmad)
2904 GEN_VXFORM3(vpermxor, 22, 0xFF)
2905 GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
2906                 vpermxor, PPC_NONE, PPC2_ALTIVEC_207)
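/*
 * ISA 3.1 bit-manipulation instructions.  Each one operates independently
 * on the two 64-bit elements of the vector, so they are expressed as gvec
 * operations over the 16-byte register with a 64-bit element expander or
 * helper.
 */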
2908 static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a)
2909 {
2910     static const GVecGen3 g = {
2911         .fni8 = gen_helper_CFUGED,
2912         .vece = MO_64,
2913     };
2915     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2916     REQUIRE_VECTOR(ctx);
2918     tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2919                    avr_full_offset(a->vrb), 16, 16, &g);
2921     return true;
2922 }
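/*
 * VCLZDM and VCTZDM share the do_cntzdm expander; the immediate passed to
 * tcg_gen_gvec_3i (false vs. true) distinguishes the leading-zero and
 * trailing-zero variants.
 */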
2924 static bool trans_VCLZDM(DisasContext *ctx, arg_VX *a)
2925 {
2926     static const GVecGen3i g = {
2927         .fni8 = do_cntzdm,
2928         .vece = MO_64,
2929     };
2931     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2932     REQUIRE_VECTOR(ctx);
2934     tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2935                     avr_full_offset(a->vrb), 16, 16, false, &g);
2937     return true;
2938 }
2940 static bool trans_VCTZDM(DisasContext *ctx, arg_VX *a)
2941 {
2942     static const GVecGen3i g = {
2943         .fni8 = do_cntzdm,
2944         .vece = MO_64,
2945     };
2947     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2948     REQUIRE_VECTOR(ctx);
2950     tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2951                     avr_full_offset(a->vrb), 16, 16, true, &g);
2953     return true;
2954 }
2956 static bool trans_VPDEPD(DisasContext *ctx, arg_VX *a)
2957 {
2958     static const GVecGen3 g = {
2959         .fni8 = gen_helper_PDEPD,
2960         .vece = MO_64,
2961     };
2963     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2964     REQUIRE_VECTOR(ctx);
2966     tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2967                    avr_full_offset(a->vrb), 16, 16, &g);
2969     return true;
2970 }
2972 static bool trans_VPEXTD(DisasContext *ctx, arg_VX *a)
2973 {
2974     static const GVecGen3 g = {
2975         .fni8 = gen_helper_PEXTD,
2976         .vece = MO_64,
2977     };
2979     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
2980     REQUIRE_VECTOR(ctx);
2982     tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
2983                    avr_full_offset(a->vrb), 16, 16, &g);
2985     return true;
2986 }
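/*
 * vmsumudm: the 128-bit accumulator is seeded from VRC (kept in rh:rl).
 * Each loop iteration multiplies one pair of doublewords into a 128-bit
 * product with tcg_gen_mulu2_i64 and folds it into the accumulator with a
 * double-word add-with-carry (tcg_gen_add2_i64).
 */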
2988 static bool trans_VMSUMUDM(DisasContext *ctx, arg_VA *a)
2989 {
2990     TCGv_i64 rl, rh, src1, src2;
2991     int dw;
2993     REQUIRE_INSNS_FLAGS2(ctx, ISA300);
2994     REQUIRE_VECTOR(ctx);
2996     rh = tcg_temp_new_i64();
2997     rl = tcg_temp_new_i64();
2998     src1 = tcg_temp_new_i64();
2999     src2 = tcg_temp_new_i64();
3001     get_avr64(rl, a->rc, false);
3002     get_avr64(rh, a->rc, true);
3004     for (dw = 0; dw < 2; dw++) {
3005         get_avr64(src1, a->vra, dw);
3006         get_avr64(src2, a->vrb, dw);
3007         tcg_gen_mulu2_i64(src1, src2, src1, src2);
3008         tcg_gen_add2_i64(rl, rh, rl, rh, src1, src2);
3009     }
3011     set_avr64(a->vrt, rl, false);
3012     set_avr64(a->vrt, rh, true);
3014     tcg_temp_free_i64(rl);
3015     tcg_temp_free_i64(rh);
3016     tcg_temp_free_i64(src1);
3017     tcg_temp_free_i64(src2);
3019     return true;
3020 }
3022 static bool trans_VMSUMCUD(DisasContext *ctx, arg_VA *a)
3023 {
3024     TCGv_i64 tmp0, tmp1, prod1h, prod1l, prod0h, prod0l, zero;
3026     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
3027     REQUIRE_VECTOR(ctx);
3029     tmp0 = tcg_temp_new_i64();
3030     tmp1 = tcg_temp_new_i64();
3031     prod1h = tcg_temp_new_i64();
3032     prod1l = tcg_temp_new_i64();
3033     prod0h = tcg_temp_new_i64();
3034     prod0l = tcg_temp_new_i64();
3035     zero = tcg_constant_i64(0);
3037     /* prod1 = vsr[vra+32].dw[1] * vsr[vrb+32].dw[1] */
3038     get_avr64(tmp0, a->vra, false);
3039     get_avr64(tmp1, a->vrb, false);
3040     tcg_gen_mulu2_i64(prod1l, prod1h, tmp0, tmp1);
3042     /* prod0 = vsr[vra+32].dw[0] * vsr[vrb+32].dw[0] */
3043     get_avr64(tmp0, a->vra, true);
3044     get_avr64(tmp1, a->vrb, true);
3045     tcg_gen_mulu2_i64(prod0l, prod0h, tmp0, tmp1);
3047     /* Sum the lower 64-bit elements */
3048     get_avr64(tmp1, a->rc, false);
3049     tcg_gen_add2_i64(tmp1, tmp0, tmp1, zero, prod1l, zero);
3050     tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0l, zero);
3052     /*
3053      * Discard the lower 64 bits, leaving the carry into bit 64.
3054      * Then sum the higher 64-bit elements.
3055      */
3056     get_avr64(tmp1, a->rc, true);
3057     tcg_gen_add2_i64(tmp1, tmp0, tmp0, zero, tmp1, zero);
3058     tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod1h, zero);
3059     tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0h, zero);
3061     /* Discard 64 more bits to complete the CHOP128(temp >> 128) */
3062     set_avr64(a->vrt, tmp0, false);
3063     set_avr64(a->vrt, zero, true);
3065     tcg_temp_free_i64(tmp0);
3066     tcg_temp_free_i64(tmp1);
3067     tcg_temp_free_i64(prod1h);
3068     tcg_temp_free_i64(prod1l);
3069     tcg_temp_free_i64(prod0h);
3070     tcg_temp_free_i64(prod0l);
3072     return true;
3073 }
3075 static bool do_vx_helper(DisasContext *ctx, arg_VX *a,
3076                          void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr))
3077 {
3078     TCGv_ptr ra, rb, rd;
3079     REQUIRE_VECTOR(ctx);
3081     ra = gen_avr_ptr(a->vra);
3082     rb = gen_avr_ptr(a->vrb);
3083     rd = gen_avr_ptr(a->vrt);
3084     gen_helper(rd, ra, rb);
3085     tcg_temp_free_ptr(ra);
3086     tcg_temp_free_ptr(rb);
3087     tcg_temp_free_ptr(rd);
3089     return true;
3090 }
3092 TRANS_FLAGS2(ALTIVEC_207, VADDCUQ, do_vx_helper, gen_helper_VADDCUQ)
3093 TRANS_FLAGS2(ALTIVEC_207, VADDUQM, do_vx_helper, gen_helper_VADDUQM)
3095 TRANS_FLAGS2(ALTIVEC_207, VPMSUMD, do_vx_helper, gen_helper_VPMSUMD)
3097 TRANS_FLAGS2(ALTIVEC_207, VSUBCUQ, do_vx_helper, gen_helper_VSUBCUQ)
3098 TRANS_FLAGS2(ALTIVEC_207, VSUBUQM, do_vx_helper, gen_helper_VSUBUQM)
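/*
 * Even/odd doubleword multiply expansion: "even" selects which doubleword
 * of each source register is read (the high doubleword for the even-indexed
 * element), and gen_mul produces the full 128-bit product that is written
 * back as the low/high halves of VRT.
 */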
3100 static bool do_vx_vmuleo(DisasContext *ctx, arg_VX *a, bool even,
3101                          void (*gen_mul)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
3102 {
3103     TCGv_i64 vra, vrb, vrt0, vrt1;
3104     REQUIRE_VECTOR(ctx);
3106     vra = tcg_temp_new_i64();
3107     vrb = tcg_temp_new_i64();
3108     vrt0 = tcg_temp_new_i64();
3109     vrt1 = tcg_temp_new_i64();
3111     get_avr64(vra, a->vra, even);
3112     get_avr64(vrb, a->vrb, even);
3113     gen_mul(vrt0, vrt1, vra, vrb);
3114     set_avr64(a->vrt, vrt0, false);
3115     set_avr64(a->vrt, vrt1, true);
3117     tcg_temp_free_i64(vra);
3118     tcg_temp_free_i64(vrb);
3119     tcg_temp_free_i64(vrt0);
3120     tcg_temp_free_i64(vrt1);
3122     return true;
3123 }
3125 static bool trans_VMULLD(DisasContext *ctx, arg_VX *a)
3126 {
3127     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
3128     REQUIRE_VECTOR(ctx);
3130     tcg_gen_gvec_mul(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->vra),
3131                      avr_full_offset(a->vrb), 16, 16);
3133     return true;
3134 }
3136 TRANS_FLAGS(ALTIVEC, VMULESB, do_vx_helper, gen_helper_VMULESB)
3137 TRANS_FLAGS(ALTIVEC, VMULOSB, do_vx_helper, gen_helper_VMULOSB)
3138 TRANS_FLAGS(ALTIVEC, VMULEUB, do_vx_helper, gen_helper_VMULEUB)
3139 TRANS_FLAGS(ALTIVEC, VMULOUB, do_vx_helper, gen_helper_VMULOUB)
3140 TRANS_FLAGS(ALTIVEC, VMULESH, do_vx_helper, gen_helper_VMULESH)
3141 TRANS_FLAGS(ALTIVEC, VMULOSH, do_vx_helper, gen_helper_VMULOSH)
3142 TRANS_FLAGS(ALTIVEC, VMULEUH, do_vx_helper, gen_helper_VMULEUH)
3143 TRANS_FLAGS(ALTIVEC, VMULOUH, do_vx_helper, gen_helper_VMULOUH)
3144 TRANS_FLAGS2(ALTIVEC_207, VMULESW, do_vx_helper, gen_helper_VMULESW)
3145 TRANS_FLAGS2(ALTIVEC_207, VMULOSW, do_vx_helper, gen_helper_VMULOSW)
3146 TRANS_FLAGS2(ALTIVEC_207, VMULEUW, do_vx_helper, gen_helper_VMULEUW)
3147 TRANS_FLAGS2(ALTIVEC_207, VMULOUW, do_vx_helper, gen_helper_VMULOUW)
3148 TRANS_FLAGS2(ISA310, VMULESD, do_vx_vmuleo, true , tcg_gen_muls2_i64)
3149 TRANS_FLAGS2(ISA310, VMULOSD, do_vx_vmuleo, false, tcg_gen_muls2_i64)
3150 TRANS_FLAGS2(ISA310, VMULEUD, do_vx_vmuleo, true , tcg_gen_mulu2_i64)
3151 TRANS_FLAGS2(ISA310, VMULOUD, do_vx_vmuleo, false, tcg_gen_mulu2_i64)
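/*
 * do_vx_vmulhw_i64 computes the high 32 bits of each 32-bit multiply
 * inside a 64-bit lane: the low and high words of a and b are multiplied
 * separately (with sign or zero extension), and the upper halves of the
 * two products are packed back into a single 64-bit result.
 */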
3153 static void do_vx_vmulhw_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
3154 {
3155     TCGv_i64 hh, lh, temp;
3157     hh = tcg_temp_new_i64();
3158     lh = tcg_temp_new_i64();
3159     temp = tcg_temp_new_i64();
3161     if (sign) {
3162         tcg_gen_ext32s_i64(lh, a);
3163         tcg_gen_ext32s_i64(temp, b);
3164     } else {
3165         tcg_gen_ext32u_i64(lh, a);
3166         tcg_gen_ext32u_i64(temp, b);
3167     }
3168     tcg_gen_mul_i64(lh, lh, temp);
3170     if (sign) {
3171         tcg_gen_sari_i64(hh, a, 32);
3172         tcg_gen_sari_i64(temp, b, 32);
3173     } else {
3174         tcg_gen_shri_i64(hh, a, 32);
3175         tcg_gen_shri_i64(temp, b, 32);
3176     }
3177     tcg_gen_mul_i64(hh, hh, temp);
3179     tcg_gen_shri_i64(lh, lh, 32);
3180     tcg_gen_deposit_i64(t, hh, lh, 0, 32);
3182     tcg_temp_free_i64(hh);
3183     tcg_temp_free_i64(lh);
3184     tcg_temp_free_i64(temp);
3185 }
3187 static void do_vx_vmulhd_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
3188 {
3189     TCGv_i64 tlow;
3191     tlow  = tcg_temp_new_i64();
3192     if (sign) {
3193         tcg_gen_muls2_i64(tlow, t, a, b);
3194     } else {
3195         tcg_gen_mulu2_i64(tlow, t, a, b);
3196     }
3198     tcg_temp_free_i64(tlow);
3199 }
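/*
 * do_vx_mulh applies one of the expanders above to each doubleword of the
 * operands, selecting signed or unsigned behaviour through the "sign"
 * flag.
 */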
3201 static bool do_vx_mulh(DisasContext *ctx, arg_VX *a, bool sign,
3202                        void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, bool))
3203 {
3204     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
3205     REQUIRE_VECTOR(ctx);
3207     TCGv_i64 vra, vrb, vrt;
3208     int i;
3210     vra = tcg_temp_new_i64();
3211     vrb = tcg_temp_new_i64();
3212     vrt = tcg_temp_new_i64();
3214     for (i = 0; i < 2; i++) {
3215         get_avr64(vra, a->vra, i);
3216         get_avr64(vrb, a->vrb, i);
3217         get_avr64(vrt, a->vrt, i);
3219         func(vrt, vra, vrb, sign);
3221         set_avr64(a->vrt, vrt, i);
3222     }
3224     tcg_temp_free_i64(vra);
3225     tcg_temp_free_i64(vrb);
3226     tcg_temp_free_i64(vrt);
3228     return true;
3229 }
3232 TRANS(VMULHSW, do_vx_mulh, true , do_vx_vmulhw_i64)
3233 TRANS(VMULHSD, do_vx_mulh, true , do_vx_vmulhd_i64)
3234 TRANS(VMULHUW, do_vx_mulh, false, do_vx_vmulhw_i64)
3235 TRANS(VMULHUD, do_vx_mulh, false, do_vx_vmulhd_i64)
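/*
 * Vector divide/modulo expansion: exactly one of func_32/func_64 is
 * non-NULL, matching the element size passed in vece, and gvec applies it
 * across the 16-byte register.
 */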
3237 static bool do_vdiv_vmod(DisasContext *ctx, arg_VX *a, const int vece,
3238                          void (*func_32)(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b),
3239                          void (*func_64)(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b))
3240 {
3241     const GVecGen3 op = {
3242         .fni4 = func_32,
3243         .fni8 = func_64,
3244         .vece = vece
3245     };
3247     REQUIRE_VECTOR(ctx);
3249     tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
3250                    avr_full_offset(a->vrb), 16, 16, &op);
3252     return true;
3253 }
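/*
 * The architecture leaves the result undefined for division by zero and
 * for the most-negative-dividend / -1 overflow case, so these expanders
 * first steer the divisor to a harmless value rather than letting the
 * host division fault.
 */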
3255 #define DIVU32(NAME, DIV)                                               \
3256 static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)                    \
3257 {                                                                       \
3258     TCGv_i32 zero = tcg_constant_i32(0);                                \
3259     TCGv_i32 one = tcg_constant_i32(1);                                 \
3260     tcg_gen_movcond_i32(TCG_COND_EQ, b, b, zero, one, b);               \
3261     DIV(t, a, b);                                                       \
3262 }
3264 #define DIVS32(NAME, DIV)                                               \
3265 static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)                    \
3266 {                                                                       \
3267     TCGv_i32 t0 = tcg_temp_new_i32();                                   \
3268     TCGv_i32 t1 = tcg_temp_new_i32();                                   \
3269     tcg_gen_setcondi_i32(TCG_COND_EQ, t0, a, INT32_MIN);                \
3270     tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, -1);                       \
3271     tcg_gen_and_i32(t0, t0, t1);                                        \
3272     tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, 0);                        \
3273     tcg_gen_or_i32(t0, t0, t1);                                         \
3274     tcg_gen_movi_i32(t1, 0);                                            \
3275     tcg_gen_movcond_i32(TCG_COND_NE, b, t0, t1, t0, b);                 \
3276     DIV(t, a, b);                                                       \
3277     tcg_temp_free_i32(t0);                                              \
3278     tcg_temp_free_i32(t1);                                              \
3279 }
3281 #define DIVU64(NAME, DIV)                                               \
3282 static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)                    \
3283 {                                                                       \
3284     TCGv_i64 zero = tcg_constant_i64(0);                                \
3285     TCGv_i64 one = tcg_constant_i64(1);                                 \
3286     tcg_gen_movcond_i64(TCG_COND_EQ, b, b, zero, one, b);               \
3287     DIV(t, a, b);                                                       \
3288 }
3290 #define DIVS64(NAME, DIV)                                               \
3291 static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)                    \
3292 {                                                                       \
3293     TCGv_i64 t0 = tcg_temp_new_i64();                                   \
3294     TCGv_i64 t1 = tcg_temp_new_i64();                                   \
3295     tcg_gen_setcondi_i64(TCG_COND_EQ, t0, a, INT64_MIN);                \
3296     tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, -1);                       \
3297     tcg_gen_and_i64(t0, t0, t1);                                        \
3298     tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, 0);                        \
3299     tcg_gen_or_i64(t0, t0, t1);                                         \
3300     tcg_gen_movi_i64(t1, 0);                                            \
3301     tcg_gen_movcond_i64(TCG_COND_NE, b, t0, t1, t0, b);                 \
3302     DIV(t, a, b);                                                       \
3303     tcg_temp_free_i64(t0);                                              \
3304     tcg_temp_free_i64(t1);                                              \
3305 }
3307 DIVS32(do_divsw, tcg_gen_div_i32)
3308 DIVU32(do_divuw, tcg_gen_divu_i32)
3309 DIVS64(do_divsd, tcg_gen_div_i64)
3310 DIVU64(do_divud, tcg_gen_divu_i64)
3312 TRANS_FLAGS2(ISA310, VDIVSW, do_vdiv_vmod, MO_32, do_divsw, NULL)
3313 TRANS_FLAGS2(ISA310, VDIVUW, do_vdiv_vmod, MO_32, do_divuw, NULL)
3314 TRANS_FLAGS2(ISA310, VDIVSD, do_vdiv_vmod, MO_64, NULL, do_divsd)
3315 TRANS_FLAGS2(ISA310, VDIVUD, do_vdiv_vmod, MO_64, NULL, do_divud)
3316 TRANS_FLAGS2(ISA310, VDIVSQ, do_vx_helper, gen_helper_VDIVSQ)
3317 TRANS_FLAGS2(ISA310, VDIVUQ, do_vx_helper, gen_helper_VDIVUQ)
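/*
 * Extended divide: the dividend is widened to 64 bits, shifted left by 32
 * and divided by the widened divisor, i.e. the quotient of (a << 32) / b
 * is computed in 64 bits and then truncated; if it does not fit in 32
 * bits the architected result is undefined.
 */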
3319 static void do_dives_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
3320 {
3321     TCGv_i64 val1, val2;
3323     val1 = tcg_temp_new_i64();
3324     val2 = tcg_temp_new_i64();
3326     tcg_gen_ext_i32_i64(val1, a);
3327     tcg_gen_ext_i32_i64(val2, b);
3329     /* (a << 32)/b */
3330     tcg_gen_shli_i64(val1, val1, 32);
3331     tcg_gen_div_i64(val1, val1, val2);
3333     /* if quotient doesn't fit in 32 bits the result is undefined */
3334     tcg_gen_extrl_i64_i32(t, val1);
3336     tcg_temp_free_i64(val1);
3337     tcg_temp_free_i64(val2);
3338 }
3340 static void do_diveu_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
3341 {
3342     TCGv_i64 val1, val2;
3344     val1 = tcg_temp_new_i64();
3345     val2 = tcg_temp_new_i64();
3347     tcg_gen_extu_i32_i64(val1, a);
3348     tcg_gen_extu_i32_i64(val2, b);
3350     /* (a << 32)/b */
3351     tcg_gen_shli_i64(val1, val1, 32);
3352     tcg_gen_divu_i64(val1, val1, val2);
3354     /* if quotient doesn't fit in 32 bits the result is undefined */
3355     tcg_gen_extrl_i64_i32(t, val1);
3357     tcg_temp_free_i64(val1);
3358     tcg_temp_free_i64(val2);
3359 }
3361 DIVS32(do_divesw, do_dives_i32)
3362 DIVU32(do_diveuw, do_diveu_i32)
3364 DIVS32(do_modsw, tcg_gen_rem_i32)
3365 DIVU32(do_moduw, tcg_gen_remu_i32)
3366 DIVS64(do_modsd, tcg_gen_rem_i64)
3367 DIVU64(do_modud, tcg_gen_remu_i64)
3369 TRANS_FLAGS2(ISA310, VDIVESW, do_vdiv_vmod, MO_32, do_divesw, NULL)
3370 TRANS_FLAGS2(ISA310, VDIVEUW, do_vdiv_vmod, MO_32, do_diveuw, NULL)
3371 TRANS_FLAGS2(ISA310, VDIVESD, do_vx_helper, gen_helper_VDIVESD)
3372 TRANS_FLAGS2(ISA310, VDIVEUD, do_vx_helper, gen_helper_VDIVEUD)
3373 TRANS_FLAGS2(ISA310, VDIVESQ, do_vx_helper, gen_helper_VDIVESQ)
3374 TRANS_FLAGS2(ISA310, VDIVEUQ, do_vx_helper, gen_helper_VDIVEUQ)
3376 TRANS_FLAGS2(ISA310, VMODSW, do_vdiv_vmod, MO_32, do_modsw , NULL)
3377 TRANS_FLAGS2(ISA310, VMODUW, do_vdiv_vmod, MO_32, do_moduw, NULL)
3378 TRANS_FLAGS2(ISA310, VMODSD, do_vdiv_vmod, MO_64, NULL, do_modsd)
3379 TRANS_FLAGS2(ISA310, VMODUD, do_vdiv_vmod, MO_64, NULL, do_modud)
3380 TRANS_FLAGS2(ISA310, VMODSQ, do_vx_helper, gen_helper_VMODSQ)
3381 TRANS_FLAGS2(ISA310, VMODUQ, do_vx_helper, gen_helper_VMODUQ)
3383 #undef DIVS32
3384 #undef DIVU32
3385 #undef DIVS64
3386 #undef DIVU64
3388 #undef GEN_VR_LDX
3389 #undef GEN_VR_STX
3390 #undef GEN_VR_LVE
3391 #undef GEN_VR_STVE
3393 #undef GEN_VX_LOGICAL
3394 #undef GEN_VX_LOGICAL_207
3395 #undef GEN_VXFORM
3396 #undef GEN_VXFORM_207
3397 #undef GEN_VXFORM_DUAL
3398 #undef GEN_VXRFORM_DUAL
3399 #undef GEN_VXRFORM1
3400 #undef GEN_VXRFORM
3401 #undef GEN_VXFORM_VSPLTI
3402 #undef GEN_VXFORM_NOA
3403 #undef GEN_VXFORM_UIMM
3404 #undef GEN_VAFORM_PAIRED
3406 #undef GEN_BCD2