/*
 *  PowerPC integer and vector emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "qemu/host-utils.h"
#include "helper.h"

#include "helper_regs.h"
/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

uint64_t helper_mulldo(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
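    /* ((uint64_t)(th + 1) <= 1) holds exactly for th == 0 and th == -1,
     * i.e. when the high 64 bits of the product are pure sign extension
     * of the low 64 bits, so the signed result fits in 64 bits. */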
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->ov = 0;
    } else {
        env->so = env->ov = 1;
    }
    return (int64_t)tl;
}
#endif
target_ulong helper_cntlzw(target_ulong t)
{
    return clz32(t);
}
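/* Note: clz32()/clz64() return the operand width (32/64) for a zero
 * input, which matches the architected result of cntlzw/cntlzd. */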
#if defined(TARGET_PPC64)
target_ulong helper_cntlzd(target_ulong t)
{
    return clz64(t);
}
#endif
/* shift right arithmetic helper */
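/* XER[CA] is set when the source is negative and at least one 1 bit is
 * shifted out, i.e. whenever the arithmetic shift discards information. */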
target_ulong helper_sraw(CPUPPCState *env, target_ulong value,
                         target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            /* Use an unsigned 1 so that shift == 31 stays well defined. */
            if (likely(ret >= 0 || (value & ((1U << shift) - 1)) == 0)) {
                env->ca = 0;
            } else {
                env->ca = 1;
            }
        } else {
            ret = (int32_t)value;
            env->ca = 0;
        }
    } else {
        ret = (int32_t)value >> 31;
        env->ca = (ret != 0);
    }
    return (target_long)ret;
}
#if defined(TARGET_PPC64)
target_ulong helper_srad(CPUPPCState *env, target_ulong value,
                         target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* The mask must be built with a 64-bit 1: shift can be up
             * to 63 here, and (1 << shift) would overflow int. */
            if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
                env->ca = 0;
            } else {
                env->ca = 1;
            }
        } else {
            ret = (int64_t)value;
            env->ca = 0;
        }
    } else {
        ret = (int64_t)value >> 63;
        env->ca = (ret != 0);
    }
    return ret;
}
#endif
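/* Population-count helpers: classic SWAR bit counting.  Each step adds
 * adjacent bit fields of width 1, then 2, then 4; after the 4-bit step
 * every byte holds its own population count (popcntb), and continuing
 * through the 8- and 16-bit steps accumulates per-word counts (popcntw).
 * For example, popcntb(0x0103) == 0x0102: one bit set in byte 1, two in
 * byte 0. */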
#if defined(TARGET_PPC64)
target_ulong helper_popcntb(target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >> 1) &
                                           0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) &
                                           0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
                                           0x0f0f0f0f0f0f0f0fULL);
    return val;
}

target_ulong helper_popcntw(target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >> 1) &
                                           0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) &
                                           0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
                                           0x0f0f0f0f0f0f0f0fULL);
    val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) &
                                           0x00ff00ff00ff00ffULL);
    val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
                                           0x0000ffff0000ffffULL);
    return val;
}

target_ulong helper_popcntd(target_ulong val)
{
    return ctpop64(val);
}
#else
target_ulong helper_popcntb(target_ulong val)
{
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    return val;
}

target_ulong helper_popcntw(target_ulong val)
{
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff);
    val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
    return val;
}
#endif
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */
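/* div/divo divide the 64-bit quantity formed by RA (high word) and the
 * MQ special register (low word) by RB; divs/divso divide RA alone.  In
 * all cases the remainder is left in MQ. */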
target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}
target_ulong helper_divo(CPUPPCState *env, target_ulong arg1,
                         target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->so = env->ov = 1;
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->so = env->ov = 1;
        } else {
            env->ov = 0;
        }
        return tmp;
    }
}
target_ulong helper_divs(CPUPPCState *env, target_ulong arg1,
                         target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

target_ulong helper_divso(CPUPPCState *env, target_ulong arg1,
                          target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->so = env->ov = 1;
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->ov = 0;
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the craziest instruction ever seen, imho! */
/* Real implementation uses a ROM table.  Do the same here. */
/* The table implements, for 0 <= arg < 602:
 *   return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
 */
#if !defined(CONFIG_USER_ONLY)
target_ulong helper_602_mfrom(target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif
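/* On a little-endian host the elements of a ppc_avr_t are stored in
 * reverse order, so VECTOR_FOR_INORDER_I walks them in the order the
 * (big-endian) PowerPC architecture defines, whatever the host is. */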
/* Saturating arithmetic helpers.  */
#define SATCVT(from, to, from_type, to_type, min, max)          \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
                                                                \
        if (x < (from_type)min) {                               \
            r = min;                                            \
            *sat = 1;                                           \
        } else if (x > (from_type)max) {                        \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
#define SATCVTU(from, to, from_type, to_type, min, max)         \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
                                                                \
        if (x > (from_type)max) {                               \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
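/* e.g. cvtshsb(300, &sat) clamps to INT8_MAX (127) and sets sat; the
 * callers below OR sat into VSCR[SAT], so saturation is sticky. */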
void helper_lvsl(ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I(i, u8) {
        r->u8[i] = j++;
    }
}

void helper_lvsr(ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I(i, u8) {
        r->u8[i] = j++;
    }
}
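/* lvsl/lvsr build the permute control vector (sh..sh+15 for lvsl,
 * 16-sh..31-sh for lvsr) that vperm then uses to realign unaligned
 * loads and stores. */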
void helper_mtvscr(CPUPPCState *env, ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}
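/* vaddcuw stores the carry-out of each 32-bit add: ~a < b is true
 * exactly when b exceeds the headroom (0xffffffff - a) left above a,
 * i.e. when a + b overflows 32 bits. */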
void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}
#define VARITH_DO(name, op, element)                                    \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = a->element[i] op b->element[i];             \
        }                                                               \
    }
#define VARITH(suffix, element)                 \
    VARITH_DO(add##suffix, +, element)          \
    VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH
#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
                          ppc_avr_t *b)                                 \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            r->f[i] = func(a->f[i], b->f[i], &env->vec_status);         \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
VARITHFP(minfp, float32_min)
VARITHFP(maxfp, float32_max)
#undef VARITHFP
#define VARITHFPFMA(suffix, type)                                       \
    void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
                          ppc_avr_t *b, ppc_avr_t *c)                   \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            r->f[i] = float32_muladd(a->f[i], c->f[i], b->f[i],         \
                                     type, &env->vec_status);           \
        }                                                               \
    }
VARITHFPFMA(maddfp, 0);
VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c);
#undef VARITHFPFMA
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,   \
                        ppc_avr_t *b)                                   \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1:                                                     \
                VARITHSAT_CASE(optype, op, cvt, element);               \
                break;                                                  \
            case 2:                                                     \
                VARITHSAT_CASE(optype, op, cvt, element);               \
                break;                                                  \
            case 4:                                                     \
                VARITHSAT_CASE(optype, op, cvt, element);               \
                break;                                                  \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)          \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
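/* The average helpers below compute (a + b + 1) >> 1 in a wider type;
 * the +1 makes the result round up, as the vavg* instructions require. */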
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element,       \
             unsigned_type)                                             \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix(CPUPPCState *env, ppc_avr_t *r,             \
                            ppc_avr_t *b, uint32_t uim)                 \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn(t, -uim, &env->vec_status);        \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r,            \
                             ppc_avr_t *a, ppc_avr_t *b)                \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ?    \
                               ones : 0x0);                             \
            switch (sizeof(a->element[0])) {                            \
            case 4:                                                     \
                r->u32[i] = result;                                     \
                break;                                                  \
            case 2:                                                     \
                r->u16[i] = result;                                     \
                break;                                                  \
            case 1:                                                     \
                r->u8[i] = result;                                      \
                break;                                                  \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP
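/* For the FP compares below, note how the orderings are encoded:
 * vcmpgefp tests rel != float_relation_less, so both "equal" and
 * "greater" qualify, while unordered operands are forced to a zero
 * result before the comparison is looked at. */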
#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r,            \
                             ppc_avr_t *a, ppc_avr_t *b)                \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i],           \
                                            &env->vec_status);          \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)          \
    VCMPFP_DO(suffix, compare, order, 0)        \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP
static inline void vcmpbfp_internal(CPUPPCState *env, ppc_avr_t *r,
                                    ppc_avr_t *a, ppc_avr_t *b, int record)
{
    int i;
    int all_in = 0;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here. */
        } else {
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;

            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}

void helper_vcmpbfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(env, r, a, b, 0);
}

void helper_vcmpbfp_dot(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                        ppc_avr_t *b)
{
    vcmpbfp_internal(env, r, a, b, 1);
}
#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix(CPUPPCState *env, ppc_avr_t *r,             \
                            ppc_avr_t *b, uint32_t uim)                 \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
                                                                        \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            if (float32_is_any_nan(b->f[i])) {                          \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f[i], &s);            \
                int64_t j;                                              \
                                                                        \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                      ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);

        r->s16[i] = cvtswsh(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vmhraddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                       ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
    VMINMAX_DO(min##suffix, >, element)         \
    VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX
void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        r->s16[i] = (int16_t) (prod + c->s16[i]);
    }
}
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
                                                                        \
        for (i = 0; i < n_elems / 2; i++) {                             \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i * 2 - (1 + HI_IDX)] =        \
                    b->element[n_elems - i - 1];                        \
                result.element[n_elems - i * 2 - (1 + LO_IDX)] =        \
                    a->element[n_elems - i - 1];                        \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
    VMRG_DO(mrgl##suffix, element, MRGHI)       \
    VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO
void helper_vmsummbm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4 * i] + prod[4 * i + 1] +
            prod[4 * i + 2] + prod[4 * i + 3];
    }
}

void helper_vmsumshm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2 * i] + prod[2 * i + 1];
    }
}

void helper_vmsumshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2 * i] + prod[2 * i + 1];

        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vmsumubm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4 * i] + prod[4 * i + 1] +
            prod[4 * i + 2] + prod[4 * i + 3];
    }
}

void helper_vmsumuhm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2 * i] + prod[2 * i + 1];
    }
}

void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2 * i] + prod[2 * i + 1];

        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i * 2 + HI_IDX] *   \
                                     b->mul_element[i * 2 + HI_IDX];    \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i * 2 + LO_IDX] *   \
                                     b->mul_element[i * 2 + LO_IDX];    \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element)         \
    VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
    VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
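/* In vperm, bit 4 of each control byte selects the source vector (b if
 * set, a if clear) and the low four bits index a byte within it; the
 * index is mirrored on little-endian hosts to preserve element order. */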
void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                  ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;

    VECTOR_FOR_INORDER_I(i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        int index = 15 - (s & 0xf);
#endif

        if (s & 0x10) {
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    *r = result;
}
#if defined(HOST_WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(HOST_WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I(i, u64) {
        VECTOR_FOR_INORDER_I(j, u32) {
            uint32_t e = x[i]->u32[j];

            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}
#define VPK(suffix, from, to, cvt, dosat)                               \
    void helper_vpk##suffix(CPUPPCState *env, ppc_avr_t *r,             \
                            ppc_avr_t *a, ppc_avr_t *b)                 \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
                                                                        \
        VECTOR_FOR_INORDER_I(i, from) {                                 \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG
void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
    }
}
#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix(CPUPPCState *env, ppc_avr_t *r,            \
                             ppc_avr_t *b)                              \
    {                                                                   \
        int i;                                                          \
        float_status s = env->vec_status;                               \
                                                                        \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            r->f[i] = float32_round_to_int(b->f[i], &s);                \
        }                                                               \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI
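/* For the rotate/shift helpers below, the per-element shift count is
 * masked to the element width: (1 << (3 + (sizeof(element) >> 1))) - 1
 * evaluates to 7, 15 or 31 for 1-, 2- and 4-byte elements. */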
#define VROTATE(suffix, element)                                        \
    void helper_vrl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 <<                                  \
                                  (3 + (sizeof(a->element[0]) >> 1)))   \
                                 - 1);                                  \
            unsigned int shift = b->element[i] & mask;                  \
            unsigned int nbits = sizeof(a->element[0]) * 8;             \
                                                                        \
            /* Guard the right shift: shifting a 32-bit value by 32     \
             * (when shift == 0) would be undefined behaviour in C. */  \
            r->element[i] = (a->element[i] << shift) |                  \
                (shift ? (a->element[i] >> (nbits - shift)) : 0);       \
        }                                                               \
    }
VROTATE(b, u8)
VROTATE(h, u16)
VROTATE(w, u32)
#undef VROTATE
void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        float32 t = float32_sqrt(b->f[i], &env->vec_status);

        r->f[i] = float32_div(float32_one, t, &env->vec_status);
    }
}
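/* vsel is a bitwise select: wherever a bit of c is set, the result
 * takes the corresponding bit from b, otherwise from a. */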
void helper_vsel(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                 ppc_avr_t *c)
{
    r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
    r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
}
void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        r->f[i] = float32_exp2(b->f[i], &env->vec_status);
    }
}

void helper_vlogefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        r->f[i] = float32_log2(b->f[i], &env->vec_status);
    }
}
#if defined(HOST_WORDS_BIGENDIAN)
#define LEFT 0
#define RIGHT 1
#else
#define LEFT 1
#define RIGHT 0
#endif
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check to make sure that they
 * are, to conform to what real hardware appears to do.  */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
        int doit = 1;                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                                                                        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                                                                        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
VSHIFT(l, LEFT)
VSHIFT(r, RIGHT)
#undef VSHIFT
#undef LEFT
#undef RIGHT
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 <<                                  \
                                  (3 + (sizeof(a->element[0]) >> 1)))   \
                                 - 1);                                  \
            unsigned int shift = b->element[i] & mask;                  \
                                                                        \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL
void helper_vsldoi(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(HOST_WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index - 0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index - 0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}
void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined(HOST_WORDS_BIGENDIAN)
    memmove(&r->u8[0], &a->u8[sh], 16 - sh);
    memset(&r->u8[16-sh], 0, sh);
#else
    memmove(&r->u8[sh], &a->u8[0], 16 - sh);
    memset(&r->u8[0], 0, sh);
#endif
}
/* Experimental testing shows that hardware masks the immediate.  */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
#define SPLAT_ELEMENT(element)                                  \
    (ARRAY_SIZE(r->element) - 1 - _SPLAT_MASKED(element))
#endif
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
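/* In vspltis* below, (int8_t)(splat << 3) >> 3 sign-extends the 5-bit
 * SIMM immediate before it is broadcast to every element. */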
#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix(ppc_avr_t *r, uint32_t splat)   \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
                                                                \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 <<                                  \
                                  (3 + (sizeof(a->element[0]) >> 1)))   \
                                 - 1);                                  \
            unsigned int shift = b->element[i] & mask;                  \
                                                                        \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR
void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX * 0xf] >> 3) & 0xf;

#if defined(HOST_WORDS_BIGENDIAN)
    memmove(&r->u8[sh], &a->u8[0], 16 - sh);
    memset(&r->u8[0], 0, sh);
#else
    memmove(&r->u8[0], &a->u8[sh], 16 - sh);
    memset(&r->u8[16 - sh], 0, sh);
#endif
}
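/* vsubcuw stores the complemented borrow of each 32-bit subtract:
 * a >= b is true exactly when a - b does not borrow. */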
void helper_vsubcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = a->u32[i] >= b->u32[i];
    }
}
void helper_vsumsws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
void helper_vsum2sws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper + i * 2];

        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2 * i + j];
        }
        result.s32[upper + i * 2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
void helper_vsum4sbs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];

        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4 * i + j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4shs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];

        t += a->s16[2 * i] + a->s16[2 * i + 1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];

        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4 * i + j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
#if defined(HOST_WORDS_BIGENDIAN)
#define UPKHI 1
#define UPKLO 0
#else
#define UPKHI 0
#define UPKLO 1
#endif
#define VUPKPX(suffix, hi)                                              \
    void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b)                \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {                      \
            uint16_t e = b->u16[hi ? i : i+4];                          \
            uint8_t a = (e >> 15) ? 0xff : 0;                           \
            uint8_t r = (e >> 10) & 0x1f;                               \
            uint8_t g = (e >> 5) & 0x1f;                                \
            uint8_t b = e & 0x1f;                                       \
                                                                        \
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
        }                                                               \
        *r = result;                                                    \
    }
VUPKPX(lpx, UPKLO)
VUPKPX(hpx, UPKHI)
#undef VUPKPX
#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b)                \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
                                                                        \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); \
                 i++) {                                                 \
                result.unpacked[i - ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO

#undef VECTOR_FOR_INORDER_I
#undef HI_IDX
#undef LO_IDX
/*****************************************************************************/
/* SPE extension helpers */
/* Use a table to make this quicker */
static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

static inline uint32_t word_reverse(uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
}
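/* brinc (below) increments an index in bit-reversed order, presumably
 * for FFT-style addressing: reverse the bits, add one, reverse back.
 * Only the bits selected by the mask in arg2 participate. */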
#define MASKBITS 16 /* Random value - to be fixed (implementation dependent) */
target_ulong helper_brinc(target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}
uint32_t helper_cntlsw32(uint32_t val)
{
    if (val & 0x80000000) {
        return clz32(~val);
    } else {
        return clz32(val);
    }
}

uint32_t helper_cntlzw32(uint32_t val)
{
    return clz32(val);
}
/* 440 specific */
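/* dlmzb scans the eight bytes of high:low left to right for a zero
 * byte; the byte count is written to the low 7 bits of XER, and when
 * update_Rc is set CR0 encodes where the zero byte was found: high
 * word (0x4), low word (0x8), or not at all (0x2). */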
target_ulong helper_dlmzb(CPUPPCState *env, target_ulong high,
                          target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}