/*
 * iwMMXt micro operations for XScale.
 *
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Written by Andrzej Zaborowski <andrew@openedhand.com>
 * Copyright (c) 2008 CodeSourcery
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/helper-proto.h"

/* iwMMXt macros extracted from GNU gdb. */

/* Set the SIMD wCASF flags for 8, 16, 32 or 64-bit operations. */
#define SIMD8_SET(v, n, b)  ((v != 0) << ((((b) + 1) * 4) + (n)))
#define SIMD16_SET(v, n, h) ((v != 0) << ((((h) + 1) * 8) + (n)))
#define SIMD32_SET(v, n, w) ((v != 0) << ((((w) + 1) * 16) + (n)))
#define SIMD64_SET(v, n)    ((v != 0) << (32 + (n)))
/* Flags to pass as "n" above. */
#define SIMD_NBIT -1
#define SIMD_ZBIT -2
#define SIMD_CBIT -3
#define SIMD_VBIT -4
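/*
 * For example, with SIMD_NBIT == -1 and SIMD_ZBIT == -2, a nonzero "v" in
 * SIMD8_SET(v, SIMD_NBIT, 3) sets bit ((3 + 1) * 4) - 1 = 15 of wCASF,
 * and SIMD8_SET(v, SIMD_ZBIT, 3) sets bit 14.
 */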
/* Various status bit macros. */
#define NBIT8(x)    ((x) & 0x80)
#define NBIT16(x)   ((x) & 0x8000)
#define NBIT32(x)   ((x) & 0x80000000)
#define NBIT64(x)   ((x) & 0x8000000000000000ULL)
#define ZBIT8(x)    (((x) & 0xff) == 0)
#define ZBIT16(x)   (((x) & 0xffff) == 0)
#define ZBIT32(x)   (((x) & 0xffffffff) == 0)
#define ZBIT64(x)   (x == 0)
/* Sign extension macros. */
#define EXTEND8H(a) ((uint16_t) (int8_t) (a))
#define EXTEND8(a)  ((uint32_t) (int8_t) (a))
#define EXTEND16(a) ((uint32_t) (int16_t) (a))
#define EXTEND16S(a)((int32_t) (int16_t) (a))
#define EXTEND32(a) ((uint64_t) (int32_t) (a))
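/*
 * For instance, EXTEND16(0x8000) yields 0xffff8000 while EXTEND16S(0x8000)
 * yields the signed value -32768; EXTEND8H(0x80) yields the halfword
 * pattern 0xff80.
 */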

uint64_t HELPER(iwmmxt_maddsq)(uint64_t a, uint64_t b)
{
    a = ((
            EXTEND16S((a >> 0) & 0xffff) * EXTEND16S((b >> 0) & 0xffff) +
            EXTEND16S((a >> 16) & 0xffff) * EXTEND16S((b >> 16) & 0xffff)
        ) & 0xffffffff) | ((uint64_t) (
            EXTEND16S((a >> 32) & 0xffff) * EXTEND16S((b >> 32) & 0xffff) +
            EXTEND16S((a >> 48) & 0xffff) * EXTEND16S((b >> 48) & 0xffff)
        ) << 32);
    return a;
}

uint64_t HELPER(iwmmxt_madduq)(uint64_t a, uint64_t b)
{
    a = ((
            ((a >> 0) & 0xffff) * ((b >> 0) & 0xffff) +
            ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff)
        ) & 0xffffffff) | ((
            ((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) +
            ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff)
        ) << 32);
    return a;
}
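
/*
 * The two multiply-add helpers above multiply the four 16-bit lanes of a
 * and b, accumulate products 0 and 1 into the low 32 bits of the result
 * and products 2 and 3 into the high 32 bits (maddsq signed, madduq
 * unsigned).
 */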

uint64_t HELPER(iwmmxt_sadb)(uint64_t a, uint64_t b)
{
#define abs(x) (((x) >= 0) ? x : -x)
#define SADB(SHR) abs((int) ((a >> SHR) & 0xff) - (int) ((b >> SHR) & 0xff))
    return
        SADB(0) + SADB(8) + SADB(16) + SADB(24) +
        SADB(32) + SADB(40) + SADB(48) + SADB(56);
#undef SADB
}

uint64_t HELPER(iwmmxt_sadw)(uint64_t a, uint64_t b)
{
#define SADW(SHR) \
    abs((int) ((a >> SHR) & 0xffff) - (int) ((b >> SHR) & 0xffff))
    return SADW(0) + SADW(16) + SADW(32) + SADW(48);
#undef SADW
}

uint64_t HELPER(iwmmxt_mulslw)(uint64_t a, uint64_t b)
{
#define MULS(SHR) ((uint64_t) ((( \
        EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \
    ) >> 0) & 0xffff) << SHR)
    return MULS(0) | MULS(16) | MULS(32) | MULS(48);
#undef MULS
}

uint64_t HELPER(iwmmxt_mulshw)(uint64_t a, uint64_t b)
{
#define MULS(SHR) ((uint64_t) ((( \
        EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \
    ) >> 16) & 0xffff) << SHR)
    return MULS(0) | MULS(16) | MULS(32) | MULS(48);
#undef MULS
}

uint64_t HELPER(iwmmxt_mululw)(uint64_t a, uint64_t b)
{
#define MULU(SHR) ((uint64_t) ((( \
        ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \
    ) >> 0) & 0xffff) << SHR)
    return MULU(0) | MULU(16) | MULU(32) | MULU(48);
#undef MULU
}

uint64_t HELPER(iwmmxt_muluhw)(uint64_t a, uint64_t b)
{
#define MULU(SHR) ((uint64_t) ((( \
        ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \
    ) >> 16) & 0xffff) << SHR)
    return MULU(0) | MULU(16) | MULU(32) | MULU(48);
#undef MULU
}

uint64_t HELPER(iwmmxt_macsw)(uint64_t a, uint64_t b)
{
#define MACS(SHR) ( \
        EXTEND16((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff))
    return (int64_t) (MACS(0) + MACS(16) + MACS(32) + MACS(48));
#undef MACS
}

uint64_t HELPER(iwmmxt_macuw)(uint64_t a, uint64_t b)
{
#define MACU(SHR) ( \
        (uint32_t) ((a >> SHR) & 0xffff) * \
        (uint32_t) ((b >> SHR) & 0xffff))
    return MACU(0) + MACU(16) + MACU(32) + MACU(48);
#undef MACU
}
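
/*
 * macsw/macuw sum the four per-lane 16x16 products of a and b into a
 * single signed/unsigned accumulator value.
 */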

#define NZBIT8(x, i) \
    SIMD8_SET(NBIT8((x) & 0xff), SIMD_NBIT, i) | \
    SIMD8_SET(ZBIT8((x) & 0xff), SIMD_ZBIT, i)
#define NZBIT16(x, i) \
    SIMD16_SET(NBIT16((x) & 0xffff), SIMD_NBIT, i) | \
    SIMD16_SET(ZBIT16((x) & 0xffff), SIMD_ZBIT, i)
#define NZBIT32(x, i) \
    SIMD32_SET(NBIT32((x) & 0xffffffff), SIMD_NBIT, i) | \
    SIMD32_SET(ZBIT32((x) & 0xffffffff), SIMD_ZBIT, i)
#define NZBIT64(x) \
    SIMD64_SET(NBIT64(x), SIMD_NBIT) | \
    SIMD64_SET(ZBIT64(x), SIMD_ZBIT)
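/*
 * NZBIT8/16/32 combine the per-lane N and Z wCASF flags for lane "i";
 * NZBIT64 does the same for a full 64-bit result.
 */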
#define IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, b)))(CPUARMState *env, \
                                                 uint64_t a, uint64_t b) \
{ \
    a = \
        (((a >> SH0) & 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | \
        (((a >> SH1) & 0xff) << 16) | (((b >> SH1) & 0xff) << 24) | \
        (((a >> SH2) & 0xff) << 32) | (((b >> SH2) & 0xff) << 40) | \
        (((a >> SH3) & 0xff) << 48) | (((b >> SH3) & 0xff) << 56); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \
        NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \
        NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \
        NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
    return a; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, w)))(CPUARMState *env, \
                                                 uint64_t a, uint64_t b) \
{ \
    a = \
        (((a >> SH0) & 0xffff) << 0) | \
        (((b >> SH0) & 0xffff) << 16) | \
        (((a >> SH2) & 0xffff) << 32) | \
        (((b >> SH2) & 0xffff) << 48); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT8(a >> 0, 0) | NZBIT8(a >> 16, 1) | \
        NZBIT8(a >> 32, 2) | NZBIT8(a >> 48, 3); \
    return a; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, l)))(CPUARMState *env, \
                                                 uint64_t a, uint64_t b) \
{ \
    a = \
        (((a >> SH0) & 0xffffffff) << 0) | \
        (((b >> SH0) & 0xffffffff) << 32); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \
    return a; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ub)))(CPUARMState *env, \
                                                  uint64_t x) \
{ \
    x = \
        (((x >> SH0) & 0xff) << 0) | \
        (((x >> SH1) & 0xff) << 16) | \
        (((x >> SH2) & 0xff) << 32) | \
        (((x >> SH3) & 0xff) << 48) ; \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \
        NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
    return x; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, uw)))(CPUARMState *env, \
                                                  uint64_t x) \
{ \
    x = \
        (((x >> SH0) & 0xffff) << 0) | \
        (((x >> SH2) & 0xffff) << 32); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
    return x; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ul)))(CPUARMState *env, \
                                                  uint64_t x) \
{ \
    x = (((x >> SH0) & 0xffffffff) << 0); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
    return x; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sb)))(CPUARMState *env, \
                                                  uint64_t x) \
{ \
    x = \
        ((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | \
        ((uint64_t) EXTEND8H((x >> SH1) & 0xff) << 16) | \
        ((uint64_t) EXTEND8H((x >> SH2) & 0xff) << 32) | \
        ((uint64_t) EXTEND8H((x >> SH3) & 0xff) << 48); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \
        NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
    return x; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sw)))(CPUARMState *env, \
                                                  uint64_t x) \
{ \
    x = \
        ((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | \
        ((uint64_t) EXTEND16((x >> SH2) & 0xffff) << 32); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
    return x; \
} \
uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sl)))(CPUARMState *env, \
                                                  uint64_t x) \
{ \
    x = EXTEND32((x >> SH0) & 0xffffffff); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
    return x; \
}
IWMMXT_OP_UNPACK(l, 0, 8, 16, 24)
IWMMXT_OP_UNPACK(h, 32, 40, 48, 56)
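
/*
 * The two instantiations above generate the iwmmxt_unpackl* helpers
 * (operating on the low halves of the operands: shifts 0..24) and the
 * iwmmxt_unpackh* helpers (high halves: shifts 32..56), each in the
 * two-operand b/w/l forms and the one-operand ub/uw/ul/sb/sw/sl forms.
 */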

#define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \
uint64_t HELPER(glue(iwmmxt_, glue(SUFF, b)))(CPUARMState *env, \
                                              uint64_t a, uint64_t b) \
{ \
    a = \
        CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | \
        CMP(16, Tb, O, 0xff) | CMP(24, Tb, O, 0xff) | \
        CMP(32, Tb, O, 0xff) | CMP(40, Tb, O, 0xff) | \
        CMP(48, Tb, O, 0xff) | CMP(56, Tb, O, 0xff); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \
        NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \
        NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \
        NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
    return a; \
} \
uint64_t HELPER(glue(iwmmxt_, glue(SUFF, w)))(CPUARMState *env, \
                                              uint64_t a, uint64_t b) \
{ \
    a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | \
        CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \
        NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \
    return a; \
} \
uint64_t HELPER(glue(iwmmxt_, glue(SUFF, l)))(CPUARMState *env, \
                                              uint64_t a, uint64_t b) \
{ \
    a = CMP(0, Tl, O, 0xffffffff) | \
        CMP(32, Tl, O, 0xffffffff); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \
    return a; \
}
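
/*
 * CMP is defined (and #undef'd) before each group of IWMMXT_OP_CMP
 * instantiations below: first as an all-ones-or-zero comparison mask
 * (cmpeq/cmpgts/cmpgtu), then as a select-the-winning-operand form
 * (mins/minu/maxs/maxu), and finally as a plain per-lane add/subtract
 * (subn/addn and the not-yet-saturating subu/addu/subs/adds).
 */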

#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \
            (TYPE) ((b >> SHR) & MASK)) ? (uint64_t) MASK : 0) << SHR)
IWMMXT_OP_CMP(cmpeq, uint8_t, uint16_t, uint32_t, ==)
IWMMXT_OP_CMP(cmpgts, int8_t, int16_t, int32_t, >)
IWMMXT_OP_CMP(cmpgtu, uint8_t, uint16_t, uint32_t, >)
#undef CMP

#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \
            (TYPE) ((b >> SHR) & MASK)) ? a : b) & ((uint64_t) MASK << SHR))
IWMMXT_OP_CMP(mins, int8_t, int16_t, int32_t, <)
IWMMXT_OP_CMP(minu, uint8_t, uint16_t, uint32_t, <)
IWMMXT_OP_CMP(maxs, int8_t, int16_t, int32_t, >)
IWMMXT_OP_CMP(maxu, uint8_t, uint16_t, uint32_t, >)
#undef CMP

#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \
            OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR)
IWMMXT_OP_CMP(subn, uint8_t, uint16_t, uint32_t, -)
IWMMXT_OP_CMP(addn, uint8_t, uint16_t, uint32_t, +)
#undef CMP

/* TODO Signed- and Unsigned-Saturation */
#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \
            OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR)
IWMMXT_OP_CMP(subu, uint8_t, uint16_t, uint32_t, -)
IWMMXT_OP_CMP(addu, uint8_t, uint16_t, uint32_t, +)
IWMMXT_OP_CMP(subs, int8_t, int16_t, int32_t, -)
IWMMXT_OP_CMP(adds, int8_t, int16_t, int32_t, +)
#undef CMP
#undef IWMMXT_OP_CMP

#define AVGB(SHR) ((( \
        ((a >> SHR) & 0xff) + ((b >> SHR) & 0xff) + round) >> 1) << SHR)
#define IWMMXT_OP_AVGB(r) \
uint64_t HELPER(iwmmxt_avgb##r)(CPUARMState *env, uint64_t a, uint64_t b) \
{ \
    const int round = r; \
    a = AVGB(0) | AVGB(8) | AVGB(16) | AVGB(24) | \
        AVGB(32) | AVGB(40) | AVGB(48) | AVGB(56); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        SIMD8_SET(ZBIT8((a >> 0) & 0xff), SIMD_ZBIT, 0) | \
        SIMD8_SET(ZBIT8((a >> 8) & 0xff), SIMD_ZBIT, 1) | \
        SIMD8_SET(ZBIT8((a >> 16) & 0xff), SIMD_ZBIT, 2) | \
        SIMD8_SET(ZBIT8((a >> 24) & 0xff), SIMD_ZBIT, 3) | \
        SIMD8_SET(ZBIT8((a >> 32) & 0xff), SIMD_ZBIT, 4) | \
        SIMD8_SET(ZBIT8((a >> 40) & 0xff), SIMD_ZBIT, 5) | \
        SIMD8_SET(ZBIT8((a >> 48) & 0xff), SIMD_ZBIT, 6) | \
        SIMD8_SET(ZBIT8((a >> 56) & 0xff), SIMD_ZBIT, 7); \
    return a; \
}
IWMMXT_OP_AVGB(0)
IWMMXT_OP_AVGB(1)
#undef IWMMXT_OP_AVGB
#undef AVGB

#define AVGW(SHR) ((( \
        ((a >> SHR) & 0xffff) + ((b >> SHR) & 0xffff) + round) >> 1) << SHR)
#define IWMMXT_OP_AVGW(r) \
uint64_t HELPER(iwmmxt_avgw##r)(CPUARMState *env, uint64_t a, uint64_t b) \
{ \
    const int round = r; \
    a = AVGW(0) | AVGW(16) | AVGW(32) | AVGW(48); \
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
        SIMD16_SET(ZBIT16((a >> 0) & 0xffff), SIMD_ZBIT, 0) | \
        SIMD16_SET(ZBIT16((a >> 16) & 0xffff), SIMD_ZBIT, 1) | \
        SIMD16_SET(ZBIT16((a >> 32) & 0xffff), SIMD_ZBIT, 2) | \
        SIMD16_SET(ZBIT16((a >> 48) & 0xffff), SIMD_ZBIT, 3); \
    return a; \
}
IWMMXT_OP_AVGW(0)
IWMMXT_OP_AVGW(1)
#undef IWMMXT_OP_AVGW
#undef AVGW
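
/*
 * Both averaging families above compute a per-lane (a + b + round) >> 1;
 * the 0/1 suffix of the generated avgb/avgw helpers selects whether the
 * rounding bit is added.
 */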

uint64_t HELPER(iwmmxt_msadb)(uint64_t a, uint64_t b)
{
    a = ((((a >> 0) & 0xffff) * ((b >> 0) & 0xffff) +
          ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff)) & 0xffffffff) |
        ((((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) +
          ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff)) << 32);
    return a;
}

uint64_t HELPER(iwmmxt_align)(uint64_t a, uint64_t b, uint32_t n)
{
    a >>= n << 3;
    a |= b << (64 - (n << 3));
    return a;
}
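
/*
 * iwmmxt_align extracts a 64-bit window from the byte-wise concatenation
 * of a and b, starting n bytes into a.
 */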

uint64_t HELPER(iwmmxt_insr)(uint64_t x, uint32_t a, uint32_t b, uint32_t n)
{
    x &= ~((uint64_t) b << n);
    x |= (uint64_t) (a & b) << n;
    return x;
}

uint32_t HELPER(iwmmxt_setpsr_nz)(uint64_t x)
{
    return SIMD64_SET((x == 0), SIMD_ZBIT) |
           SIMD64_SET((x & (1ULL << 63)), SIMD_NBIT);
}

uint64_t HELPER(iwmmxt_bcstb)(uint32_t arg)
{
    arg &= 0xff;
    return
        ((uint64_t) arg << 0 ) | ((uint64_t) arg << 8 ) |
        ((uint64_t) arg << 16) | ((uint64_t) arg << 24) |
        ((uint64_t) arg << 32) | ((uint64_t) arg << 40) |
        ((uint64_t) arg << 48) | ((uint64_t) arg << 56);
}

uint64_t HELPER(iwmmxt_bcstw)(uint32_t arg)
{
    arg &= 0xffff;
    return
        ((uint64_t) arg << 0 ) | ((uint64_t) arg << 16) |
        ((uint64_t) arg << 32) | ((uint64_t) arg << 48);
}

uint64_t HELPER(iwmmxt_bcstl)(uint32_t arg)
{
    return arg | ((uint64_t) arg << 32);
}
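
/*
 * The bcst helpers replicate the low 8/16/32 bits of arg across every
 * lane of the 64-bit result.
 */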

uint64_t HELPER(iwmmxt_addcb)(uint64_t x)
{
    return
        ((x >> 0) & 0xff) + ((x >> 8) & 0xff) +
        ((x >> 16) & 0xff) + ((x >> 24) & 0xff) +
        ((x >> 32) & 0xff) + ((x >> 40) & 0xff) +
        ((x >> 48) & 0xff) + ((x >> 56) & 0xff);
}

uint64_t HELPER(iwmmxt_addcw)(uint64_t x)
{
    return
        ((x >> 0) & 0xffff) + ((x >> 16) & 0xffff) +
        ((x >> 32) & 0xffff) + ((x >> 48) & 0xffff);
}

uint64_t HELPER(iwmmxt_addcl)(uint64_t x)
{
    return (x & 0xffffffff) + (x >> 32);
}
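
/* The addc helpers sum all of the 8/16/32-bit lanes of x into one value. */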

uint32_t HELPER(iwmmxt_msbb)(uint64_t x)
{
    return
        ((x >> 7) & 0x01) | ((x >> 14) & 0x02) |
        ((x >> 21) & 0x04) | ((x >> 28) & 0x08) |
        ((x >> 35) & 0x10) | ((x >> 42) & 0x20) |
        ((x >> 49) & 0x40) | ((x >> 56) & 0x80);
}

uint32_t HELPER(iwmmxt_msbw)(uint64_t x)
{
    return
        ((x >> 15) & 0x01) | ((x >> 30) & 0x02) |
        ((x >> 45) & 0x04) | ((x >> 60) & 0x08);
}

uint32_t HELPER(iwmmxt_msbl)(uint64_t x)
{
    return ((x >> 31) & 0x01) | ((x >> 62) & 0x02);
}
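
/*
 * msbb/msbw/msbl gather the most significant bit of each 8/16/32-bit lane
 * into a packed mask, with lane 0 in bit 0.
 */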

/* FIXME: Split wCASF setting into a separate op to avoid env use. */
uint64_t HELPER(iwmmxt_srlw)(CPUARMState *env, uint64_t x, uint32_t n)
{
    x = (((x & (0xffffll << 0)) >> n) & (0xffffll << 0)) |
        (((x & (0xffffll << 16)) >> n) & (0xffffll << 16)) |
        (((x & (0xffffll << 32)) >> n) & (0xffffll << 32)) |
        (((x & (0xffffll << 48)) >> n) & (0xffffll << 48));
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
        NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
    return x;
}

uint64_t HELPER(iwmmxt_srll)(CPUARMState *env, uint64_t x, uint32_t n)
{
    x = ((x & (0xffffffffll << 0)) >> n) |
        ((x >> n) & (0xffffffffll << 32));
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
    return x;
}

uint64_t HELPER(iwmmxt_srlq)(CPUARMState *env, uint64_t x, uint32_t n)
{
    x >>= n;
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
    return x;
}

uint64_t HELPER(iwmmxt_sllw)(CPUARMState *env, uint64_t x, uint32_t n)
{
    x = (((x & (0xffffll << 0)) << n) & (0xffffll << 0)) |
        (((x & (0xffffll << 16)) << n) & (0xffffll << 16)) |
        (((x & (0xffffll << 32)) << n) & (0xffffll << 32)) |
        (((x & (0xffffll << 48)) << n) & (0xffffll << 48));
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
        NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
    return x;
}

uint64_t HELPER(iwmmxt_slll)(CPUARMState *env, uint64_t x, uint32_t n)
{
    x = ((x << n) & (0xffffffffll << 0)) |
        ((x & (0xffffffffll << 32)) << n);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
    return x;
}

uint64_t HELPER(iwmmxt_sllq)(CPUARMState *env, uint64_t x, uint32_t n)
{
    x <<= n;
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
    return x;
}

uint64_t HELPER(iwmmxt_sraw)(CPUARMState *env, uint64_t x, uint32_t n)
{
    x = ((uint64_t) ((EXTEND16(x >> 0) >> n) & 0xffff) << 0) |
        ((uint64_t) ((EXTEND16(x >> 16) >> n) & 0xffff) << 16) |
        ((uint64_t) ((EXTEND16(x >> 32) >> n) & 0xffff) << 32) |
        ((uint64_t) ((EXTEND16(x >> 48) >> n) & 0xffff) << 48);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
        NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
    return x;
}

uint64_t HELPER(iwmmxt_sral)(CPUARMState *env, uint64_t x, uint32_t n)
{
    x = (((EXTEND32(x >> 0) >> n) & 0xffffffff) << 0) |
        (((EXTEND32(x >> 32) >> n) & 0xffffffff) << 32);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
    return x;
}

uint64_t HELPER(iwmmxt_sraq)(CPUARMState *env, uint64_t x, uint32_t n)
{
    x = (int64_t) x >> n;
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
    return x;
}

uint64_t HELPER(iwmmxt_rorw)(CPUARMState *env, uint64_t x, uint32_t n)
{
    x = ((((x & (0xffffll << 0)) >> n) |
          ((x & (0xffffll << 0)) << (16 - n))) & (0xffffll << 0)) |
        ((((x & (0xffffll << 16)) >> n) |
          ((x & (0xffffll << 16)) << (16 - n))) & (0xffffll << 16)) |
        ((((x & (0xffffll << 32)) >> n) |
          ((x & (0xffffll << 32)) << (16 - n))) & (0xffffll << 32)) |
        ((((x & (0xffffll << 48)) >> n) |
          ((x & (0xffffll << 48)) << (16 - n))) & (0xffffll << 48));
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
        NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
    return x;
}

uint64_t HELPER(iwmmxt_rorl)(CPUARMState *env, uint64_t x, uint32_t n)
{
    x = ((x & (0xffffffffll << 0)) >> n) |
        ((x >> n) & (0xffffffffll << 32)) |
        ((x << (32 - n)) & (0xffffffffll << 0)) |
        ((x & (0xffffffffll << 32)) << (32 - n));
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
    return x;
}

uint64_t HELPER(iwmmxt_rorq)(CPUARMState *env, uint64_t x, uint32_t n)
{
    x = (x >> n) | (x << (64 - n));
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
    return x;
}

uint64_t HELPER(iwmmxt_shufh)(CPUARMState *env, uint64_t x, uint32_t n)
{
    x = (((x >> ((n << 4) & 0x30)) & 0xffff) << 0) |
        (((x >> ((n << 2) & 0x30)) & 0xffff) << 16) |
        (((x >> ((n << 0) & 0x30)) & 0xffff) << 32) |
        (((x >> ((n >> 2) & 0x30)) & 0xffff) << 48);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
        NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
    return x;
}
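
/*
 * In shufh, each 2-bit field of the immediate n selects which source
 * halfword (0-3) is copied into the corresponding destination lane.
 */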

/* TODO: Unsigned-Saturation */
uint64_t HELPER(iwmmxt_packuw)(CPUARMState *env, uint64_t a, uint64_t b)
{
    a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) |
        (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) |
        (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) |
        (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) |
        NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) |
        NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) |
        NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7);
    return a;
}

uint64_t HELPER(iwmmxt_packul)(CPUARMState *env, uint64_t a, uint64_t b)
{
    a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) |
        (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) |
        NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3);
    return a;
}

uint64_t HELPER(iwmmxt_packuq)(CPUARMState *env, uint64_t a, uint64_t b)
{
    a = (a & 0xffffffff) | ((b & 0xffffffff) << 32);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1);
    return a;
}

/* TODO: Signed-Saturation */
uint64_t HELPER(iwmmxt_packsw)(CPUARMState *env, uint64_t a, uint64_t b)
{
    a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) |
        (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) |
        (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) |
        (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) |
        NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) |
        NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) |
        NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7);
    return a;
}

uint64_t HELPER(iwmmxt_packsl)(CPUARMState *env, uint64_t a, uint64_t b)
{
    a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) |
        (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) |
        NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3);
    return a;
}

uint64_t HELPER(iwmmxt_packsq)(CPUARMState *env, uint64_t a, uint64_t b)
{
    a = (a & 0xffffffff) | ((b & 0xffffffff) << 32);
    env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
        NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1);
    return a;
}

uint64_t HELPER(iwmmxt_muladdsl)(uint64_t c, uint32_t a, uint32_t b)
{
    return c + ((int32_t) EXTEND32(a) * (int32_t) EXTEND32(b));
}

uint64_t HELPER(iwmmxt_muladdsw)(uint64_t c, uint32_t a, uint32_t b)
{
    c += EXTEND32(EXTEND16S((a >> 0) & 0xffff) *
                  EXTEND16S((b >> 0) & 0xffff));
    c += EXTEND32(EXTEND16S((a >> 16) & 0xffff) *
                  EXTEND16S((b >> 16) & 0xffff));
    return c;
}

uint64_t HELPER(iwmmxt_muladdswl)(uint64_t c, uint32_t a, uint32_t b)
{
    return c + (EXTEND32(EXTEND16S(a & 0xffff) *
                         EXTEND16S(b & 0xffff)));
}