/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2008 by Maurus Cuelenaere
 * Copyright (C) 2006-2007 by Ingenic Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
/* Jz47xx Ingenic Media Extension Instruction Set

   These are ~60 SIMD instructions for the Jz47xx MIPS core.

   To compile assembly files using these instructions, they
   must be piped through a bash script called mxu_as.
*/

#ifndef JZ_MXU_H_
#define JZ_MXU_H_
#define ptn0 0
#define ptn1 1
#define ptn2 2
#define ptn3 3

#ifdef C_VERSION

/* MXU registers */

#ifndef MXU_REGS_USE_ARRAY

#define xr0 0
static int xr1, xr2, xr3, xr4, xr5, xr6, xr7, xr8, xr9;
static int xr10, xr11, xr12, xr13, xr14, xr15, xr16;

#else

static int mxu_xr[17] = {0};

#define xr0  mxu_xr[ 0]
#define xr1  mxu_xr[ 1]
#define xr2  mxu_xr[ 2]
#define xr3  mxu_xr[ 3]
#define xr4  mxu_xr[ 4]
#define xr5  mxu_xr[ 5]
#define xr6  mxu_xr[ 6]
#define xr7  mxu_xr[ 7]
#define xr8  mxu_xr[ 8]
#define xr9  mxu_xr[ 9]
#define xr10 mxu_xr[10]
#define xr11 mxu_xr[11]
#define xr12 mxu_xr[12]
#define xr13 mxu_xr[13]
#define xr14 mxu_xr[14]
#define xr15 mxu_xr[15]
#define xr16 mxu_xr[16]

#endif

#else /* C_VERSION */

#define xr0  0
#define xr1  1
#define xr2  2
#define xr3  3
#define xr4  4
#define xr5  5
#define xr6  6
#define xr7  7
#define xr8  8
#define xr9  9
#define xr10 10
#define xr11 11
#define xr12 12
#define xr13 13
#define xr14 14
#define xr15 15
#define xr16 16

#endif /* C_VERSION */
#ifdef C_VERSION

#define S32I2M(xr, r) if (&xr != mxu_xr) xr = r
#define S32M2I(xr) xr
#define S32LDD(xr, p, o) if (&xr != mxu_xr) xr = *(long*)((unsigned long)p + o)
#define S32STD(xr, p, o) *(long*)((unsigned long)p + o) = xr

#define S32LDDV(xr, p, o, s) if (&xr != mxu_xr) xr = *(long*)((unsigned long)p + ((o) << s))
#define S32STDV(xr, p, o, s) *(long*)((unsigned long)p + ((o) << s)) = xr

#define S32LDIV(xra, rb, rc, strd2) \
{\
    if (&xra != mxu_xr) xra = *(long*)((unsigned long)rb + ((rc) << strd2));\
    rb = (char*)rb + ((rc) << strd2);\
}

#define S32SDIV(xra, rb, rc, strd2) \
{\
    *(long*)((unsigned long)rb + ((rc) << strd2)) = xra;\
    rb = (char*)rb + ((rc) << strd2);\
}

#define S32LDI(xra, rb, o) \
{\
    if (&xra != mxu_xr) xra = *(long*)((unsigned long)rb + o);\
    rb = (char*)rb + o;\
}

#define S32SDI(xra, rb, o) \
{\
    *(long*)((unsigned long)rb + o) = xra;\
    rb = (char*)rb + o;\
}
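/* Illustrative sketch (not part of the original header): the post-increment
   load/store emulation accesses the address rb + offset and leaves the
   pointer argument pointing at it, mirroring the hardware S32LDI/S32SDI
   behaviour, so a copy loop primes its pointers one word early.  Note that
   the guards compare against mxu_xr, i.e. this emulation is written for the
   MXU_REGS_USE_ARRAY configuration.  copy_words() is a hypothetical helper;
   n is the number of 32-bit words.

       static void copy_words(char *dst, const char *src, int n)
       {
           int i;
           src -= 4;
           dst -= 4;
           for (i = 0; i < n; i++)
           {
               S32LDI(xr1, src, 4);
               S32SDI(xr1, dst, 4);
           }
       }
*/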
#define Q16ADD_AS_WW(a, b, c, d) \
{\
    short bh = b >> 16;\
    short bl = b & 0xFFFF;\
    short ch = c >> 16;\
    short cl = c & 0xFFFF;\
    int ah = bh + ch;\
    int al = bl + cl;\
    int dh = bh - ch;\
    int dl = bl - cl;\
    if (&a != mxu_xr) a = (ah << 16) | (al & 0xFFFF);\
    if (&d != mxu_xr) d = (dh << 16) | (dl & 0xFFFF);\
}

#define Q16ADD_AS_XW(a, b, c, d) \
{\
    short bh = b >> 16;\
    short bl = b & 0xFFFF;\
    short ch = c >> 16;\
    short cl = c & 0xFFFF;\
    int ah = bl + ch;\
    int al = bh + cl;\
    int dh = bl - ch;\
    int dl = bh - cl;\
    if (&a != mxu_xr) a = (ah << 16) | (al & 0xFFFF);\
    if (&d != mxu_xr) d = (dh << 16) | (dl & 0xFFFF);\
}

#define Q16ADD_AA_WW(a, b, c, d) \
{\
    short bh = b >> 16;\
    short bl = b & 0xFFFF;\
    short ch = c >> 16;\
    short cl = c & 0xFFFF;\
    int ah = bh + ch;\
    int al = bl + cl;\
    if (&a != mxu_xr) a = (ah << 16) | (al & 0xFFFF);\
    if (&d != mxu_xr) d = (ah << 16) | (al & 0xFFFF);\
}
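/* Illustrative sketch (not part of the original header): Q16ADD_AS_WW is an
   add/subtract "butterfly" on packed 16-bit pairs, the basic step of fast
   DCT/FFT stages.  Register contents below are hypothetical.

       S32I2M(xr1, (3 << 16) | 1);        packed {3, 1}
       S32I2M(xr2, (2 << 16) | 5);        packed {2, 5}
       Q16ADD_AS_WW(xr3, xr1, xr2, xr4);

   afterwards xr3 holds the sums {5, 6} and xr4 the differences {1, -4}.
*/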
#define D16MUL_LW(a, b, c, d)\
{\
    short bl = b & 0xFFFF;\
    short cl = c & 0xFFFF;\
    short ch = c >> 16;\
    if (&a != mxu_xr) a = ch * bl;\
    if (&d != mxu_xr) d = cl * bl;\
}

#define D16MUL_WW(a, b, c, d)\
{\
    short bh = b >> 16;\
    short bl = b & 0xFFFF;\
    short ch = c >> 16;\
    short cl = c & 0xFFFF;\
    if (&a != mxu_xr) a = ch * bh;\
    if (&d != mxu_xr) d = cl * bl;\
}

#define D16MAC_AA_LW(a, b, c, d)\
{\
    short bl = b & 0xFFFF;\
    short cl = c & 0xFFFF;\
    short ch = c >> 16;\
    if (&a != mxu_xr) a += ch * bl;\
    if (&d != mxu_xr) d += cl * bl;\
}

#define D16MUL_HW(a, b, c, d)\
{\
    short bh = b >> 16;\
    short cl = c & 0xFFFF;\
    short ch = c >> 16;\
    if (&a != mxu_xr) a = ch * bh;\
    if (&d != mxu_xr) d = cl * bh;\
}

#define D16MAC_AA_HW(a, b, c, d)\
{\
    short bh = b >> 16;\
    short cl = c & 0xFFFF;\
    short ch = c >> 16;\
    if (&a != mxu_xr) a += ch * bh;\
    if (&d != mxu_xr) d += cl * bh;\
}
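/* Illustrative sketch (not part of the original header): D16MUL_WW multiplies
   the two high halfwords into xra and the two low halfwords into xrd, and the
   D16MAC_* variants accumulate further products on top, which is the core of
   a packed dot product.  Names a1/a0/x1/x0 below are hypothetical values.

       S32I2M(xr1, (a1 << 16) | (a0 & 0xFFFF));    coefficients {a1, a0}
       S32I2M(xr2, (x1 << 16) | (x0 & 0xFFFF));    samples      {x1, x0}
       D16MUL_WW(xr3, xr1, xr2, xr4);              xr3 = a1*x1, xr4 = a0*x0
       D16MAC_AA_WW(xr3, xr5, xr6, xr4);           add the next pair of
                                                   products from xr5/xr6
*/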
#define D32SLL(a, b, c, d, sft)\
{\
    if (&a != mxu_xr) a = b << sft;\
    if (&d != mxu_xr) d = c << sft;\
}

#define D32SARL(a, b, c, sft) if (&a != mxu_xr) a = (((long)b >> sft) << 16) | (((long)c >> sft) & 0xFFFF)

#define S32SFL(a, b, c, d, ptn) \
{\
    unsigned char b3 = (unsigned char)((unsigned long)b >> 24);\
    unsigned char b2 = (unsigned char)((unsigned long)b >> 16);\
    unsigned char b1 = (unsigned char)((unsigned long)b >> 8);\
    unsigned char b0 = (unsigned char)((unsigned long)b >> 0);\
    unsigned char c3 = (unsigned char)((unsigned long)c >> 24);\
    unsigned char c2 = (unsigned char)((unsigned long)c >> 16);\
    unsigned char c1 = (unsigned char)((unsigned long)c >> 8);\
    unsigned char c0 = (unsigned char)((unsigned long)c >> 0);\
    unsigned char a3, a2, a1, a0, d3, d2, d1, d0;\
    if (ptn0 == ptn) \
    {\
        a3 = b3;\
        a2 = c3;\
        a1 = b2;\
        a0 = c2;\
        d3 = b1;\
        d2 = c1;\
        d1 = b0;\
        d0 = c0;\
    }\
    else if (ptn1 == ptn)\
    {\
        a3 = b3;\
        a2 = b1;\
        a1 = c3;\
        a0 = c1;\
        d3 = b2;\
        d2 = b0;\
        d1 = c2;\
        d0 = c0;\
    }\
    else if (ptn2 == ptn)\
    {\
        a3 = b3;\
        a2 = c3;\
        a1 = b1;\
        a0 = c1;\
        d3 = b2;\
        d2 = c2;\
        d1 = b0;\
        d0 = c0;\
    }\
    else if (ptn3 == ptn)\
    {\
        a3 = b3;\
        a2 = b2;\
        a1 = c3;\
        a0 = c2;\
        d3 = b1;\
        d2 = b0;\
        d1 = c1;\
        d0 = c0;\
    }\
    if (&a != mxu_xr) a = ((unsigned long)a3 << 24) | ((unsigned long)a2 << 16) | ((unsigned long)a1 << 8) | (unsigned long)a0;\
    if (&d != mxu_xr) d = ((unsigned long)d3 << 24) | ((unsigned long)d2 << 16) | ((unsigned long)d1 << 8) | (unsigned long)d0;\
}
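/* Illustrative note (not part of the original header): S32SFL shuffles the
   bytes of xrb and xrc into xra/xrd according to ptn0..ptn3.  For example,
   ptn3 pairs up the halfwords, turning {b3b2, b1b0} and {c3c2, c1c0} into
   xra = {b3b2, c3c2} and xrd = {b1b0, c1c0}, i.e. a 2x2 transpose of 16-bit
   lanes:

       S32SFL(xr3, xr1, xr2, xr4, ptn3);
*/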
#define D32SAR(a, b, c, d, sft)\
{\
    if (&a != mxu_xr) a = (long)b >> sft;\
    if (&d != mxu_xr) d = (long)c >> sft;\
}

#define D32SLR(a, b, c, d, sft)\
{\
    if (&a != mxu_xr) a = (unsigned long)b >> sft;\
    if (&d != mxu_xr) d = (unsigned long)c >> sft;\
}

#define Q16SLL(a, b, c, d, sft)\
{\
    short bh = b >> 16;\
    short bl = b & 0xffff;\
    short ch = c >> 16;\
    short cl = c & 0xffff;\
    if (&a != mxu_xr) a = ((bh << sft) << 16) | (((long)bl << sft) & 0xffff);\
    if (&d != mxu_xr) d = ((ch << sft) << 16) | (((long)cl << sft) & 0xffff);\
}

#define Q16SAR(a, b, c, d, sft)\
{\
    short bh = b >> 16;\
    short bl = b & 0xffff;\
    short ch = c >> 16;\
    short cl = c & 0xffff;\
    if (&a != mxu_xr) a = (((short)bh >> sft) << 16) | ((long)((short)bl >> sft) & 0xffff);\
    if (&d != mxu_xr) d = (((short)ch >> sft) << 16) | ((long)((short)cl >> sft) & 0xffff);\
}
#define D32ACC_AA(a, b, c, d)\
{\
    int _b = b;\
    int _c = c;\
    int _a = a;\
    int _d = d;\
    if (&a != mxu_xr) a = _a + _b + _c;\
    if (&d != mxu_xr) d = _d + _b + _c;\
}

#define D32ACC_AS(a, b, c, d)\
{\
    int _b = b;\
    int _c = c;\
    int _a = a;\
    int _d = d;\
    if (&a != mxu_xr) a = _a + _b + _c;\
    if (&d != mxu_xr) d = _d + _b - _c;\
}

#define D32ADD_AS(a, b, c, d)\
{\
    int _b = b;\
    int _c = c;\
    if (&a != mxu_xr) a = _b + _c;\
    if (&d != mxu_xr) d = _b - _c;\
}

#define D32ADD_SS(a, b, c, d)\
{\
    int _b = b;\
    int _c = c;\
    if (&a != mxu_xr) a = _b - _c;\
    if (&d != mxu_xr) d = _b - _c;\
}

#define D32ADD_AA(a, b, c, d)\
{\
    int _b = b;\
    int _c = c;\
    if (&a != mxu_xr) a = _b + _c;\
    if (&d != mxu_xr) d = _b + _c;\
}
#define D16MADL_AA_WW(a, b, c, d) \
do { \
    short _ah = a >> 16;\
    short _al = (a << 16) >> 16;\
    short _bh = b >> 16;\
    short _bl = (b << 16) >> 16;\
    short _ch = c >> 16;\
    short _cl = (c << 16) >> 16;\
    int L32, R32; \
    L32 = _bh * _ch;\
    R32 = _bl * _cl; \
    _ah += (L32 << 16) >> 16; \
    _al += (R32 << 16) >> 16; \
    if (&d != mxu_xr) d = (_ah << 16) + (_al & 0xffff);\
} while (0)

#define D16MACF_AA_WW(a, b, c, d) \
do { \
    short _bh = b >> 16;\
    short _bl = (b << 16) >> 16;\
    short _ch = c >> 16;\
    short _cl = (c << 16) >> 16;\
    int L32, R32; \
    L32 = (_bh * _ch) << 1;\
    R32 = (_bl * _cl) << 1; \
    L32 = a + L32; \
    R32 = d + R32; \
    if (&a != mxu_xr) a = ((((L32 >> 15) + 1) >> 1) << 16) + ((((R32 >> 15) + 1) >> 1) & 0xffff);\
} while (0)
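/* Illustrative note (not part of the original header): D16MACF_AA_WW is the
   fractional (Q15) multiply-accumulate.  Each 16x16 product is doubled,
   added to the accumulator, and the expression (((x >> 15) + 1) >> 1) rounds
   the result to the nearest Q15 value before it is packed back into a
   halfword.  Worked example with b = c = 0x4000 (0.5 in Q15):

       L32 = (0x4000 * 0x4000) << 1 = 0x20000000
       ((0x20000000 >> 15) + 1) >> 1 = 0x2000        (0.25 in Q15)
*/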
#define D16MAC_AA_WW(a, b, c, d) \
do { \
    short _bh = b >> 16;\
    short _bl = (b << 16) >> 16;\
    short _ch = c >> 16;\
    short _cl = (c << 16) >> 16;\
    int L32, R32; \
    L32 = (_bh * _ch);\
    R32 = (_bl * _cl); \
    if (&a != mxu_xr) a = a + L32;\
    if (&d != mxu_xr) d = d + R32;\
} while (0)

#define D16MAC_SS_WW(a, b, c, d) \
do { \
    short _bh = b >> 16;\
    short _bl = (b << 16) >> 16;\
    short _ch = c >> 16;\
    short _cl = (c << 16) >> 16;\
    int L32, R32; \
    L32 = (_bh * _ch);\
    R32 = (_bl * _cl); \
    if (&a != mxu_xr) a = a - L32;\
    if (&d != mxu_xr) d = d - R32;\
} while (0)

#define D16MAC_SA_HW(a, b, c, d) \
do { \
    short _bh = b >> 16;\
    short _bl = (b << 16) >> 16;\
    short _ch = c >> 16;\
    short _cl = (c << 16) >> 16;\
    int L32, R32; \
    L32 = (_bh * _ch);\
    R32 = (_bh * _cl); \
    if (&a != mxu_xr) a = a - L32;\
    if (&d != mxu_xr) d = d + R32;\
} while (0)

#define D16MAC_SS_HW(a, b, c, d) \
do { \
    short _bh = b >> 16;\
    short _bl = (b << 16) >> 16;\
    short _ch = c >> 16;\
    short _cl = (c << 16) >> 16;\
    int L32, R32; \
    L32 = (_bh * _ch);\
    R32 = (_bh * _cl); \
    if (&a != mxu_xr) a = a - L32;\
    if (&d != mxu_xr) d = d - R32;\
} while (0)

#define D16MAC_AS_HW(a, b, c, d) \
do { \
    short _bh = b >> 16;\
    short _bl = (b << 16) >> 16;\
    short _ch = c >> 16;\
    short _cl = (c << 16) >> 16;\
    int L32, R32; \
    L32 = (_bh * _ch);\
    R32 = (_bh * _cl); \
    if (&a != mxu_xr) a = a + L32;\
    if (&d != mxu_xr) d = d - R32;\
} while (0)

#define D16MAC_AS_LW(a, b, c, d) \
do { \
    short _bh = b >> 16;\
    short _bl = (b << 16) >> 16;\
    short _ch = c >> 16;\
    short _cl = (c << 16) >> 16;\
    int L32, R32; \
    L32 = (_bl * _ch);\
    R32 = (_bl * _cl); \
    if (&a != mxu_xr) a = a + L32;\
    if (&d != mxu_xr) d = d - R32;\
} while (0)

#define D16MAC_SA_LW(a, b, c, d) \
do { \
    short _bh = b >> 16;\
    short _bl = (b << 16) >> 16;\
    short _ch = c >> 16;\
    short _cl = (c << 16) >> 16;\
    int L32, R32; \
    L32 = (_bl * _ch);\
    R32 = (_bl * _cl); \
    if (&a != mxu_xr) a = a - L32;\
    if (&d != mxu_xr) d = d + R32;\
} while (0)

#define D16MAC_SS_LW(a, b, c, d) \
do { \
    short _bh = b >> 16;\
    short _bl = (b << 16) >> 16;\
    short _ch = c >> 16;\
    short _cl = (c << 16) >> 16;\
    int L32, R32; \
    L32 = (_bl * _ch);\
    R32 = (_bl * _cl); \
    if (&a != mxu_xr) a = a - L32;\
    if (&d != mxu_xr) d = d - R32;\
} while (0)
#define Q8ADDE_AA(xra, xrb, xrc, xrd) \
{\
    unsigned char b3 = (unsigned char)((unsigned long)xrb >> 24);\
    unsigned char b2 = (unsigned char)((unsigned long)xrb >> 16);\
    unsigned char b1 = (unsigned char)((unsigned long)xrb >> 8);\
    unsigned char b0 = (unsigned char)((unsigned long)xrb >> 0);\
    unsigned char c3 = (unsigned char)((unsigned long)xrc >> 24);\
    unsigned char c2 = (unsigned char)((unsigned long)xrc >> 16);\
    unsigned char c1 = (unsigned char)((unsigned long)xrc >> 8);\
    unsigned char c0 = (unsigned char)((unsigned long)xrc >> 0);\
    short ah, al, dh, dl;\
    ah = b3 + c3;\
    al = b2 + c2;\
    dh = b1 + c1;\
    dl = b0 + c0;\
    if (&xra != mxu_xr) xra = ((unsigned long)ah << 16) | (unsigned short)al;\
    if (&xrd != mxu_xr) xrd = ((unsigned long)dh << 16) | (unsigned short)dl;\
}
#define Q16SAT(xra, xrb, xrc) \
{\
    short bh = xrb >> 16;\
    short bl = xrb & 0xFFFF;\
    short ch = xrc >> 16;\
    short cl = xrc & 0xFFFF;\
    if (bh > 255) bh = 255;\
    if (bh < 0) bh = 0;\
    if (bl > 255) bl = 255;\
    if (bl < 0) bl = 0;\
    if (ch > 255) ch = 255;\
    if (ch < 0) ch = 0;\
    if (cl > 255) cl = 255;\
    if (cl < 0) cl = 0;\
    if (&xra != mxu_xr) xra = ((unsigned)bh << 24) | ((unsigned)bl << 16) | ((unsigned)ch << 8) | (unsigned)cl;\
}
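/* Illustrative sketch (not part of the original header): Q16SAT clamps four
   16-bit values to 0..255 and packs them into one word, the usual final step
   when writing pixels back after IDCT or motion compensation.  dst below is
   a hypothetical byte pointer.

       Q16SAT(xr5, xr1, xr2);      xr1 = {p3, p2}, xr2 = {p1, p0}
       S32STD(xr5, dst, 0);        store four clamped 8-bit pixels
*/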
#define Q8SAD(xra, xrb, xrc, xrd) \
{\
    short b3 = (unsigned char)((unsigned long)xrb >> 24);\
    short b2 = (unsigned char)((unsigned long)xrb >> 16);\
    short b1 = (unsigned char)((unsigned long)xrb >> 8);\
    short b0 = (unsigned char)((unsigned long)xrb >> 0);\
    short c3 = (unsigned char)((unsigned long)xrc >> 24);\
    short c2 = (unsigned char)((unsigned long)xrc >> 16);\
    short c1 = (unsigned char)((unsigned long)xrc >> 8);\
    short c0 = (unsigned char)((unsigned long)xrc >> 0);\
    int int0, int1, int2, int3;\
    int3 = labs(b3 - c3);\
    int2 = labs(b2 - c2);\
    int1 = labs(b1 - c1);\
    int0 = labs(b0 - c0);\
    if (&xra != mxu_xr) xra = int0 + int1 + int2 + int3;\
    if (&xrd != mxu_xr) xrd += int0 + int1 + int2 + int3;\
}

#define Q8AVGR(xra, xrb, xrc) \
{\
    short b3 = (unsigned char)((unsigned long)xrb >> 24);\
    short b2 = (unsigned char)((unsigned long)xrb >> 16);\
    short b1 = (unsigned char)((unsigned long)xrb >> 8);\
    short b0 = (unsigned char)((unsigned long)xrb >> 0);\
    short c3 = (unsigned char)((unsigned long)xrc >> 24);\
    short c2 = (unsigned char)((unsigned long)xrc >> 16);\
    short c1 = (unsigned char)((unsigned long)xrc >> 8);\
    short c0 = (unsigned char)((unsigned long)xrc >> 0);\
    unsigned char a3, a2, a1, a0;\
    a3 = (unsigned char)((b3 + c3 + 1) >> 1);\
    a2 = (unsigned char)((b2 + c2 + 1) >> 1);\
    a1 = (unsigned char)((b1 + c1 + 1) >> 1);\
    a0 = (unsigned char)((b0 + c0 + 1) >> 1);\
    if (&xra != mxu_xr) xra = ((unsigned long)a3 << 24) | ((unsigned long)a2 << 16) | ((unsigned long)a1 << 8) | (unsigned long)a0;\
}
#define S32ALN(xra, xrb, xrc, rs) \
{\
    if (0 == rs)\
    {\
        if (&xra != mxu_xr) xra = xrb;\
    }\
    else if (1 == rs)\
    {\
        if (&xra != mxu_xr) xra = (xrb << 8) | ((unsigned long)xrc >> 24);\
    }\
    else if (2 == rs)\
    {\
        if (&xra != mxu_xr) xra = (xrb << 16) | ((unsigned long)xrc >> 16);\
    }\
    else if (3 == rs)\
    {\
        if (&xra != mxu_xr) xra = (xrb << 24) | ((unsigned long)xrc >> 8);\
    }\
    else if (4 == rs)\
    {\
        if (&xra != mxu_xr) xra = xrc;\
    }\
}
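/* Illustrative note (not part of the original header): S32ALN selects a
   32-bit window from the 64-bit concatenation {xrb, xrc}, shifted by rs
   bytes.  For example, with rs = 1:

       xrb = 0xAABBCCDD, xrc = 0x11223344
       S32ALN(xr3, xr1, xr2, rs);        ->   xr3 = 0xBBCCDD11

   rs = 0 returns xrb unchanged and rs = 4 returns xrc, so a register pair
   can be walked through a buffer one word at a time while re-aligning data
   that does not start on a word boundary.
*/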
#else /* C_VERSION */

/***********************************LD/SD***********************************/
#define S32LDD(xra,rb,s12) \
do { \
    __asm__ __volatile ("S32LDD xr%0,%z1,%2" \
        : \
        :"K"(xra),"d" (rb),"I"(s12)); \
} while (0)

#define S32STD(xra,rb,s12) \
do { \
    __asm__ __volatile ("S32STD xr%0,%z1,%2" \
        : \
        :"K"(xra),"d" (rb),"I"(s12):"memory"); \
} while (0)

#define S32LDDV(xra,rb,rc,strd2) \
do { \
    __asm__ __volatile ("S32LDDV xr%0,%z1,%z2,%3" \
        : \
        :"K"(xra),"d" (rb),"d"(rc),"K"(strd2)); \
} while (0)

#define S32STDV(xra,rb,rc,strd2) \
do { \
    __asm__ __volatile ("S32STDV xr%0,%z1,%z2,%3" \
        : \
        :"K"(xra),"d" (rb),"d"(rc),"K"(strd2):"memory"); \
} while (0)

#define S32LDI(xra,rb,s12) \
do { \
    __asm__ __volatile ("S32LDI xr%1,%z0,%2" \
        :"+d" (rb) \
        :"K"(xra),"I"(s12)); \
} while (0)

#define S32SDI(xra,rb,s12) \
do { \
    __asm__ __volatile ("S32SDI xr%1,%z0,%2" \
        :"+d" (rb) \
        :"K"(xra),"I"(s12):"memory"); \
} while (0)

#define S32LDIV(xra,rb,rc,strd2) \
do { \
    __asm__ __volatile ("S32LDIV xr%1,%z0,%z2,%3" \
        :"+d" (rb) \
        :"K"(xra),"d"(rc),"K"(strd2)); \
} while (0)

#define S32SDIV(xra,rb,rc,strd2) \
do { \
    __asm__ __volatile ("S32SDIV xr%1,%z0,%z2,%3" \
        :"+d" (rb) \
        :"K"(xra),"d"(rc),"K"(strd2):"memory"); \
} while (0)
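/* Illustrative sketch (not part of the original header): in this branch the
   macros expand to inline asm and xr1..xr16 are plain register indices, so a
   typical kernel loads with the post-increment forms, computes, and stores
   back.  The hypothetical loop below scales packed 16-bit samples by a Q15
   gain; it assumes buf is word-aligned and len is a multiple of 4 bytes.

       static void scale_q15(short *buf, int len, int gain)
       {
           char *p = (char *)buf - 4;
           char *q = (char *)buf - 4;
           int i;
           S32I2M(xr5, (gain << 16) | (gain & 0xFFFF));
           for (i = 0; i < len; i += 4)
           {
               S32LDI(xr1, p, 4);
               D16MULF_WW(xr2, xr1, xr5);
               S32SDI(xr2, q, 4);
           }
       }
*/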
/***********************************D16MUL***********************************/
#define D16MUL_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MUL xr%0,xr%1,xr%2,xr%3,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MUL_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MUL xr%0,xr%1,xr%2,xr%3,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MUL_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MUL xr%0,xr%1,xr%2,xr%3,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MUL_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MUL xr%0,xr%1,xr%2,xr%3,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

/**********************************D16MULF*******************************/
#define D16MULF_WW(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("D16MULF xr%0,xr%1,xr%2,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

#define D16MULF_LW(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("D16MULF xr%0,xr%1,xr%2,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

#define D16MULF_HW(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("D16MULF xr%0,xr%1,xr%2,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

#define D16MULF_XW(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("D16MULF xr%0,xr%1,xr%2,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)
/***********************************D16MAC********************************/
#define D16MAC_AA_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,AA,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MAC_AA_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,AA,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MAC_AA_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,AA,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MAC_AA_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,AA,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MAC_AS_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,AS,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MAC_AS_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,AS,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MAC_AS_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,AS,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MAC_AS_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,AS,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MAC_SA_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,SA,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MAC_SA_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,SA,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MAC_SA_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,SA,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MAC_SA_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,SA,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MAC_SS_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,SS,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MAC_SS_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,SS,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MAC_SS_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,SS,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MAC_SS_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MAC xr%0,xr%1,xr%2,xr%3,SS,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)
/**********************************D16MACF*******************************/
#define D16MACF_AA_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,AA,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MACF_AA_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,AA,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MACF_AA_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,AA,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MACF_AA_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,AA,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MACF_AS_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,AS,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MACF_AS_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,AS,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MACF_AS_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,AS,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MACF_AS_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,AS,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MACF_SA_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,SA,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MACF_SA_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,SA,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MACF_SA_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,SA,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MACF_SA_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,SA,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MACF_SS_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,SS,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MACF_SS_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,SS,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MACF_SS_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,SS,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MACF_SS_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MACF xr%0,xr%1,xr%2,xr%3,SS,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)
/**********************************D16MADL*******************************/
#define D16MADL_AA_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,AA,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MADL_AA_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,AA,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MADL_AA_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,AA,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MADL_AA_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,AA,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MADL_AS_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,AS,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MADL_AS_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,AS,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MADL_AS_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,AS,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MADL_AS_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,AS,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MADL_SA_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,SA,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MADL_SA_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,SA,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MADL_SA_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,SA,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MADL_SA_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,SA,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MADL_SS_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,SS,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MADL_SS_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,SS,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MADL_SS_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,SS,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D16MADL_SS_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D16MADL xr%0,xr%1,xr%2,xr%3,SS,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)
/***********************************S16MAD*******************************/
#define S16MAD_A_HH(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("S16MAD xr%0,xr%1,xr%2,xr%3,A,0" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define S16MAD_A_LL(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("S16MAD xr%0,xr%1,xr%2,xr%3,A,1" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define S16MAD_A_HL(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("S16MAD xr%0,xr%1,xr%2,xr%3,A,2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define S16MAD_A_LH(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("S16MAD xr%0,xr%1,xr%2,xr%3,A,3" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define S16MAD_S_HH(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("S16MAD xr%0,xr%1,xr%2,xr%3,S,0" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define S16MAD_S_LL(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("S16MAD xr%0,xr%1,xr%2,xr%3,S,1" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define S16MAD_S_HL(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("S16MAD xr%0,xr%1,xr%2,xr%3,S,2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define S16MAD_S_LH(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("S16MAD xr%0,xr%1,xr%2,xr%3,S,3" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)
/***********************************Q8MUL********************************/
#define Q8MUL(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8MUL xr%0,xr%1,xr%2,xr%3" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

/***********************************Q8MAC********************************/
#define Q8MAC_AA(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8MAC xr%0,xr%1,xr%2,xr%3,AA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q8MAC_AS(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8MAC xr%0,xr%1,xr%2,xr%3,AS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q8MAC_SA(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8MAC xr%0,xr%1,xr%2,xr%3,SA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q8MAC_SS(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8MAC xr%0,xr%1,xr%2,xr%3,SS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

/***********************************Q8MADL********************************/
#define Q8MADL_AA(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8MADL xr%0,xr%1,xr%2,xr%3,AA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q8MADL_AS(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8MADL xr%0,xr%1,xr%2,xr%3,AS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q8MADL_SA(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8MADL xr%0,xr%1,xr%2,xr%3,SA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q8MADL_SS(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8MADL xr%0,xr%1,xr%2,xr%3,SS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)
/***********************************D32ADD********************************/
#define D32ADD_AA(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D32ADD xr%0,xr%1,xr%2,xr%3,AA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D32ADD_AS(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D32ADD xr%0,xr%1,xr%2,xr%3,AS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D32ADD_SA(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D32ADD xr%0,xr%1,xr%2,xr%3,SA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D32ADD_SS(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D32ADD xr%0,xr%1,xr%2,xr%3,SS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

/***********************************D32ACC********************************/
#define D32ACC_AA(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D32ACC xr%0,xr%1,xr%2,xr%3,AA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D32ACC_AS(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D32ACC xr%0,xr%1,xr%2,xr%3,AS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D32ACC_SA(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D32ACC xr%0,xr%1,xr%2,xr%3,SA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define D32ACC_SS(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("D32ACC xr%0,xr%1,xr%2,xr%3,SS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)
/***********************************S32CPS********************************/
#define S32CPS(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("S32CPS xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

#define S32ABS(xra,xrb) \
do { \
    __asm__ __volatile ("S32CPS xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrb)); \
} while (0)
/***********************************Q16ADD********************************/
#define Q16ADD_AA_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,AA,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ADD_AA_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,AA,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ADD_AA_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,AA,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ADD_AA_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,AA,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ADD_AS_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,AS,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ADD_AS_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,AS,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ADD_AS_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,AS,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ADD_AS_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,AS,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ADD_SA_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,SA,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ADD_SA_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,SA,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ADD_SA_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,SA,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ADD_SA_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,SA,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ADD_SS_WW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,SS,WW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ADD_SS_LW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,SS,LW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ADD_SS_HW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,SS,HW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ADD_SS_XW(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ADD xr%0,xr%1,xr%2,xr%3,SS,XW" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

/***********************************Q16ACC********************************/
#define Q16ACC_AA(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ACC xr%0,xr%1,xr%2,xr%3,AA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ACC_AS(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ACC xr%0,xr%1,xr%2,xr%3,AS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ACC_SA(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ACC xr%0,xr%1,xr%2,xr%3,SA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q16ACC_SS(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q16ACC xr%0,xr%1,xr%2,xr%3,SS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)
/***********************************D16CPS********************************/
#define D16CPS(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("D16CPS xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

#define D16ABS(xra,xrb) \
do { \
    __asm__ __volatile ("D16CPS xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrb)); \
} while (0)

/*******************************D16AVG/D16AVGR*****************************/
#define D16AVG(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("D16AVG xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

#define D16AVGR(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("D16AVGR xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)
/************************************Q8ADD********************************/
#define Q8ADD_AA(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("Q8ADD xr%0,xr%1,xr%2,AA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

#define Q8ADD_AS(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("Q8ADD xr%0,xr%1,xr%2,AS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

#define Q8ADD_SA(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("Q8ADD xr%0,xr%1,xr%2,SA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

#define Q8ADD_SS(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("Q8ADD xr%0,xr%1,xr%2,SS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

/************************************Q8ADDE********************************/
#define Q8ADDE_AA(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8ADDE xr%0,xr%1,xr%2,xr%3,AA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q8ADDE_AS(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8ADDE xr%0,xr%1,xr%2,xr%3,AS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q8ADDE_SA(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8ADDE xr%0,xr%1,xr%2,xr%3,SA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q8ADDE_SS(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8ADDE xr%0,xr%1,xr%2,xr%3,SS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)
/************************************Q8ACCE********************************/
#define Q8ACCE_AA(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8ACCE xr%0,xr%1,xr%2,xr%3,AA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q8ACCE_AS(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8ACCE xr%0,xr%1,xr%2,xr%3,AS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q8ACCE_SA(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8ACCE xr%0,xr%1,xr%2,xr%3,SA" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

#define Q8ACCE_SS(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8ACCE xr%0,xr%1,xr%2,xr%3,SS" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

/************************************Q8ABD********************************/
#define Q8ABD(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("Q8ABD xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

/************************************Q8SLT********************************/
#define Q8SLT(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("Q8SLT xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

/************************************Q8SAD********************************/
#define Q8SAD(xra,xrb,xrc,xrd) \
do { \
    __asm__ __volatile ("Q8SAD xr%0,xr%1,xr%2,xr%3" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd)); \
} while (0)

/********************************Q8AVG/Q8AVGR*****************************/
#define Q8AVG(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("Q8AVG xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

#define Q8AVGR(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("Q8AVGR xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)
/**********************************D32SHIFT******************************/
#define D32SLL(xra,xrb,xrc,xrd,SFT4) \
do { \
    __asm__ __volatile ("D32SLL xr%0,xr%1,xr%2,xr%3,%4" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd),"K"(SFT4)); \
} while (0)

#define D32SLR(xra,xrb,xrc,xrd,SFT4) \
do { \
    __asm__ __volatile ("D32SLR xr%0,xr%1,xr%2,xr%3,%4" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd),"K"(SFT4)); \
} while (0)

#define D32SAR(xra,xrb,xrc,xrd,SFT4) \
do { \
    __asm__ __volatile ("D32SAR xr%0,xr%1,xr%2,xr%3,%4" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd),"K"(SFT4)); \
} while (0)

#define D32SARL(xra,xrb,xrc,SFT4) \
do { \
    __asm__ __volatile ("D32SARL xr%0,xr%1,xr%2,%3" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(SFT4)); \
} while (0)

#define D32SLLV(xra,xrd,rb) \
do { \
    __asm__ __volatile ("D32SLLV xr%0,xr%1,%z2" \
        : \
        :"K"(xra),"K"(xrd),"d"(rb)); \
} while (0)

#define D32SLRV(xra,xrd,rb) \
do { \
    __asm__ __volatile ("D32SLRV xr%0,xr%1,%z2" \
        : \
        :"K"(xra),"K"(xrd),"d"(rb)); \
} while (0)

#define D32SARV(xra,xrd,rb) \
do { \
    __asm__ __volatile ("D32SARV xr%0,xr%1,%z2" \
        : \
        :"K"(xra),"K"(xrd),"d"(rb)); \
} while (0)

#define D32SARW(xra,xrb,xrc,rb) \
do { \
    __asm__ __volatile ("D32SARW xr%0,xr%1,xr%2,%3" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"d"(rb)); \
} while (0)

/**********************************Q16SHIFT******************************/
#define Q16SLL(xra,xrb,xrc,xrd,SFT4) \
do { \
    __asm__ __volatile ("Q16SLL xr%0,xr%1,xr%2,xr%3,%4" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd),"K"(SFT4)); \
} while (0)

#define Q16SLR(xra,xrb,xrc,xrd,SFT4) \
do { \
    __asm__ __volatile ("Q16SLR xr%0,xr%1,xr%2,xr%3,%4" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd),"K"(SFT4)); \
} while (0)

#define Q16SAR(xra,xrb,xrc,xrd,SFT4) \
do { \
    __asm__ __volatile ("Q16SAR xr%0,xr%1,xr%2,xr%3,%4" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd),"K"(SFT4)); \
} while (0)

#define Q16SLLV(xra,xrd,rb) \
do { \
    __asm__ __volatile ("Q16SLLV xr%0,xr%1,%z2" \
        : \
        :"K"(xra),"K"(xrd),"d"(rb)); \
} while (0)

#define Q16SLRV(xra,xrd,rb) \
do { \
    __asm__ __volatile ("Q16SLRV xr%0,xr%1,%z2" \
        : \
        :"K"(xra),"K"(xrd),"d"(rb)); \
} while (0)

#define Q16SARV(xra,xrd,rb) \
do { \
    __asm__ __volatile ("Q16SARV xr%0,xr%1,%z2" \
        : \
        :"K"(xra),"K"(xrd),"d"(rb)); \
} while (0)
/*********************************MAX/MIN*********************************/
#define S32MAX(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("S32MAX xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

#define S32MIN(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("S32MIN xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

#define D16MAX(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("D16MAX xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

#define D16MIN(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("D16MIN xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

#define Q8MAX(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("Q8MAX xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)

#define Q8MIN(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("Q8MIN xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)
/*************************************MOVE********************************/
#define S32I2M(xra,rb) \
do { \
    __asm__ __volatile ("S32I2M xr%0,%z1" \
        : \
        :"K"(xra),"d"(rb)); \
} while (0)

#define S32M2I(xra) \
__extension__ ({ \
    int __d; \
    __asm__ __volatile ("S32M2I xr%1, %0" \
        :"=d"(__d) \
        :"K"(xra)); \
    __d; \
})
/*********************************S32SFL**********************************/
#define S32SFL(xra,xrb,xrc,xrd,optn2) \
do { \
    __asm__ __volatile ("S32SFL xr%0,xr%1,xr%2,xr%3,ptn%4" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"K"(xrd),"K"(optn2)); \
} while (0)

/*********************************S32ALN**********************************/
#define S32ALN(xra,xrb,xrc,rs) \
do { \
    __asm__ __volatile ("S32ALN xr%0,xr%1,xr%2,%z3" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc),"d"(rs)); \
} while (0)

/*********************************Q16SAT**********************************/
#define Q16SAT(xra,xrb,xrc) \
do { \
    __asm__ __volatile ("Q16SAT xr%0,xr%1,xr%2" \
        : \
        :"K"(xra),"K"(xrb),"K"(xrc)); \
} while (0)
// cache ops

// cache
#define Index_Invalidate_I      0x00
#define Index_Writeback_Inv_D   0x01
#define Index_Load_Tag_I        0x04
#define Index_Load_Tag_D        0x05
#define Index_Store_Tag_I       0x08
#define Index_Store_Tag_D       0x09
#define Hit_Invalidate_I        0x10
#define Hit_Invalidate_D        0x11
#define Hit_Writeback_Inv_D     0x15
#define Hit_Writeback_I         0x18
#define Hit_Writeback_D         0x19

// pref
#define PrefLoad                0
#define PrefStore               1
#define PrefLoadStreamed        4
#define PrefStoreStreamed       5
#define PrefLoadRetained        6
#define PrefStoreRetained       7
#define PrefWBInval             25
#define PrefNudge               25
#define PrefPreForStore         30
#define mips_pref(base, offset, op) \
    __asm__ __volatile__( \
        " .set noreorder \n" \
        " pref %1, %2(%0) \n" \
        " .set reorder" \
        : \
        : "r" (base), "i" (op), "i" (offset))

#define cache_op(op, addr) \
    __asm__ __volatile__( \
        " .set noreorder \n" \
        " cache %0, %1 \n" \
        " .set reorder" \
        : \
        : "i" (op), "m" (*(unsigned char *)(addr)))
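/* Illustrative sketch (not part of the original header): cache_op() issues
   one CACHE instruction for the line containing addr, so writing a DMA
   buffer back to memory means walking the buffer in cache-line steps.
   CACHE_LINE_SIZE and writeback_inv_range() below are hypothetical.

       #define CACHE_LINE_SIZE 32
       static void writeback_inv_range(unsigned long addr, unsigned long size)
       {
           unsigned long end = addr + size;
           for (addr &= ~(CACHE_LINE_SIZE - 1); addr < end; addr += CACHE_LINE_SIZE)
               cache_op(Hit_Writeback_Inv_D, addr);
       }
*/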
#define i_pref(hint,base,offset) \
    ({ __asm__ __volatile__("pref %0,%2(%1)"::"i"(hint),"r"(base),"i"(offset):"memory");})
struct unaligned_32 { unsigned int l; } __attribute__((packed));
#define LD32(a) (((const struct unaligned_32 *) (a))->l)
#define ST32(a, b) (((struct unaligned_32 *) (a))->l) = (b)
#define REVERSE_LD32(xra, xrb, rb, s12) \
__extension__ ({ \
    int __d; \
    __asm__ __volatile ("S32LDD xr%1,%z3,%4\n\t" \
                        "S32SFL xr%1,xr%1, xr%1, xr%2, ptn0\n\t" \
                        "S32SFL xr%1,xr%2, xr%1, xr%2, ptn3\n\t" \
                        "S32SFL xr%1,xr%2, xr%1, xr%2, ptn2\n\t" \
                        "S32M2I xr%1,%0" \
        :"=d"(__d) \
        :"K"(xra), "K"(xrb), "d"(rb), "I"(s12)); \
    __d; \
})
#define IU_CLZ(rb) \
__extension__ ({ \
    int __d; \
    __asm__ __volatile ("clz %0, %1" \
        :"=d"(__d) \
        :"d"(rb)); \
    __d; \
})
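/* Illustrative sketch (not part of the original header): IU_CLZ expands to
   the MIPS32 clz instruction, so the integer log2 of a non-zero word is
   simply 31 minus the leading-zero count.  ilog2() is a hypothetical helper.

       static inline int ilog2(unsigned int x)
       {
           return 31 - IU_CLZ(x);
       }
*/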
#endif /* C_VERSION */

#endif /* JZ_MXU_H_ */