/*
 * motion_comp_mmx.c
 * Copyright (C) 2000-2003 Michel Lespinasse <walken@zoy.org>
 * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
 *
 * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
 * See http://libmpeg2.sourceforge.net/ for updates.
 *
 * mpeg2dec is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * mpeg2dec is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "config.h"

#if ARCH_X86 || ARCH_X86_64

#include <inttypes.h>

#include "mpeg2.h"
#include "attributes.h"
#include "mpeg2_internal.h"
#include "mmx.h"

#define CPU_MMXEXT 0
#define CPU_3DNOW 1

#if HAVE_MMX
/* MMX code - needs a rewrite */

/*
 * Motion Compensation frequently needs to average values using the
 * formula (x+y+1)>>1. Both MMXEXT and 3Dnow include one instruction
 * to compute this, but it's been left out of classic MMX.
 *
 * We need to be careful of overflows when doing this computation.
 * Rather than unpacking data to 16-bits, which reduces parallelism,
 * we use the following formulas:
 *
 * (x+y)>>1 == (x&y)+((x^y)>>1)
 * (x+y+1)>>1 == (x|y)-((x^y)>>1)
 */
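
/*
 * Illustrative reference (plain C, not used by the MMX code): the same
 * identities written out as scalar helpers.  The helper names are
 * hypothetical and exist only to make the packed-byte code below easier
 * to check.
 */
static inline uint8_t avg_down_ref (uint8_t x, uint8_t y)
{
    /* x+y == 2*(x&y) + (x^y), so the floor average needs no wider type */
    return (x & y) + ((x ^ y) >> 1);
}

static inline uint8_t avg_up_ref (uint8_t x, uint8_t y)
{
    /* x+y == 2*(x|y) - (x^y), so the rounded-up average is (x|y)-((x^y)>>1) */
    return (x | y) - ((x ^ y) >> 1);
}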

/* some rounding constants */
static mmx_t mask1 = {0xfefefefefefefefeLL};
static mmx_t round4 = {0x0002000200020002LL};

/*
 * This code should probably be compiled with loop unrolling
 * (i.e., -funroll-loops in gcc), because some of the loops
 * use a small static number of iterations.  It was written
 * on the assumption that the compiler knows best about when
 * unrolling will help.
 */

static inline void mmx_zero_reg (void)
{
    /* load 0 into mm0 */
    pxor_r2r (mm0, mm0);
}

static inline void mmx_average_2_U8 (uint8_t * dest, const uint8_t * src1,
                                     const uint8_t * src2)
{
    /* *dest = (*src1 + *src2 + 1) / 2; */

    movq_m2r (*src1, mm1);      /* load 8 src1 bytes */
    movq_r2r (mm1, mm2);        /* copy 8 src1 bytes */

    movq_m2r (*src2, mm3);      /* load 8 src2 bytes */
    movq_r2r (mm3, mm4);        /* copy 8 src2 bytes */

    pxor_r2r (mm1, mm3);        /* xor src1 and src2 */
    pand_m2r (mask1, mm3);      /* mask lower bits */
    psrlq_i2r (1, mm3);         /* /2 */
    por_r2r (mm2, mm4);         /* or src1 and src2 */
    psubb_r2r (mm3, mm4);       /* subtract subresults */
    movq_r2m (mm4, *dest);      /* store result in dest */
}

static inline void mmx_interp_average_2_U8 (uint8_t * dest,
                                            const uint8_t * src1,
                                            const uint8_t * src2)
{
    /* *dest = (*dest + (*src1 + *src2 + 1) / 2 + 1) / 2; */

    movq_m2r (*dest, mm1);      /* load 8 dest bytes */
    movq_r2r (mm1, mm2);        /* copy 8 dest bytes */

    movq_m2r (*src1, mm3);      /* load 8 src1 bytes */
    movq_r2r (mm3, mm4);        /* copy 8 src1 bytes */

    movq_m2r (*src2, mm5);      /* load 8 src2 bytes */
    movq_r2r (mm5, mm6);        /* copy 8 src2 bytes */

    pxor_r2r (mm3, mm5);        /* xor src1 and src2 */
    pand_m2r (mask1, mm5);      /* mask lower bits */
    psrlq_i2r (1, mm5);         /* /2 */
    por_r2r (mm4, mm6);         /* or src1 and src2 */
    psubb_r2r (mm5, mm6);       /* subtract subresults */
    movq_r2r (mm6, mm5);        /* copy subresult */

    pxor_r2r (mm1, mm5);        /* xor srcavg and dest */
    pand_m2r (mask1, mm5);      /* mask lower bits */
    psrlq_i2r (1, mm5);         /* /2 */
    por_r2r (mm2, mm6);         /* or srcavg and dest */
    psubb_r2r (mm5, mm6);       /* subtract subresults */
    movq_r2m (mm6, *dest);      /* store result in dest */
}

static inline void mmx_average_4_U8 (uint8_t * dest, const uint8_t * src1,
                                     const uint8_t * src2,
                                     const uint8_t * src3,
                                     const uint8_t * src4)
{
    /* *dest = (*src1 + *src2 + *src3 + *src4 + 2) / 4; */

    movq_m2r (*src1, mm1);      /* load 8 src1 bytes */
    movq_r2r (mm1, mm2);        /* copy 8 src1 bytes */

    punpcklbw_r2r (mm0, mm1);   /* unpack low src1 bytes */
    punpckhbw_r2r (mm0, mm2);   /* unpack high src1 bytes */

    movq_m2r (*src2, mm3);      /* load 8 src2 bytes */
    movq_r2r (mm3, mm4);        /* copy 8 src2 bytes */

    punpcklbw_r2r (mm0, mm3);   /* unpack low src2 bytes */
    punpckhbw_r2r (mm0, mm4);   /* unpack high src2 bytes */

    paddw_r2r (mm3, mm1);       /* add lows */
    paddw_r2r (mm4, mm2);       /* add highs */

    /* now have partials in mm1 and mm2 */

    movq_m2r (*src3, mm3);      /* load 8 src3 bytes */
    movq_r2r (mm3, mm4);        /* copy 8 src3 bytes */

    punpcklbw_r2r (mm0, mm3);   /* unpack low src3 bytes */
    punpckhbw_r2r (mm0, mm4);   /* unpack high src3 bytes */

    paddw_r2r (mm3, mm1);       /* add lows */
    paddw_r2r (mm4, mm2);       /* add highs */

    movq_m2r (*src4, mm5);      /* load 8 src4 bytes */
    movq_r2r (mm5, mm6);        /* copy 8 src4 bytes */

    punpcklbw_r2r (mm0, mm5);   /* unpack low src4 bytes */
    punpckhbw_r2r (mm0, mm6);   /* unpack high src4 bytes */

    paddw_r2r (mm5, mm1);       /* add lows */
    paddw_r2r (mm6, mm2);       /* add highs */

    /* now have subtotal in mm1 and mm2 */

    paddw_m2r (round4, mm1);
    psraw_i2r (2, mm1);         /* /4 */
    paddw_m2r (round4, mm2);
    psraw_i2r (2, mm2);         /* /4 */

    packuswb_r2r (mm2, mm1);    /* pack (w/ saturation) */
    movq_r2m (mm1, *dest);      /* store result in dest */
}

static inline void mmx_interp_average_4_U8 (uint8_t * dest,
                                            const uint8_t * src1,
                                            const uint8_t * src2,
                                            const uint8_t * src3,
                                            const uint8_t * src4)
{
    /* *dest = (*dest + (*src1 + *src2 + *src3 + *src4 + 2) / 4 + 1) / 2; */

    movq_m2r (*src1, mm1);      /* load 8 src1 bytes */
    movq_r2r (mm1, mm2);        /* copy 8 src1 bytes */

    punpcklbw_r2r (mm0, mm1);   /* unpack low src1 bytes */
    punpckhbw_r2r (mm0, mm2);   /* unpack high src1 bytes */

    movq_m2r (*src2, mm3);      /* load 8 src2 bytes */
    movq_r2r (mm3, mm4);        /* copy 8 src2 bytes */

    punpcklbw_r2r (mm0, mm3);   /* unpack low src2 bytes */
    punpckhbw_r2r (mm0, mm4);   /* unpack high src2 bytes */

    paddw_r2r (mm3, mm1);       /* add lows */
    paddw_r2r (mm4, mm2);       /* add highs */

    /* now have partials in mm1 and mm2 */

    movq_m2r (*src3, mm3);      /* load 8 src3 bytes */
    movq_r2r (mm3, mm4);        /* copy 8 src3 bytes */

    punpcklbw_r2r (mm0, mm3);   /* unpack low src3 bytes */
    punpckhbw_r2r (mm0, mm4);   /* unpack high src3 bytes */

    paddw_r2r (mm3, mm1);       /* add lows */
    paddw_r2r (mm4, mm2);       /* add highs */

    movq_m2r (*src4, mm5);      /* load 8 src4 bytes */
    movq_r2r (mm5, mm6);        /* copy 8 src4 bytes */

    punpcklbw_r2r (mm0, mm5);   /* unpack low src4 bytes */
    punpckhbw_r2r (mm0, mm6);   /* unpack high src4 bytes */

    paddw_r2r (mm5, mm1);       /* add lows */
    paddw_r2r (mm6, mm2);       /* add highs */

    paddw_m2r (round4, mm1);
    psraw_i2r (2, mm1);         /* /4 */
    paddw_m2r (round4, mm2);
    psraw_i2r (2, mm2);         /* /4 */

    /* now have subtotal/4 in mm1 and mm2 */

    movq_m2r (*dest, mm3);      /* load 8 dest bytes */
    movq_r2r (mm3, mm4);        /* copy 8 dest bytes */

    packuswb_r2r (mm2, mm1);    /* pack (w/ saturation) */
    movq_r2r (mm1, mm2);        /* copy subresult */

    pxor_r2r (mm1, mm3);        /* xor srcavg and dest */
    pand_m2r (mask1, mm3);      /* mask lower bits */
    psrlq_i2r (1, mm3);         /* /2 */
    por_r2r (mm2, mm4);         /* or srcavg and dest */
    psubb_r2r (mm3, mm4);       /* subtract subresults */
    movq_r2m (mm4, *dest);      /* store result in dest */
}

/*-----------------------------------------------------------------------*/

static inline void MC_avg_mmx (const int width, int height, uint8_t * dest,
                               const uint8_t * ref, const int stride)
{
    mmx_zero_reg ();

    do {
        mmx_average_2_U8 (dest, dest, ref);

        if (width == 16)
            mmx_average_2_U8 (dest+8, dest+8, ref+8);

        dest += stride;
        ref += stride;
    } while (--height);
}

static void MC_avg_o_16_mmx (uint8_t * dest, const uint8_t * ref,
                             int stride, int height)
{
    MC_avg_mmx (16, height, dest, ref, stride);
}

static void MC_avg_o_8_mmx (uint8_t * dest, const uint8_t * ref,
                            int stride, int height)
{
    MC_avg_mmx (8, height, dest, ref, stride);
}

/*-----------------------------------------------------------------------*/

static inline void MC_put_mmx (const int width, int height, uint8_t * dest,
                               const uint8_t * ref, const int stride)
{
    mmx_zero_reg ();

    do {
        movq_m2r (*ref, mm1);           /* load 8 ref bytes */
        movq_r2m (mm1, *dest);          /* store 8 bytes at curr */

        if (width == 16) {
            movq_m2r (*(ref+8), mm1);   /* load 8 ref bytes */
            movq_r2m (mm1, *(dest+8));  /* store 8 bytes at curr */
        }

        dest += stride;
        ref += stride;
    } while (--height);
}

static void MC_put_o_16_mmx (uint8_t * dest, const uint8_t * ref,
                             int stride, int height)
{
    MC_put_mmx (16, height, dest, ref, stride);
}

static void MC_put_o_8_mmx (uint8_t * dest, const uint8_t * ref,
                            int stride, int height)
{
    MC_put_mmx (8, height, dest, ref, stride);
}

/*-----------------------------------------------------------------------*/

/* Half pixel interpolation in the x direction */
static inline void MC_avg_x_mmx (const int width, int height, uint8_t * dest,
                                 const uint8_t * ref, const int stride)
{
    mmx_zero_reg ();

    do {
        mmx_interp_average_2_U8 (dest, ref, ref+1);

        if (width == 16)
            mmx_interp_average_2_U8 (dest+8, ref+8, ref+9);

        dest += stride;
        ref += stride;
    } while (--height);
}

static void MC_avg_x_16_mmx (uint8_t * dest, const uint8_t * ref,
                             int stride, int height)
{
    MC_avg_x_mmx (16, height, dest, ref, stride);
}

static void MC_avg_x_8_mmx (uint8_t * dest, const uint8_t * ref,
                            int stride, int height)
{
    MC_avg_x_mmx (8, height, dest, ref, stride);
}

/*-----------------------------------------------------------------------*/

static inline void MC_put_x_mmx (const int width, int height, uint8_t * dest,
                                 const uint8_t * ref, const int stride)
{
    mmx_zero_reg ();

    do {
        mmx_average_2_U8 (dest, ref, ref+1);

        if (width == 16)
            mmx_average_2_U8 (dest+8, ref+8, ref+9);

        dest += stride;
        ref += stride;
    } while (--height);
}

static void MC_put_x_16_mmx (uint8_t * dest, const uint8_t * ref,
                             int stride, int height)
{
    MC_put_x_mmx (16, height, dest, ref, stride);
}

static void MC_put_x_8_mmx (uint8_t * dest, const uint8_t * ref,
                            int stride, int height)
{
    MC_put_x_mmx (8, height, dest, ref, stride);
}

/*-----------------------------------------------------------------------*/

static inline void MC_avg_xy_mmx (const int width, int height, uint8_t * dest,
                                  const uint8_t * ref, const int stride)
{
    const uint8_t * ref_next = ref + stride;

    mmx_zero_reg ();

    do {
        mmx_interp_average_4_U8 (dest, ref, ref+1, ref_next, ref_next+1);

        if (width == 16)
            mmx_interp_average_4_U8 (dest+8, ref+8, ref+9,
                                     ref_next+8, ref_next+9);

        dest += stride;
        ref += stride;
        ref_next += stride;
    } while (--height);
}

static void MC_avg_xy_16_mmx (uint8_t * dest, const uint8_t * ref,
                              int stride, int height)
{
    MC_avg_xy_mmx (16, height, dest, ref, stride);
}

static void MC_avg_xy_8_mmx (uint8_t * dest, const uint8_t * ref,
                             int stride, int height)
{
    MC_avg_xy_mmx (8, height, dest, ref, stride);
}

/*-----------------------------------------------------------------------*/

static inline void MC_put_xy_mmx (const int width, int height, uint8_t * dest,
                                  const uint8_t * ref, const int stride)
{
    const uint8_t * ref_next = ref + stride;

    mmx_zero_reg ();

    do {
        mmx_average_4_U8 (dest, ref, ref+1, ref_next, ref_next+1);

        if (width == 16)
            mmx_average_4_U8 (dest+8, ref+8, ref+9, ref_next+8, ref_next+9);

        dest += stride;
        ref += stride;
        ref_next += stride;
    } while (--height);
}

static void MC_put_xy_16_mmx (uint8_t * dest, const uint8_t * ref,
                              int stride, int height)
{
    MC_put_xy_mmx (16, height, dest, ref, stride);
}

static void MC_put_xy_8_mmx (uint8_t * dest, const uint8_t * ref,
                             int stride, int height)
{
    MC_put_xy_mmx (8, height, dest, ref, stride);
}

/*-----------------------------------------------------------------------*/

static inline void MC_avg_y_mmx (const int width, int height, uint8_t * dest,
                                 const uint8_t * ref, const int stride)
{
    const uint8_t * ref_next = ref + stride;

    mmx_zero_reg ();

    do {
        mmx_interp_average_2_U8 (dest, ref, ref_next);

        if (width == 16)
            mmx_interp_average_2_U8 (dest+8, ref+8, ref_next+8);

        dest += stride;
        ref += stride;
        ref_next += stride;
    } while (--height);
}

static void MC_avg_y_16_mmx (uint8_t * dest, const uint8_t * ref,
                             int stride, int height)
{
    MC_avg_y_mmx (16, height, dest, ref, stride);
}

static void MC_avg_y_8_mmx (uint8_t * dest, const uint8_t * ref,
                            int stride, int height)
{
    MC_avg_y_mmx (8, height, dest, ref, stride);
}

/*-----------------------------------------------------------------------*/

static inline void MC_put_y_mmx (const int width, int height, uint8_t * dest,
                                 const uint8_t * ref, const int stride)
{
    const uint8_t * ref_next = ref + stride;

    mmx_zero_reg ();

    do {
        mmx_average_2_U8 (dest, ref, ref_next);

        if (width == 16)
            mmx_average_2_U8 (dest+8, ref+8, ref_next+8);

        dest += stride;
        ref += stride;
        ref_next += stride;
    } while (--height);
}

static void MC_put_y_16_mmx (uint8_t * dest, const uint8_t * ref,
                             int stride, int height)
{
    MC_put_y_mmx (16, height, dest, ref, stride);
}

static void MC_put_y_8_mmx (uint8_t * dest, const uint8_t * ref,
                            int stride, int height)
{
    MC_put_y_mmx (8, height, dest, ref, stride);
}
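
/*
 * Note (assumption, not stated in this file): MPEG2_MC_EXTERN is defined
 * in mpeg2_internal.h and is expected to instantiate the mpeg2_mc_t
 * dispatch table from the sixteen MC_{put,avg}_{o,x,y,xy}_{16,8} entry
 * points above for the given CPU suffix.
 */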
MPEG2_MC_EXTERN (mmx)

#endif /* HAVE_MMX */

/* CPU_MMXEXT/CPU_3DNOW adaptation layer */

#define pavg_r2r(src,dest)              \
do {                                    \
    if (cpu == CPU_MMXEXT)              \
        pavgb_r2r (src, dest);          \
    else                                \
        pavgusb_r2r (src, dest);        \
} while (0)

#define pavg_m2r(src,dest)              \
do {                                    \
    if (cpu == CPU_MMXEXT)              \
        pavgb_m2r (src, dest);          \
    else                                \
        pavgusb_m2r (src, dest);        \
} while (0)
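
/*
 * Both pavgb (MMXEXT) and pavgusb (3DNow!) compute the rounded average
 * (x+y+1)>>1 on packed bytes, so the two branches of the wrappers above
 * produce identical results; only the opcode differs.
 */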

/* CPU_MMXEXT code */

static inline void MC_put1_8 (int height, uint8_t * dest, const uint8_t * ref,
                              const int stride)
{
    do {
        movq_m2r (*ref, mm0);
        movq_r2m (mm0, *dest);
        ref += stride;
        dest += stride;
    } while (--height);
}

static inline void MC_put1_16 (int height, uint8_t * dest, const uint8_t * ref,
                               const int stride)
{
    do {
        movq_m2r (*ref, mm0);
        movq_m2r (*(ref+8), mm1);
        ref += stride;
        movq_r2m (mm0, *dest);
        movq_r2m (mm1, *(dest+8));
        dest += stride;
    } while (--height);
}

static inline void MC_avg1_8 (int height, uint8_t * dest, const uint8_t * ref,
                              const int stride, const int cpu)
{
    do {
        movq_m2r (*ref, mm0);
        pavg_m2r (*dest, mm0);
        ref += stride;
        movq_r2m (mm0, *dest);
        dest += stride;
    } while (--height);
}

static inline void MC_avg1_16 (int height, uint8_t * dest, const uint8_t * ref,
                               const int stride, const int cpu)
{
    do {
        movq_m2r (*ref, mm0);
        movq_m2r (*(ref+8), mm1);
        pavg_m2r (*dest, mm0);
        pavg_m2r (*(dest+8), mm1);
        movq_r2m (mm0, *dest);
        ref += stride;
        movq_r2m (mm1, *(dest+8));
        dest += stride;
    } while (--height);
}

static inline void MC_put2_8 (int height, uint8_t * dest, const uint8_t * ref,
                              const int stride, const int offset,
                              const int cpu)
{
    do {
        movq_m2r (*ref, mm0);
        pavg_m2r (*(ref+offset), mm0);
        ref += stride;
        movq_r2m (mm0, *dest);
        dest += stride;
    } while (--height);
}

static inline void MC_put2_16 (int height, uint8_t * dest, const uint8_t * ref,
                               const int stride, const int offset,
                               const int cpu)
{
    do {
        movq_m2r (*ref, mm0);
        movq_m2r (*(ref+8), mm1);
        pavg_m2r (*(ref+offset), mm0);
        pavg_m2r (*(ref+offset+8), mm1);
        movq_r2m (mm0, *dest);
        ref += stride;
        movq_r2m (mm1, *(dest+8));
        dest += stride;
    } while (--height);
}

static inline void MC_avg2_8 (int height, uint8_t * dest, const uint8_t * ref,
                              const int stride, const int offset,
                              const int cpu)
{
    do {
        movq_m2r (*ref, mm0);
        pavg_m2r (*(ref+offset), mm0);
        pavg_m2r (*dest, mm0);
        ref += stride;
        movq_r2m (mm0, *dest);
        dest += stride;
    } while (--height);
}

static inline void MC_avg2_16 (int height, uint8_t * dest, const uint8_t * ref,
                               const int stride, const int offset,
                               const int cpu)
{
    do {
        movq_m2r (*ref, mm0);
        movq_m2r (*(ref+8), mm1);
        pavg_m2r (*(ref+offset), mm0);
        pavg_m2r (*(ref+offset+8), mm1);
        pavg_m2r (*dest, mm0);
        pavg_m2r (*(dest+8), mm1);
        ref += stride;
        movq_r2m (mm0, *dest);
        movq_r2m (mm1, *(dest+8));
        dest += stride;
    } while (--height);
}

static mmx_t mask_one = {0x0101010101010101LL};
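
/*
 * Note on the correction applied in MC_put4_* and MC_avg4_* below:
 * pavg rounds up, so cascading two averages, avg(avg(a,b), avg(c,d)),
 * can exceed the exact (a+b+c+d+2)>>2 by one.  That happens exactly when
 * (a^b)|(c^d) and avg(a,b)^avg(c,d) both have their low bit set, which is
 * what the pxor/por/pand chain against mask_one computes before psubusb
 * subtracts the correction.
 */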

static inline void MC_put4_8 (int height, uint8_t * dest, const uint8_t * ref,
                              const int stride, const int cpu)
{
    movq_m2r (*ref, mm0);
    movq_m2r (*(ref+1), mm1);
    movq_r2r (mm0, mm7);
    pxor_r2r (mm1, mm7);
    pavg_r2r (mm1, mm0);
    ref += stride;

    do {
        movq_m2r (*ref, mm2);
        movq_r2r (mm0, mm5);

        movq_m2r (*(ref+1), mm3);
        movq_r2r (mm2, mm6);

        pxor_r2r (mm3, mm6);
        pavg_r2r (mm3, mm2);

        por_r2r (mm6, mm7);
        pxor_r2r (mm2, mm5);

        pand_r2r (mm5, mm7);
        pavg_r2r (mm2, mm0);

        pand_m2r (mask_one, mm7);

        psubusb_r2r (mm7, mm0);

        ref += stride;
        movq_r2m (mm0, *dest);
        dest += stride;

        movq_r2r (mm6, mm7);    /* unroll ! */
        movq_r2r (mm2, mm0);    /* unroll ! */
    } while (--height);
}

static inline void MC_put4_16 (int height, uint8_t * dest, const uint8_t * ref,
                               const int stride, const int cpu)
{
    do {
        movq_m2r (*ref, mm0);
        movq_m2r (*(ref+stride+1), mm1);
        movq_r2r (mm0, mm7);
        movq_m2r (*(ref+1), mm2);
        pxor_r2r (mm1, mm7);
        movq_m2r (*(ref+stride), mm3);
        movq_r2r (mm2, mm6);
        pxor_r2r (mm3, mm6);
        pavg_r2r (mm1, mm0);
        pavg_r2r (mm3, mm2);
        por_r2r (mm6, mm7);
        movq_r2r (mm0, mm6);
        pxor_r2r (mm2, mm6);
        pand_r2r (mm6, mm7);
        pand_m2r (mask_one, mm7);
        pavg_r2r (mm2, mm0);
        psubusb_r2r (mm7, mm0);
        movq_r2m (mm0, *dest);

        movq_m2r (*(ref+8), mm0);
        movq_m2r (*(ref+stride+9), mm1);
        movq_r2r (mm0, mm7);
        movq_m2r (*(ref+9), mm2);
        pxor_r2r (mm1, mm7);
        movq_m2r (*(ref+stride+8), mm3);
        movq_r2r (mm2, mm6);
        pxor_r2r (mm3, mm6);
        pavg_r2r (mm1, mm0);
        pavg_r2r (mm3, mm2);
        por_r2r (mm6, mm7);
        movq_r2r (mm0, mm6);
        pxor_r2r (mm2, mm6);
        pand_r2r (mm6, mm7);
        pand_m2r (mask_one, mm7);
        pavg_r2r (mm2, mm0);
        psubusb_r2r (mm7, mm0);
        ref += stride;
        movq_r2m (mm0, *(dest+8));
        dest += stride;
    } while (--height);
}

static inline void MC_avg4_8 (int height, uint8_t * dest, const uint8_t * ref,
                              const int stride, const int cpu)
{
    do {
        movq_m2r (*ref, mm0);
        movq_m2r (*(ref+stride+1), mm1);
        movq_r2r (mm0, mm7);
        movq_m2r (*(ref+1), mm2);
        pxor_r2r (mm1, mm7);
        movq_m2r (*(ref+stride), mm3);
        movq_r2r (mm2, mm6);
        pxor_r2r (mm3, mm6);
        pavg_r2r (mm1, mm0);
        pavg_r2r (mm3, mm2);
        por_r2r (mm6, mm7);
        movq_r2r (mm0, mm6);
        pxor_r2r (mm2, mm6);
        pand_r2r (mm6, mm7);
        pand_m2r (mask_one, mm7);
        pavg_r2r (mm2, mm0);
        psubusb_r2r (mm7, mm0);
        movq_m2r (*dest, mm1);
        pavg_r2r (mm1, mm0);
        ref += stride;
        movq_r2m (mm0, *dest);
        dest += stride;
    } while (--height);
}

static inline void MC_avg4_16 (int height, uint8_t * dest, const uint8_t * ref,
                               const int stride, const int cpu)
{
    do {
        movq_m2r (*ref, mm0);
        movq_m2r (*(ref+stride+1), mm1);
        movq_r2r (mm0, mm7);
        movq_m2r (*(ref+1), mm2);
        pxor_r2r (mm1, mm7);
        movq_m2r (*(ref+stride), mm3);
        movq_r2r (mm2, mm6);
        pxor_r2r (mm3, mm6);
        pavg_r2r (mm1, mm0);
        pavg_r2r (mm3, mm2);
        por_r2r (mm6, mm7);
        movq_r2r (mm0, mm6);
        pxor_r2r (mm2, mm6);
        pand_r2r (mm6, mm7);
        pand_m2r (mask_one, mm7);
        pavg_r2r (mm2, mm0);
        psubusb_r2r (mm7, mm0);
        movq_m2r (*dest, mm1);
        pavg_r2r (mm1, mm0);
        movq_r2m (mm0, *dest);

        movq_m2r (*(ref+8), mm0);
        movq_m2r (*(ref+stride+9), mm1);
        movq_r2r (mm0, mm7);
        movq_m2r (*(ref+9), mm2);
        pxor_r2r (mm1, mm7);
        movq_m2r (*(ref+stride+8), mm3);
        movq_r2r (mm2, mm6);
        pxor_r2r (mm3, mm6);
        pavg_r2r (mm1, mm0);
        pavg_r2r (mm3, mm2);
        por_r2r (mm6, mm7);
        movq_r2r (mm0, mm6);
        pxor_r2r (mm2, mm6);
        pand_r2r (mm6, mm7);
        pand_m2r (mask_one, mm7);
        pavg_r2r (mm2, mm0);
        psubusb_r2r (mm7, mm0);
        movq_m2r (*(dest+8), mm1);
        pavg_r2r (mm1, mm0);
        ref += stride;
        movq_r2m (mm0, *(dest+8));
        dest += stride;
    } while (--height);
}

#if HAVE_MMX2

static void MC_avg_o_16_mmxext (uint8_t * dest, const uint8_t * ref,
                                int stride, int height)
{
    MC_avg1_16 (height, dest, ref, stride, CPU_MMXEXT);
}

static void MC_avg_o_8_mmxext (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    MC_avg1_8 (height, dest, ref, stride, CPU_MMXEXT);
}

static void MC_put_o_16_mmxext (uint8_t * dest, const uint8_t * ref,
                                int stride, int height)
{
    MC_put1_16 (height, dest, ref, stride);
}

static void MC_put_o_8_mmxext (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    MC_put1_8 (height, dest, ref, stride);
}

static void MC_avg_x_16_mmxext (uint8_t * dest, const uint8_t * ref,
                                int stride, int height)
{
    MC_avg2_16 (height, dest, ref, stride, 1, CPU_MMXEXT);
}

static void MC_avg_x_8_mmxext (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    MC_avg2_8 (height, dest, ref, stride, 1, CPU_MMXEXT);
}

static void MC_put_x_16_mmxext (uint8_t * dest, const uint8_t * ref,
                                int stride, int height)
{
    MC_put2_16 (height, dest, ref, stride, 1, CPU_MMXEXT);
}

static void MC_put_x_8_mmxext (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    MC_put2_8 (height, dest, ref, stride, 1, CPU_MMXEXT);
}

static void MC_avg_y_16_mmxext (uint8_t * dest, const uint8_t * ref,
                                int stride, int height)
{
    MC_avg2_16 (height, dest, ref, stride, stride, CPU_MMXEXT);
}

static void MC_avg_y_8_mmxext (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    MC_avg2_8 (height, dest, ref, stride, stride, CPU_MMXEXT);
}

static void MC_put_y_16_mmxext (uint8_t * dest, const uint8_t * ref,
                                int stride, int height)
{
    MC_put2_16 (height, dest, ref, stride, stride, CPU_MMXEXT);
}

static void MC_put_y_8_mmxext (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    MC_put2_8 (height, dest, ref, stride, stride, CPU_MMXEXT);
}

static void MC_avg_xy_16_mmxext (uint8_t * dest, const uint8_t * ref,
                                 int stride, int height)
{
    MC_avg4_16 (height, dest, ref, stride, CPU_MMXEXT);
}

static void MC_avg_xy_8_mmxext (uint8_t * dest, const uint8_t * ref,
                                int stride, int height)
{
    MC_avg4_8 (height, dest, ref, stride, CPU_MMXEXT);
}

static void MC_put_xy_16_mmxext (uint8_t * dest, const uint8_t * ref,
                                 int stride, int height)
{
    MC_put4_16 (height, dest, ref, stride, CPU_MMXEXT);
}

static void MC_put_xy_8_mmxext (uint8_t * dest, const uint8_t * ref,
                                int stride, int height)
{
    MC_put4_8 (height, dest, ref, stride, CPU_MMXEXT);
}

MPEG2_MC_EXTERN (mmxext)

#endif /* HAVE_MMX2 */

#if HAVE_AMD3DNOW

static void MC_avg_o_16_3dnow (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    MC_avg1_16 (height, dest, ref, stride, CPU_3DNOW);
}

static void MC_avg_o_8_3dnow (uint8_t * dest, const uint8_t * ref,
                              int stride, int height)
{
    MC_avg1_8 (height, dest, ref, stride, CPU_3DNOW);
}

static void MC_put_o_16_3dnow (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    MC_put1_16 (height, dest, ref, stride);
}

static void MC_put_o_8_3dnow (uint8_t * dest, const uint8_t * ref,
                              int stride, int height)
{
    MC_put1_8 (height, dest, ref, stride);
}

static void MC_avg_x_16_3dnow (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    MC_avg2_16 (height, dest, ref, stride, 1, CPU_3DNOW);
}

static void MC_avg_x_8_3dnow (uint8_t * dest, const uint8_t * ref,
                              int stride, int height)
{
    MC_avg2_8 (height, dest, ref, stride, 1, CPU_3DNOW);
}

static void MC_put_x_16_3dnow (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    MC_put2_16 (height, dest, ref, stride, 1, CPU_3DNOW);
}

static void MC_put_x_8_3dnow (uint8_t * dest, const uint8_t * ref,
                              int stride, int height)
{
    MC_put2_8 (height, dest, ref, stride, 1, CPU_3DNOW);
}

static void MC_avg_y_16_3dnow (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    MC_avg2_16 (height, dest, ref, stride, stride, CPU_3DNOW);
}

static void MC_avg_y_8_3dnow (uint8_t * dest, const uint8_t * ref,
                              int stride, int height)
{
    MC_avg2_8 (height, dest, ref, stride, stride, CPU_3DNOW);
}

static void MC_put_y_16_3dnow (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    MC_put2_16 (height, dest, ref, stride, stride, CPU_3DNOW);
}

static void MC_put_y_8_3dnow (uint8_t * dest, const uint8_t * ref,
                              int stride, int height)
{
    MC_put2_8 (height, dest, ref, stride, stride, CPU_3DNOW);
}

static void MC_avg_xy_16_3dnow (uint8_t * dest, const uint8_t * ref,
                                int stride, int height)
{
    MC_avg4_16 (height, dest, ref, stride, CPU_3DNOW);
}

static void MC_avg_xy_8_3dnow (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    MC_avg4_8 (height, dest, ref, stride, CPU_3DNOW);
}

static void MC_put_xy_16_3dnow (uint8_t * dest, const uint8_t * ref,
                                int stride, int height)
{
    MC_put4_16 (height, dest, ref, stride, CPU_3DNOW);
}

static void MC_put_xy_8_3dnow (uint8_t * dest, const uint8_t * ref,
                               int stride, int height)
{
    MC_put4_8 (height, dest, ref, stride, CPU_3DNOW);
}

MPEG2_MC_EXTERN (3dnow)

#endif /* HAVE_AMD3DNOW */

#endif /* ARCH_X86 || ARCH_X86_64 */