/*
 * Alpha optimized DSP utils
 * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "asm.h"

void get_pixels_mvi(DCTELEM *restrict block,
                    const uint8_t *restrict pixels, int line_size)
{
    int h = 8;

    do {
        uint64_t p;

        p = ldq(pixels);
        stq(unpkbw(p),       block);
        stq(unpkbw(p >> 32), block + 4);

        pixels += line_size;
        block  += 8;
    } while (--h);
}
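
/* Illustration only (not part of the original file): each unpkbw/stq
   pair above widens four packed bytes into four zero-extended 16-bit
   words per store. A scalar sketch of one row, using the hypothetical
   helper name get_pixels_row_ref: */
#if 0
static void get_pixels_row_ref(DCTELEM *block, const uint8_t *pixels)
{
    int i;
    for (i = 0; i < 8; i++)
        block[i] = pixels[i];   /* zero-extend each byte to 16 bits */
}
#endif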

void diff_pixels_mvi(DCTELEM *block, const uint8_t *s1, const uint8_t *s2,
                     int stride)
{
    int h = 8;
    uint64_t mask = 0x4040;

    mask |= mask << 16;
    mask |= mask << 32;
    do {
        uint64_t x, y, c, d, a;
        uint64_t signs;

        x = ldq(s1);
        y = ldq(s2);
        c = cmpbge(x, y);
        d = x - y;
        a = zap(mask, c);       /* We use 0x4040404040404040 here... */
        d += 4 * a;             /* ...so we can use s4addq here.     */
        signs = zap(-1, c);

        stq(unpkbw(d)       | (unpkbw(signs)       << 8), block);
        stq(unpkbw(d >> 32) | (unpkbw(signs >> 32) << 8), block + 4);

        s1    += stride;
        s2    += stride;
        block += 8;
    } while (--h);
}
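
/* Illustration only (not part of the original file): cmpbge records
   which bytes of s1 are >= the corresponding bytes of s2; adding
   4 * 0x40 = 0x100 in every byte lane where s1 < s2 cancels the
   borrows that the full 64-bit subtract propagated between lanes,
   and "signs" supplies the 0xff high bytes that sign-extend negative
   differences. The net effect per row is simply this scalar
   reference (hypothetical helper name): */
#if 0
static void diff_pixels_row_ref(DCTELEM *block,
                                const uint8_t *s1, const uint8_t *s2)
{
    int i;
    for (i = 0; i < 8; i++)
        block[i] = s1[i] - s2[i];   /* signed 16-bit difference */
}
#endif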

static inline uint64_t avg2(uint64_t a, uint64_t b)
{
    return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
}
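
/* Illustration only (not part of the original file): avg2() uses the
   classic SWAR identity (a | b) - ((a ^ b) >> 1) for the bytewise
   rounding-up average; the BYTE_VEC(0xfe) mask keeps shifted bits
   from crossing byte lanes. Scalar reference, hypothetical name: */
#if 0
static uint64_t avg2_ref(uint64_t a, uint64_t b)
{
    uint64_t r = 0;
    int i;
    for (i = 0; i < 64; i += 8) {
        uint64_t x = (a >> i) & 0xff;
        uint64_t y = (b >> i) & 0xff;
        r |= ((x + y + 1) >> 1) << i;   /* ceil((x + y) / 2) per byte */
    }
    return r;
}
#endif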

static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
{
    uint64_t r1 = ((l1 & ~BYTE_VEC(0x03)) >> 2)
                + ((l2 & ~BYTE_VEC(0x03)) >> 2)
                + ((l3 & ~BYTE_VEC(0x03)) >> 2)
                + ((l4 & ~BYTE_VEC(0x03)) >> 2);
    uint64_t r2 = ((  (l1 & BYTE_VEC(0x03))
                    + (l2 & BYTE_VEC(0x03))
                    + (l3 & BYTE_VEC(0x03))
                    + (l4 & BYTE_VEC(0x03))
                    + BYTE_VEC(0x02)) >> 2) & BYTE_VEC(0x03);
    return r1 + r2;
}
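
/* Illustration only (not part of the original file): avg4() splits
   each byte into its top six bits (summed exactly in r1) and its low
   two bits (summed with the rounding constant 2 in r2), so neither
   sum can overflow a byte lane. Scalar reference, hypothetical name: */
#if 0
static uint64_t avg4_ref(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
{
    uint64_t r = 0;
    int i;
    for (i = 0; i < 64; i += 8) {
        uint64_t s = ((l1 >> i) & 0xff) + ((l2 >> i) & 0xff)
                   + ((l3 >> i) & 0xff) + ((l4 >> i) & 0xff);
        r |= ((s + 2) >> 2) << i;       /* (sum + 2) / 4 per byte */
    }
    return r;
}
#endif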

int pix_abs8x8_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;

    if ((size_t) pix2 & 0x7) {
        /* works only when pix2 is actually unaligned */
        do {                    /* do 8 pixels at a time */
            uint64_t p1, p2;

            p1 = ldq(pix1);
            p2 = uldq(pix2);
            result += perr(p1, p2);

            pix1 += line_size;
            pix2 += line_size;
        } while (--h);
    } else {
        do {
            uint64_t p1, p2;

            p1 = ldq(pix1);
            p2 = ldq(pix2);
            result += perr(p1, p2);

            pix1 += line_size;
            pix2 += line_size;
        } while (--h);
    }

    return result;
}
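
/* Illustration only (not part of the original file): perr() is the
   MVI sum-of-absolute-differences over eight byte lanes, so each
   iteration above accumulates the SAD of one 8-pixel row. Scalar
   reference, hypothetical name: */
#if 0
static int perr_ref(uint64_t a, uint64_t b)
{
    int sum = 0;
    int i;
    for (i = 0; i < 64; i += 8) {
        int x = (a >> i) & 0xff;
        int y = (b >> i) & 0xff;
        sum += x > y ? x - y : y - x;   /* |x - y| per byte */
    }
    return sum;
}
#endif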

#if 0                           /* now done in assembly */
int pix_abs16x16_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
{
    int result = 0;
    int h = 16;

    if ((size_t) pix2 & 0x7) {
        /* works only when pix2 is actually unaligned */
        do {                    /* do 16 pixels at a time */
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t t;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            t     = ldq_u(pix2 + 8);
            p2_l  = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
            p2_r  = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
    } else {
        do {
            uint64_t p1_l, p1_r, p2_l, p2_r;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            p2_l  = ldq(pix2);
            p2_r  = ldq(pix2 + 8);
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
    }

    return result;
}
#endif
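
/* Illustration only (not part of the original file): the extql/extqh
   pairs above implement Alpha's standard unaligned-load idiom: two
   aligned quadword loads are shifted by the byte offset of the
   address and merged. A scalar sketch of that merge (hypothetical
   name); like the code above, it is only valid when the pointer is
   actually unaligned, i.e. for offsets 1..7: */
#if 0
static uint64_t extq_merge_ref(uint64_t lo, uint64_t hi, unsigned disalign)
{
    /* lo is the aligned quadword containing the address, hi the next
       one; disalign is the byte offset within lo (little-endian) */
    return (lo >> (8 * disalign)) | (hi << (8 * (8 - disalign)));
}
#endif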

int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;
    uint64_t disalign = (size_t) pix2 & 0x7;

    switch (disalign) {
    case 0:
        do {
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            l    = ldq(pix2);
            r    = ldq(pix2 + 8);
            p2_l = avg2(l, (l >> 8) | ((uint64_t) r << 56));
            p2_r = avg2(r, (r >> 8) | ((uint64_t) pix2[16] << 56));
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
        break;
    case 7:
        /* |.......l|lllllllr|rrrrrrr*|
           This case is special because disalign1 would be 8, which
           gets treated as 0 by extqh. At least it is a bit faster
           that way :) */
        do {
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, m, r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            l    = ldq_u(pix2);
            m    = ldq_u(pix2 + 8);
            r    = ldq_u(pix2 + 16);
            p2_l = avg2(extql(l, disalign) | extqh(m, disalign), m);
            p2_r = avg2(extql(m, disalign) | extqh(r, disalign), r);
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
        break;
    default:
        do {
            uint64_t disalign1 = disalign + 1;
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, m, r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            l    = ldq_u(pix2);
            m    = ldq_u(pix2 + 8);
            r    = ldq_u(pix2 + 16);
            p2_l = avg2(extql(l, disalign)  | extqh(m, disalign),
                        extql(l, disalign1) | extqh(m, disalign1));
            p2_r = avg2(extql(m, disalign)  | extqh(r, disalign),
                        extql(m, disalign1) | extqh(r, disalign1));
            pix1 += line_size;
            pix2 += line_size;

            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        } while (--h);
        break;
    }
    return result;
}
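
/* Illustration only (not part of the original file): each p2 vector
   above is the rounding average of pix2 and pix2 + 1, i.e. the
   horizontal half-pel interpolation. Scalar reference for the whole
   16-wide block, hypothetical name: */
#if 0
static int pix_abs16x16_x2_ref(const uint8_t *pix1, const uint8_t *pix2,
                               int line_size, int h)
{
    int result = 0;
    int x, y;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++) {
            int p2 = (pix2[x] + pix2[x + 1] + 1) >> 1;
            int d  = pix1[x] - p2;
            result += d < 0 ? -d : d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return result;
}
#endif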

int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;

    if ((size_t) pix2 & 0x7) {
        uint64_t t, p2_l, p2_r;

        t    = ldq_u(pix2 + 8);
        p2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
        p2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);

        do {
            uint64_t p1_l, p1_r, np2_l, np2_r;
            uint64_t t;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            pix2 += line_size;
            t     = ldq_u(pix2 + 8);
            np2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
            np2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);

            result += perr(p1_l, avg2(p2_l, np2_l))
                    + perr(p1_r, avg2(p2_r, np2_r));

            pix1 += line_size;
            p2_l  = np2_l;
            p2_r  = np2_r;
        } while (--h);
    } else {
        uint64_t p2_l, p2_r;

        p2_l = ldq(pix2);
        p2_r = ldq(pix2 + 8);

        do {
            uint64_t p1_l, p1_r, np2_l, np2_r;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            pix2 += line_size;
            np2_l = ldq(pix2);
            np2_r = ldq(pix2 + 8);

            result += perr(p1_l, avg2(p2_l, np2_l))
                    + perr(p1_r, avg2(p2_r, np2_r));

            pix1 += line_size;
            p2_l  = np2_l;
            p2_r  = np2_r;
        } while (--h);
    }
    return result;
}
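
/* Illustration only (not part of the original file): here the current
   and next pix2 rows are carried in registers and averaged, giving
   the vertical half-pel interpolation. Scalar reference, hypothetical
   name: */
#if 0
static int pix_abs16x16_y2_ref(const uint8_t *pix1, const uint8_t *pix2,
                               int line_size, int h)
{
    int result = 0;
    int x, y;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++) {
            int p2 = (pix2[x] + pix2[x + line_size] + 1) >> 1;
            int d  = pix1[x] - p2;
            result += d < 0 ? -d : d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return result;
}
#endif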

int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int result = 0;

    uint64_t p1_l, p1_r;
    uint64_t p2_l, p2_r, p2_x;

    p1_l = ldq(pix1);
    p1_r = ldq(pix1 + 8);

    if ((size_t) pix2 & 0x7) { /* could be optimized a lot */
        p2_l = uldq(pix2);
        p2_r = uldq(pix2 + 8);
        p2_x = (uint64_t) pix2[16] << 56;
    } else {
        p2_l = ldq(pix2);
        p2_r = ldq(pix2 + 8);
        p2_x = ldq(pix2 + 16) << 56;
    }

    do {
        uint64_t np1_l, np1_r;
        uint64_t np2_l, np2_r, np2_x;

        pix1 += line_size;
        pix2 += line_size;

        np1_l = ldq(pix1);
        np1_r = ldq(pix1 + 8);

        if ((size_t) pix2 & 0x7) { /* could be optimized a lot */
            np2_l = uldq(pix2);
            np2_r = uldq(pix2 + 8);
            np2_x = (uint64_t) pix2[16] << 56;
        } else {
            np2_l = ldq(pix2);
            np2_r = ldq(pix2 + 8);
            np2_x = ldq(pix2 + 16) << 56;
        }

        result += perr(p1_l,
                       avg4( p2_l, ( p2_l >> 8) | ((uint64_t)  p2_r << 56),
                            np2_l, (np2_l >> 8) | ((uint64_t) np2_r << 56)))
                + perr(p1_r,
                       avg4( p2_r, ( p2_r >> 8) | ((uint64_t)  p2_x),
                            np2_r, (np2_r >> 8) | ((uint64_t) np2_x)));

        p1_l = np1_l;
        p1_r = np1_r;
        p2_l = np2_l;
        p2_r = np2_r;
        p2_x = np2_x;
    } while (--h);

    return result;
}
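
/* Illustration only (not part of the original file): avg4() combines
   the current and next pix2 rows with their one-pixel-right shifts,
   i.e. the half-pel interpolation in both x and y with the rounding
   constant 2. Scalar reference, hypothetical name: */
#if 0
static int pix_abs16x16_xy2_ref(const uint8_t *pix1, const uint8_t *pix2,
                                int line_size, int h)
{
    int result = 0;
    int x, y;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++) {
            int p2 = (pix2[x]             + pix2[x + 1]
                    + pix2[x + line_size] + pix2[x + line_size + 1] + 2) >> 2;
            int d  = pix1[x] - p2;
            result += d < 0 ? -d : d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return result;
}
#endif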