/*
 * AltiVec-enhanced yuv2yuvX
 *
 * Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 * based on the equivalent C code in swscale.c
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define vzero vec_splat_s32(0)
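/*
 * vzero is an all-zero splat vector; it is used as the zero accumulator for
 * vec_sums()/vec_msums() in the scaling code below.  Note that AltiVec
 * vec_ld()/vec_st() only access 16-byte aligned addresses, which is why
 * potentially misaligned pointers are always handled through
 * vec_lvsl()/vec_perm() pairs throughout this file.
 */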
static inline void
altivec_packIntArrayToCharArray(int *val, uint8_t *dest, int dstW)
{
    register int i;
    /* build the shift count 19 as 10 + 9 (vec_splat_u32 immediates are limited to -16..15) */
    vector unsigned int altivec_vectorShiftInt19 =
        vec_add(vec_splat_u32(10), vec_splat_u32(9));
    if ((unsigned long)dest % 16) {
        /* badly aligned store, we force store alignment */
        /* and will handle load misalignment on val w/ vec_perm */
        vector unsigned char perm1;
        vector signed int v1;
        for (i = 0; (i < dstW) &&
                    (((unsigned long)dest + i) % 16); i++) {
            int t = val[i] >> 19;
            dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
        }
        perm1 = vec_lvsl(i << 2, val);
        v1 = vec_ld(i << 2, val);
        for ( ; i < (dstW - 15); i += 16) {
            int offset = i << 2;
            vector signed int v2 = vec_ld(offset + 16, val);
            vector signed int v3 = vec_ld(offset + 32, val);
            vector signed int v4 = vec_ld(offset + 48, val);
            vector signed int v5 = vec_ld(offset + 64, val);
            vector signed int v12 = vec_perm(v1, v2, perm1);
            vector signed int v23 = vec_perm(v2, v3, perm1);
            vector signed int v34 = vec_perm(v3, v4, perm1);
            vector signed int v45 = vec_perm(v4, v5, perm1);

            vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19);
            vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19);
            vector signed int vC = vec_sra(v34, altivec_vectorShiftInt19);
            vector signed int vD = vec_sra(v45, altivec_vectorShiftInt19);
            vector unsigned short vs1 = vec_packsu(vA, vB);
            vector unsigned short vs2 = vec_packsu(vC, vD);
            vector unsigned char vf  = vec_packsu(vs1, vs2);
            vec_st(vf, i, dest);

            v1 = v5;
        }
    } else { // dest is properly aligned, great
        for (i = 0; i < (dstW - 15); i += 16) {
            int offset = i << 2;
            vector signed int v1 = vec_ld(offset, val);
            vector signed int v2 = vec_ld(offset + 16, val);
            vector signed int v3 = vec_ld(offset + 32, val);
            vector signed int v4 = vec_ld(offset + 48, val);
            vector signed int v5 = vec_sra(v1, altivec_vectorShiftInt19);
            vector signed int v6 = vec_sra(v2, altivec_vectorShiftInt19);
            vector signed int v7 = vec_sra(v3, altivec_vectorShiftInt19);
            vector signed int v8 = vec_sra(v4, altivec_vectorShiftInt19);
            vector unsigned short vs1 = vec_packsu(v5, v6);
            vector unsigned short vs2 = vec_packsu(v7, v8);
            vector unsigned char vf  = vec_packsu(vs1, vs2);
            vec_st(vf, i, dest);
        }
    }
    for ( ; i < dstW; i++) {
        int t = val[i] >> 19;
        dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
    }
}
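/*
 * For reference, a minimal scalar sketch of what the routine above computes
 * (the helper name is illustrative, not part of the original API): each
 * 32-bit accumulator is shifted down by 19 bits to drop the filter's
 * fixed-point fraction, then saturated to the 0..255 byte range.
 */
static inline void packIntArrayToCharArray_scalar_sketch(const int *val, uint8_t *dest, int dstW)
{
    int i;
    for (i = 0; i < dstW; i++) {
        int t = val[i] >> 19;                           /* drop the fixed-point bits */
        dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);  /* saturate to uint8 */
    }
}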
static inline void
yuv2yuvX_altivec_real(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                      int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                      uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
{
    const vector signed int vini = {(1 << 18), (1 << 18), (1 << 18), (1 << 18)};
    register int i, j;
    {
        int __attribute__ ((aligned (16))) val[dstW];

        for (i = 0; i < (dstW - 7); i += 4) {
            vec_st(vini, i << 2, val);
        }
        for (; i < dstW; i++) {
            val[i] = (1 << 18);
        }

        for (j = 0; j < lumFilterSize; j++) {
            vector signed short l1, vLumFilter = vec_ld(j << 1, lumFilter);
            vector unsigned char perm, perm0 = vec_lvsl(j << 1, lumFilter);
            vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0);
            vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter

            perm = vec_lvsl(0, lumSrc[j]);
            l1 = vec_ld(0, lumSrc[j]);

            for (i = 0; i < (dstW - 7); i += 8) {
                int offset = i << 2;
                vector signed short l2 = vec_ld((i << 1) + 16, lumSrc[j]);

                vector signed int v1 = vec_ld(offset, val);
                vector signed int v2 = vec_ld(offset + 16, val);

                vector signed short ls = vec_perm(l1, l2, perm); // lumSrc[j][i] ... lumSrc[j][i+7]

                vector signed int i1 = vec_mule(vLumFilter, ls);
                vector signed int i2 = vec_mulo(vLumFilter, ls);

                vector signed int vf1 = vec_mergeh(i1, i2);
                vector signed int vf2 = vec_mergel(i1, i2); // lumSrc[j][i] * lumFilter[j] ... lumSrc[j][i+7] * lumFilter[j]

                vector signed int vo1 = vec_add(v1, vf1);
                vector signed int vo2 = vec_add(v2, vf2);

                vec_st(vo1, offset, val);
                vec_st(vo2, offset + 16, val);

                l1 = l2;
            }
            for ( ; i < dstW; i++) {
                val[i] += lumSrc[j][i] * lumFilter[j];
            }
        }
        altivec_packIntArrayToCharArray(val, dest, dstW);
    }
    if (uDest != 0) {
        int __attribute__ ((aligned (16))) u[chrDstW];
        int __attribute__ ((aligned (16))) v[chrDstW];

        for (i = 0; i < (chrDstW - 7); i += 4) {
            vec_st(vini, i << 2, u);
            vec_st(vini, i << 2, v);
        }
        for (; i < chrDstW; i++) {
            u[i] = (1 << 18);
            v[i] = (1 << 18);
        }

        for (j = 0; j < chrFilterSize; j++) {
            vector signed short l1, l1_V, vChrFilter = vec_ld(j << 1, chrFilter);
            vector unsigned char perm, perm0 = vec_lvsl(j << 1, chrFilter);
            vChrFilter = vec_perm(vChrFilter, vChrFilter, perm0);
            vChrFilter = vec_splat(vChrFilter, 0); // chrFilter[j] is loaded 8 times in vChrFilter

            perm = vec_lvsl(0, chrSrc[j]);
            l1 = vec_ld(0, chrSrc[j]);
            l1_V = vec_ld(2048 << 1, chrSrc[j]);

            for (i = 0; i < (chrDstW - 7); i += 8) {
                int offset = i << 2;
                vector signed short l2 = vec_ld((i << 1) + 16, chrSrc[j]);
                vector signed short l2_V = vec_ld(((i + 2048) << 1) + 16, chrSrc[j]);

                vector signed int v1 = vec_ld(offset, u);
                vector signed int v2 = vec_ld(offset + 16, u);
                vector signed int v1_V = vec_ld(offset, v);
                vector signed int v2_V = vec_ld(offset + 16, v);

                vector signed short ls = vec_perm(l1, l2, perm);       // chrSrc[j][i] ... chrSrc[j][i+7]
                vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrSrc[j][i+2048] ... chrSrc[j][i+2055]

                vector signed int i1 = vec_mule(vChrFilter, ls);
                vector signed int i2 = vec_mulo(vChrFilter, ls);
                vector signed int i1_V = vec_mule(vChrFilter, ls_V);
                vector signed int i2_V = vec_mulo(vChrFilter, ls_V);

                vector signed int vf1 = vec_mergeh(i1, i2);
                vector signed int vf2 = vec_mergel(i1, i2);       // chrSrc[j][i] * chrFilter[j] ... chrSrc[j][i+7] * chrFilter[j]
                vector signed int vf1_V = vec_mergeh(i1_V, i2_V);
                vector signed int vf2_V = vec_mergel(i1_V, i2_V); // chrSrc[j][i+2048] * chrFilter[j] ... chrSrc[j][i+2055] * chrFilter[j]

                vector signed int vo1 = vec_add(v1, vf1);
                vector signed int vo2 = vec_add(v2, vf2);
                vector signed int vo1_V = vec_add(v1_V, vf1_V);
                vector signed int vo2_V = vec_add(v2_V, vf2_V);

                vec_st(vo1, offset, u);
                vec_st(vo2, offset + 16, u);
                vec_st(vo1_V, offset, v);
                vec_st(vo2_V, offset + 16, v);

                l1 = l2;
                l1_V = l2_V;
            }
            for ( ; i < chrDstW; i++) {
                u[i] += chrSrc[j][i] * chrFilter[j];
                v[i] += chrSrc[j][i + 2048] * chrFilter[j];
            }
        }
        altivec_packIntArrayToCharArray(u, uDest, chrDstW);
        altivec_packIntArrayToCharArray(v, vDest, chrDstW);
    }
}
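/*
 * In scalar form, the luma path above computes the following (the helper
 * name is illustrative, not part of the original API): every output pixel
 * starts at the rounding constant 1 << 18 (vini), accumulates the vertical
 * FIR over lumFilterSize source lines, and is then packed with the
 * shift-by-19 / saturate step of altivec_packIntArrayToCharArray().
 */
static inline void yuv2yuvX_luma_scalar_sketch(int16_t *lumFilter, int16_t **lumSrc,
                                               int lumFilterSize, uint8_t *dest, int dstW)
{
    int i, j;
    for (i = 0; i < dstW; i++) {
        int val = 1 << 18;                       /* rounding term, same as vini */
        for (j = 0; j < lumFilterSize; j++)
            val += lumSrc[j][i] * lumFilter[j];  /* vertical filtering over source lines */
        val >>= 19;
        dest[i] = (val < 0) ? 0 : ((val > 255) ? 255 : val);
    }
}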
static inline void hScale_altivec_real(int16_t *dst, int dstW, uint8_t *src, int srcW,
                                       int xInc, int16_t *filter, int16_t *filterPos,
                                       int filterSize)
{
    register int i;
    int __attribute__ ((aligned (16))) tempo[4];

    if (filterSize % 4) {
        /* scalar fallback for filter sizes the vector paths below do not handle */
        for (i = 0; i < dstW; i++) {
            register int j;
            register int srcPos = filterPos[i];
            register int val = 0;
            for (j = 0; j < filterSize; j++) {
                val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
            }
            dst[i] = av_clip(val >> 7, 0, (1 << 15) - 1);
        }
    } else
    switch (filterSize) {
    case 4:
    {
        for (i = 0; i < dstW; i++) {
            register int srcPos = filterPos[i];

            vector unsigned char src_v0 = vec_ld(srcPos, src);
            vector unsigned char src_v1, src_vF;
            vector signed short src_v, filter_v;
            vector signed int val_vEven, val_s;
            if ((((int)src + srcPos) % 16) > 12) {
                src_v1 = vec_ld(srcPos + 16, src);
            }
            src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));

            src_v = // vec_unpackh sign-extends...
                (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
            // now put our elements in the even slots
            src_v = vec_mergeh(src_v, (vector signed short)vzero);

            filter_v = vec_ld(i << 3, filter);
            // the 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2)

            // the neat trick: we only care for half the elements,
            // high or low depending on (i << 3) % 16 (it's 0 or 8 here),
            // and we're going to use vec_mule, so we chose
            // carefully how to "unpack" the elements into the even slots
            if ((i << 3) % 16)
                filter_v = vec_mergel(filter_v, (vector signed short)vzero);
            else
                filter_v = vec_mergeh(filter_v, (vector signed short)vzero);

            val_vEven = vec_mule(src_v, filter_v);
            val_s = vec_sums(val_vEven, vzero);
            vec_st(val_s, 0, tempo);
            dst[i] = av_clip(tempo[3] >> 7, 0, (1 << 15) - 1);
        }
    }
    break;
    case 8:
    {
        for (i = 0; i < dstW; i++) {
            register int srcPos = filterPos[i];

            vector unsigned char src_v0 = vec_ld(srcPos, src);
            vector unsigned char src_v1, src_vF;
            vector signed short src_v, filter_v;
            vector signed int val_v, val_s;
            if ((((int)src + srcPos) % 16) > 8) {
                src_v1 = vec_ld(srcPos + 16, src);
            }
            src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));

            src_v = // vec_unpackh sign-extends...
                (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
            filter_v = vec_ld(i << 4, filter);
            // the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2)

            val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
            val_s = vec_sums(val_v, vzero);
            vec_st(val_s, 0, tempo);
            dst[i] = av_clip(tempo[3] >> 7, 0, (1 << 15) - 1);
        }
    }
    break;
    case 16:
    {
        for (i = 0; i < dstW; i++) {
            register int srcPos = filterPos[i];

            vector unsigned char src_v0 = vec_ld(srcPos, src);
            vector unsigned char src_v1 = vec_ld(srcPos + 16, src);
            vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));

            vector signed short src_vA = // vec_unpackh sign-extends...
                (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
            vector signed short src_vB = // vec_unpackh sign-extends...
                (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));

            vector signed short filter_v0 = vec_ld(i << 5, filter);
            vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);
            // the 5 above are 4 (filterSize == 16) + 1 (sizeof(short) == 2)

            vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
            vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc);

            vector signed int val_s = vec_sums(val_v, vzero);

            vec_st(val_s, 0, tempo);
            dst[i] = av_clip(tempo[3] >> 7, 0, (1 << 15) - 1);
        }
    }
    break;
    default:
    {
        for (i = 0; i < dstW; i++) {
            register int j;
            register int srcPos = filterPos[i];

            vector signed int val_s, val_v = (vector signed int)vzero;
            vector signed short filter_v0R = vec_ld(i * 2 * filterSize, filter);
            vector unsigned char permF = vec_lvsl((i * 2 * filterSize), filter);

            vector unsigned char src_v0 = vec_ld(srcPos, src);
            vector unsigned char permS = vec_lvsl(srcPos, src);

            for (j = 0; j < filterSize - 15; j += 16) {
                vector unsigned char src_v1 = vec_ld(srcPos + j + 16, src);
                vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);

                vector signed short src_vA = // vec_unpackh sign-extends...
                    (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
                vector signed short src_vB = // vec_unpackh sign-extends...
                    (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));

                vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
                vector signed short filter_v2R = vec_ld((i * 2 * filterSize) + (j * 2) + 32, filter);
                vector signed short filter_v0 = vec_perm(filter_v0R, filter_v1R, permF);
                vector signed short filter_v1 = vec_perm(filter_v1R, filter_v2R, permF);

                vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v);
                val_v = vec_msums(src_vB, filter_v1, val_acc);

                filter_v0R = filter_v2R;
                src_v0 = src_v1;
            }

            if (j < filterSize - 7) {
                // loading src_v0 is useless, it's already done above
                //vector unsigned char src_v0 = vec_ld(srcPos + j, src);
                vector unsigned char src_v1, src_vF;
                vector signed short src_v, filter_v1R, filter_v;
                if ((((int)src + srcPos) % 16) > 8) {
                    src_v1 = vec_ld(srcPos + j + 16, src);
                }
                src_vF = vec_perm(src_v0, src_v1, permS);

                src_v = // vec_unpackh sign-extends...
                    (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
                // loading filter_v0R is useless, it's already done above
                //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter);
                filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
                filter_v = vec_perm(filter_v0R, filter_v1R, permF);

                val_v = vec_msums(src_v, filter_v, val_v);
            }

            val_s = vec_sums(val_v, vzero);

            vec_st(val_s, 0, tempo);
            dst[i] = av_clip(tempo[3] >> 7, 0, (1 << 15) - 1);
        }
    }
    }
}
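/*
 * Scalar sketch of the horizontal scaler above (the helper name is
 * illustrative, not part of the original API): each output sample is the
 * dot product of filterSize source bytes starting at filterPos[i] with the
 * i-th filter row, shifted by 7 and clipped to 15 bits, exactly as in the
 * filterSize % 4 fallback path.
 */
static inline void hScale_scalar_sketch(int16_t *dst, int dstW, const uint8_t *src,
                                        const int16_t *filter, const int16_t *filterPos,
                                        int filterSize)
{
    int i, j;
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        int val = 0;
        for (j = 0; j < filterSize; j++)
            val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
        dst[i] = av_clip(val >> 7, 0, (1 << 15) - 1);  /* same clipping as the vector paths */
    }
}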
static inline int yv12toyuy2_unscaled_altivec(SwsContext *c, uint8_t *src[], int srcStride[], int srcSliceY,
                                              int srcSliceH, uint8_t *dstParam[], int dstStride_a[])
{
    uint8_t *dst = dstParam[0] + dstStride_a[0] * srcSliceY;
    // yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
    uint8_t *ysrc = src[0];
    uint8_t *usrc = src[1];
    uint8_t *vsrc = src[2];
    const int width = c->srcW;
    const int height = srcSliceH;
    const int lumStride = srcStride[0];
    const int chromStride = srcStride[1];
    const int dstStride = dstStride_a[0];
    const vector unsigned char yperm = vec_lvsl(0, ysrc);
    const int vertLumPerChroma = 2;
    register unsigned int y;

    if (width & 15) {
        yv12toyuy2(ysrc, usrc, vsrc, dst, c->srcW, srcSliceH, lumStride, chromStride, dstStride);
        return srcSliceH;
    }

    /* This code assumes:
     *
     * 1) dst is 16-byte aligned
     * 2) dstStride is a multiple of 16
     * 3) width is a multiple of 16
     * 4) lum & chrom strides are multiples of 8
     */

    for (y = 0; y < height; y++) {
        int i;
        for (i = 0; i < width - 31; i += 32) {
            const unsigned int j = i >> 1;
            vector unsigned char v_yA = vec_ld(i, ysrc);
            vector unsigned char v_yB = vec_ld(i + 16, ysrc);
            vector unsigned char v_yC = vec_ld(i + 32, ysrc);
            vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
            vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
            vector unsigned char v_uA = vec_ld(j, usrc);
            vector unsigned char v_uB = vec_ld(j + 16, usrc);
            vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
            vector unsigned char v_vA = vec_ld(j, vsrc);
            vector unsigned char v_vB = vec_ld(j + 16, vsrc);
            vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
            vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
            vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
            vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
            vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
            vector unsigned char v_yuy2_2 = vec_mergeh(v_y2, v_uv_b);
            vector unsigned char v_yuy2_3 = vec_mergel(v_y2, v_uv_b);
            vec_st(v_yuy2_0, (i << 1), dst);
            vec_st(v_yuy2_1, (i << 1) + 16, dst);
            vec_st(v_yuy2_2, (i << 1) + 32, dst);
            vec_st(v_yuy2_3, (i << 1) + 48, dst);
        }
        if (i < width) {
            const unsigned int j = i >> 1;
            vector unsigned char v_y1 = vec_ld(i, ysrc);
            vector unsigned char v_u = vec_ld(j, usrc);
            vector unsigned char v_v = vec_ld(j, vsrc);
            vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
            vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
            vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
            vec_st(v_yuy2_0, (i << 1), dst);
            vec_st(v_yuy2_1, (i << 1) + 16, dst);
        }
        if ((y & (vertLumPerChroma - 1)) == vertLumPerChroma - 1) {
            usrc += chromStride;
            vsrc += chromStride;
        }
        ysrc += lumStride;
        dst += dstStride;
    }

    return srcSliceH;
}
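/*
 * Byte layout produced by the merges above, shown as a scalar sketch
 * (the helper name is illustrative, not part of the original API): YUY2
 * packs two luma samples with one shared U/V pair per pair of pixels,
 * i.e. Y0 U0 Y1 V0.
 */
static inline void yv12_row_to_yuy2_scalar_sketch(const uint8_t *ysrc, const uint8_t *usrc,
                                                  const uint8_t *vsrc, uint8_t *dst, int width)
{
    int i;
    for (i = 0; i < width; i += 2) {
        dst[2 * i + 0] = ysrc[i];       /* Y0 */
        dst[2 * i + 1] = usrc[i >> 1];  /* U  */
        dst[2 * i + 2] = ysrc[i + 1];   /* Y1 */
        dst[2 * i + 3] = vsrc[i >> 1];  /* V  */
    }
}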
static inline int yv12touyvy_unscaled_altivec(SwsContext *c, uint8_t *src[], int srcStride[], int srcSliceY,
                                              int srcSliceH, uint8_t *dstParam[], int dstStride_a[])
{
    uint8_t *dst = dstParam[0] + dstStride_a[0] * srcSliceY;
    // yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
    uint8_t *ysrc = src[0];
    uint8_t *usrc = src[1];
    uint8_t *vsrc = src[2];
    const int width = c->srcW;
    const int height = srcSliceH;
    const int lumStride = srcStride[0];
    const int chromStride = srcStride[1];
    const int dstStride = dstStride_a[0];
    const int vertLumPerChroma = 2;
    const vector unsigned char yperm = vec_lvsl(0, ysrc);
    register unsigned int y;

    if (width & 15) {
        yv12touyvy(ysrc, usrc, vsrc, dst, c->srcW, srcSliceH, lumStride, chromStride, dstStride);
        return srcSliceH;
    }

    /* This code assumes:
     *
     * 1) dst is 16-byte aligned
     * 2) dstStride is a multiple of 16
     * 3) width is a multiple of 16
     * 4) lum & chrom strides are multiples of 8
     */

    for (y = 0; y < height; y++) {
        int i;
        for (i = 0; i < width - 31; i += 32) {
            const unsigned int j = i >> 1;
            vector unsigned char v_yA = vec_ld(i, ysrc);
            vector unsigned char v_yB = vec_ld(i + 16, ysrc);
            vector unsigned char v_yC = vec_ld(i + 32, ysrc);
            vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
            vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
            vector unsigned char v_uA = vec_ld(j, usrc);
            vector unsigned char v_uB = vec_ld(j + 16, usrc);
            vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
            vector unsigned char v_vA = vec_ld(j, vsrc);
            vector unsigned char v_vB = vec_ld(j + 16, vsrc);
            vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
            vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
            vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
            vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
            vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
            vector unsigned char v_uyvy_2 = vec_mergeh(v_uv_b, v_y2);
            vector unsigned char v_uyvy_3 = vec_mergel(v_uv_b, v_y2);
            vec_st(v_uyvy_0, (i << 1), dst);
            vec_st(v_uyvy_1, (i << 1) + 16, dst);
            vec_st(v_uyvy_2, (i << 1) + 32, dst);
            vec_st(v_uyvy_3, (i << 1) + 48, dst);
        }
        if (i < width) {
            const unsigned int j = i >> 1;
            vector unsigned char v_y1 = vec_ld(i, ysrc);
            vector unsigned char v_u = vec_ld(j, usrc);
            vector unsigned char v_v = vec_ld(j, vsrc);
            vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
            vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
            vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
            vec_st(v_uyvy_0, (i << 1), dst);
            vec_st(v_uyvy_1, (i << 1) + 16, dst);
        }
        if ((y & (vertLumPerChroma - 1)) == vertLumPerChroma - 1) {