modules/video_chroma/i420_yuy2.h

/*****************************************************************************
 * i420_yuy2.h : YUV to YUV conversion module for vlc
 *****************************************************************************
 * Copyright (C) 2000, 2001 VLC authors and VideoLAN
 * $Id$
 *
 * Authors: Samuel Hocevar <sam@zoy.org>
 *          Damien Fouilleul <damien@videolan.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/

#ifdef MODULE_NAME_IS_i420_yuy2_mmx

#if defined(CAN_COMPILE_MMX)

/* MMX assembly */

#define MMX_CALL(MMX_INSTRUCTIONS)              \
    do {                                        \
    __asm__ __volatile__(                       \
        ".p2align 3 \n\t                        \
movd      (%0), %%mm1   # Load 4 Cb   00 00 00 00 u3 u2 u1 u0   \n\
movd      (%1), %%mm2   # Load 4 Cr   00 00 00 00 v3 v2 v1 v0   \n\
movq      (%2), %%mm0   # Load 8 Y    y7 y6 y5 y4 y3 y2 y1 y0   \n\
movq      (%3), %%mm3   # Load 8 Y    Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
"                                               \
        :                                       \
        : "r" (p_u), "r" (p_v),                 \
          "r" (p_y1), "r" (p_y2)                \
        : "mm0", "mm1", "mm2", "mm3");          \
    __asm__ __volatile__(                       \
        ".p2align 3 \n\t"                       \
        MMX_INSTRUCTIONS                        \
        :                                       \
        : "r" (p_line1), "r" (p_line2)          \
        : "mm0", "mm1", "mm2", "mm3");          \
    p_line1 += 16; p_line2 += 16;               \
    p_y1 += 8; p_y2 += 8;                       \
    p_u += 4; p_v += 4;                         \
    } while(0)

#define MMX_END __asm__ __volatile__ ( "emms" )
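
/*
 * Editorial usage sketch, not part of the original header: MMX_CALL() loads
 * one 8x2 block of Y plus 4 Cb/Cr samples, runs the supplied instruction
 * string, then advances the pointers (p_y1, p_y2, p_u, p_v, p_line1,
 * p_line2) that the including module is assumed to provide.  A minimal row
 * loop, with hypothetical i_width/i_height bounds, might look like:
 *
 *     for( int i_y = i_height / 2 ; i_y-- ; )
 *     {
 *         for( int i_x = i_width / 8 ; i_x-- ; )
 *             MMX_CALL( MMX_YUV420_YUYV );    // converts 8x2 pixels per call
 *         // caller re-seats the six pointers for the next pair of rows
 *     }
 *     MMX_END;                                // emms before any FPU code
 */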

#define MMX_YUV420_YUYV "                                               \n\
punpcklbw %%mm2, %%mm1  #                   v3 u3 v2 u2 v1 u1 v0 u0    \n\
movq      %%mm0, %%mm2  #                   y7 y6 y5 y4 y3 y2 y1 y0    \n\
punpcklbw %%mm1, %%mm2  #                   v1 y3 u1 y2 v0 y1 u0 y0    \n\
movq      %%mm2, (%0)   # Store low YUYV                               \n\
punpckhbw %%mm1, %%mm0  #                   v3 y7 u3 y6 v2 y5 u2 y4    \n\
movq      %%mm0, 8(%0)  # Store high YUYV                              \n\
movq      %%mm3, %%mm4  #                   Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0    \n\
punpcklbw %%mm1, %%mm4  #                   v1 Y3 u1 Y2 v0 Y1 u0 Y0    \n\
movq      %%mm4, (%1)   # Store low YUYV                               \n\
punpckhbw %%mm1, %%mm3  #                   v3 Y7 u3 Y6 v2 Y5 u2 Y4    \n\
movq      %%mm3, 8(%1)  # Store high YUYV                              \n\
"

#define MMX_YUV420_YVYU "                                               \n\
punpcklbw %%mm1, %%mm2  #                   u3 v3 u2 v2 u1 v1 u0 v0    \n\
movq      %%mm0, %%mm1  #                   y7 y6 y5 y4 y3 y2 y1 y0    \n\
punpcklbw %%mm2, %%mm1  #                   u1 y3 v1 y2 u0 y1 v0 y0    \n\
movq      %%mm1, (%0)   # Store low YUYV                               \n\
punpckhbw %%mm2, %%mm0  #                   u3 y7 v3 y6 u2 y5 v2 y4    \n\
movq      %%mm0, 8(%0)  # Store high YUYV                              \n\
movq      %%mm3, %%mm4  #                   Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0    \n\
punpcklbw %%mm2, %%mm4  #                   u1 Y3 v1 Y2 u0 Y1 v0 Y0    \n\
movq      %%mm4, (%1)   # Store low YUYV                               \n\
punpckhbw %%mm2, %%mm3  #                   u3 Y7 v3 Y6 u2 Y5 v2 Y4    \n\
movq      %%mm3, 8(%1)  # Store high YUYV                              \n\
"

#define MMX_YUV420_UYVY "                                               \n\
punpcklbw %%mm2, %%mm1  #                   v3 u3 v2 u2 v1 u1 v0 u0    \n\
movq      %%mm1, %%mm2  #                   v3 u3 v2 u2 v1 u1 v0 u0    \n\
punpcklbw %%mm0, %%mm2  #                   y3 v1 y2 u1 y1 v0 y0 u0    \n\
movq      %%mm2, (%0)   # Store low UYVY                               \n\
movq      %%mm1, %%mm2  #                   v3 u3 v2 u2 v1 u1 v0 u0    \n\
punpckhbw %%mm0, %%mm2  #                   y7 v3 y6 u3 y5 v2 y4 u2    \n\
movq      %%mm2, 8(%0)  # Store high UYVY                              \n\
movq      %%mm1, %%mm4  #                   v3 u3 v2 u2 v1 u1 v0 u0    \n\
punpcklbw %%mm3, %%mm4  #                   Y3 v1 Y2 u1 Y1 v0 Y0 u0    \n\
movq      %%mm4, (%1)   # Store low UYVY                               \n\
punpckhbw %%mm3, %%mm1  #                   Y7 v3 Y6 u3 Y5 v2 Y4 u2    \n\
movq      %%mm1, 8(%1)  # Store high UYVY                              \n\
"

/* FIXME: this code does not work! The chroma seems to be wrong. */
#define MMX_YUV420_Y211 "                                               \n\
movd      (%4), %%mm2   # Load 4 Cb   00 00 00 00 u3 u2 u1 u0          \n\
movd      (%5), %%mm3   # Load 4 Cr   00 00 00 00 v3 v2 v1 v0          \n\
pand      i_00ffw, %%mm0 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0          \n\
packuswb  %%mm0, %%mm0  # pack Y      y6 y4 y2 y0 y6 y4 y2 y0          \n\
pand      i_00ffw, %%mm2 # get U even 00 u6 00 u4 00 u2 00 u0          \n\
packuswb  %%mm2, %%mm2  # pack U      00 00 u2 u0 00 00 u2 u0          \n\
pand      i_00ffw, %%mm3 # get V even 00 v6 00 v4 00 v2 00 v0          \n\
packuswb  %%mm3, %%mm3  # pack V      00 00 v2 v0 00 00 v2 v0          \n\
punpcklbw %%mm3, %%mm2  #             00 00 00 00 v2 u2 v0 u0          \n\
psubsw    i_80w, %%mm2  # U,V -= 128                                   \n\
punpcklbw %%mm2, %%mm0  #             v2 y6 u2 y4 v0 y2 u0 y0          \n\
movq      %%mm0, (%0)   # Store YUYV                                   \n\
pand      i_00ffw, %%mm1 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0          \n\
packuswb  %%mm1, %%mm1  # pack Y      Y6 Y4 Y2 Y0 Y6 Y4 Y2 Y0          \n\
punpcklbw %%mm2, %%mm1  #             v2 Y6 u2 Y4 v0 Y2 u0 Y0          \n\
movq      %%mm1, (%1)   # Store YUYV                                   \n\
"

#elif defined(HAVE_MMX_INTRINSICS)

/* MMX intrinsics */

#include <mmintrin.h>

#define MMX_CALL(MMX_INSTRUCTIONS)              \
    do {                                        \
        __m64 mm0, mm1, mm2, mm3, mm4;          \
        MMX_INSTRUCTIONS                        \
        p_line1 += 16; p_line2 += 16;           \
        p_y1 += 8; p_y2 += 8;                   \
        p_u += 4; p_v += 4;                     \
    } while(0)

#define MMX_END _mm_empty()
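
/*
 * Editorial note, not in the original source: the intrinsics variant keeps
 * the same calling convention as the assembly one, so a caller uses it
 * identically; MMX_END (_mm_empty) must still run once the loop is done and
 * before any floating-point code, because the MMX registers alias the x87
 * register stack.  Sketch, with the same hypothetical bounds as above:
 *
 *     for( int i_x = i_width / 8 ; i_x-- ; )
 *         MMX_CALL( MMX_YUV420_UYVY );
 *     MMX_END;
 */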

#define MMX_YUV420_YUYV                             \
    mm1 = _mm_cvtsi32_si64(*(int*)p_u);             \
    mm2 = _mm_cvtsi32_si64(*(int*)p_v);             \
    mm0 = (__m64)*(uint64_t*)p_y1;                  \
    mm3 = (__m64)*(uint64_t*)p_y2;                  \
    mm1 = _mm_unpacklo_pi8(mm1, mm2);               \
    mm2 = mm0;                                      \
    mm2 = _mm_unpacklo_pi8(mm2, mm1);               \
    *(uint64_t*)p_line1 = (uint64_t)mm2;            \
    mm0 = _mm_unpackhi_pi8(mm0, mm1);               \
    *(uint64_t*)(p_line1+8) = (uint64_t)mm0;        \
    mm4 = mm3;                                      \
    mm4 = _mm_unpacklo_pi8(mm4, mm1);               \
    *(uint64_t*)p_line2 = (uint64_t)mm4;            \
    mm3 = _mm_unpackhi_pi8(mm3, mm1);               \
    *(uint64_t*)(p_line2+8) = (uint64_t)mm3;

#define MMX_YUV420_YVYU                             \
    mm2 = _mm_cvtsi32_si64(*(int*)p_u);             \
    mm1 = _mm_cvtsi32_si64(*(int*)p_v);             \
    mm0 = (__m64)*(uint64_t*)p_y1;                  \
    mm3 = (__m64)*(uint64_t*)p_y2;                  \
    mm1 = _mm_unpacklo_pi8(mm1, mm2);               \
    mm2 = mm0;                                      \
    mm2 = _mm_unpacklo_pi8(mm2, mm1);               \
    *(uint64_t*)p_line1 = (uint64_t)mm2;            \
    mm0 = _mm_unpackhi_pi8(mm0, mm1);               \
    *(uint64_t*)(p_line1+8) = (uint64_t)mm0;        \
    mm4 = mm3;                                      \
    mm4 = _mm_unpacklo_pi8(mm4, mm1);               \
    *(uint64_t*)p_line2 = (uint64_t)mm4;            \
    mm3 = _mm_unpackhi_pi8(mm3, mm1);               \
    *(uint64_t*)(p_line2+8) = (uint64_t)mm3;

#define MMX_YUV420_UYVY                             \
    mm1 = _mm_cvtsi32_si64(*(int*)p_u);             \
    mm2 = _mm_cvtsi32_si64(*(int*)p_v);             \
    mm0 = (__m64)*(uint64_t*)p_y1;                  \
    mm3 = (__m64)*(uint64_t*)p_y2;                  \
    mm1 = _mm_unpacklo_pi8(mm1, mm2);               \
    mm2 = mm1;                                      \
    mm2 = _mm_unpacklo_pi8(mm2, mm0);               \
    *(uint64_t*)p_line1 = (uint64_t)mm2;            \
    mm2 = mm1;                                      \
    mm2 = _mm_unpackhi_pi8(mm2, mm0);               \
    *(uint64_t*)(p_line1+8) = (uint64_t)mm2;        \
    mm4 = mm1;                                      \
    mm4 = _mm_unpacklo_pi8(mm4, mm3);               \
    *(uint64_t*)p_line2 = (uint64_t)mm4;            \
    mm1 = _mm_unpackhi_pi8(mm1, mm3);               \
    *(uint64_t*)(p_line2+8) = (uint64_t)mm1;

#endif

#elif defined( MODULE_NAME_IS_i420_yuy2_sse2 )

#if defined(CAN_COMPILE_SSE2)

/* SSE2 assembly */

#define SSE2_CALL(SSE2_INSTRUCTIONS)                \
    do {                                            \
    __asm__ __volatile__(                           \
        ".p2align 3 \n\t                            \
movq      (%0), %%xmm1  # Load 8 Cb   u7 u6 u5 u4 u3 u2 u1 u0   \n\
movq      (%1), %%xmm2  # Load 8 Cr   v7 v6 v5 v4 v3 v2 v1 v0   \n\
"                                                   \
        :                                           \
        : "r" (p_u), "r" (p_v)                      \
        : "xmm1", "xmm2");                          \
    __asm__ __volatile__(                           \
        ".p2align 3 \n\t"                           \
        SSE2_INSTRUCTIONS                           \
        :                                           \
        : "r" (p_line1), "r" (p_line2),             \
          "r" (p_y1), "r" (p_y2)                    \
        : "xmm0", "xmm1", "xmm2", "xmm3", "xmm4");  \
    p_line1 += 32; p_line2 += 32;                   \
    p_y1 += 16; p_y2 += 16;                         \
    p_u += 8; p_v += 8;                             \
    } while(0)

#define SSE2_END __asm__ __volatile__ ( "sfence" ::: "memory" )
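
/*
 * Editorial usage sketch, not part of the original header: the SSE2 path
 * converts 16x2 pixels per call and comes in _ALIGNED/_UNALIGNED flavours.
 * The aligned variants use movntdq (non-temporal stores), which is why
 * SSE2_END issues an sfence.  A caller would typically pick the variant from
 * the pointer alignment, for example (hypothetical names):
 *
 *     if( !( ((intptr_t)p_line1 | (intptr_t)p_y1) & 15 ) )
 *         for( int i_x = i_width / 16 ; i_x-- ; )
 *             SSE2_CALL( SSE2_YUV420_YUYV_ALIGNED );
 *     else
 *         for( int i_x = i_width / 16 ; i_x-- ; )
 *             SSE2_CALL( SSE2_YUV420_YUYV_UNALIGNED );
 *     SSE2_END;
 */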

#define SSE2_YUV420_YUYV_ALIGNED "                                      \n\
movdqa    (%2), %%xmm0   # Load 16 Y    y15 y14 y13 .. y2 y1 y0        \n\
movdqa    (%3), %%xmm3   # Load 16 Y    Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0        \n\
punpcklbw %%xmm2, %%xmm1 #              v7 u7 v6 u6 .. u1 v0 u0        \n\
movdqa    %%xmm0, %%xmm2 #              y15 y14 y13 .. y2 y1 y0        \n\
punpcklbw %%xmm1, %%xmm2 #              v3 y7 u3 .. v0 y1 u0 y0        \n\
movntdq   %%xmm2, (%0)   # Store low YUYV                              \n\
punpckhbw %%xmm1, %%xmm0 #              v3 y7 u3 y6 v2 y5 u2 y4        \n\
movntdq   %%xmm0, 16(%0) # Store high YUYV                             \n\
movdqa    %%xmm3, %%xmm4 #              Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0        \n\
punpcklbw %%xmm1, %%xmm4 #              v1 Y3 u1 Y2 v0 Y1 u0 Y0        \n\
movntdq   %%xmm4, (%1)   # Store low YUYV                              \n\
punpckhbw %%xmm1, %%xmm3 #              v3 Y7 u3 Y6 v2 Y5 u2 Y4        \n\
movntdq   %%xmm3, 16(%1) # Store high YUYV                             \n\
"

#define SSE2_YUV420_YUYV_UNALIGNED "                                    \n\
movdqu    (%2), %%xmm0   # Load 16 Y    y7 y6 y5 y4 y3 y2 y1 y0        \n\
movdqu    (%3), %%xmm3   # Load 16 Y    Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0        \n\
prefetchnta (%0)         # Tell CPU not to cache output YUYV data      \n\
prefetchnta (%1)         # Tell CPU not to cache output YUYV data      \n\
punpcklbw %%xmm2, %%xmm1 #              v3 u3 v2 u2 v1 u1 v0 u0        \n\
movdqa    %%xmm0, %%xmm2 #              y7 y6 y5 y4 y3 y2 y1 y0        \n\
punpcklbw %%xmm1, %%xmm2 #              v1 y3 u1 y2 v0 y1 u0 y0        \n\
movdqu    %%xmm2, (%0)   # Store low YUYV                              \n\
punpckhbw %%xmm1, %%xmm0 #              v3 y7 u3 y6 v2 y5 u2 y4        \n\
movdqu    %%xmm0, 16(%0) # Store high YUYV                             \n\
movdqa    %%xmm3, %%xmm4 #              Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0        \n\
punpcklbw %%xmm1, %%xmm4 #              v1 Y3 u1 Y2 v0 Y1 u0 Y0        \n\
movdqu    %%xmm4, (%1)   # Store low YUYV                              \n\
punpckhbw %%xmm1, %%xmm3 #              v3 Y7 u3 Y6 v2 Y5 u2 Y4        \n\
movdqu    %%xmm3, 16(%1) # Store high YUYV                             \n\
"

#define SSE2_YUV420_YVYU_ALIGNED "                                      \n\
movdqa    (%2), %%xmm0   # Load 16 Y    y7 y6 y5 y4 y3 y2 y1 y0        \n\
movdqa    (%3), %%xmm3   # Load 16 Y    Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0        \n\
punpcklbw %%xmm1, %%xmm2 #              u3 v3 u2 v2 u1 v1 u0 v0        \n\
movdqa    %%xmm0, %%xmm1 #              y7 y6 y5 y4 y3 y2 y1 y0        \n\
punpcklbw %%xmm2, %%xmm1 #              u1 y3 v1 y2 u0 y1 v0 y0        \n\
movntdq   %%xmm1, (%0)   # Store low YUYV                              \n\
punpckhbw %%xmm2, %%xmm0 #              u3 y7 v3 y6 u2 y5 v2 y4        \n\
movntdq   %%xmm0, 16(%0) # Store high YUYV                             \n\
movdqa    %%xmm3, %%xmm4 #              Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0        \n\
punpcklbw %%xmm2, %%xmm4 #              u1 Y3 v1 Y2 u0 Y1 v0 Y0        \n\
movntdq   %%xmm4, (%1)   # Store low YUYV                              \n\
punpckhbw %%xmm2, %%xmm3 #              u3 Y7 v3 Y6 u2 Y5 v2 Y4        \n\
movntdq   %%xmm3, 16(%1) # Store high YUYV                             \n\
"

#define SSE2_YUV420_YVYU_UNALIGNED "                                    \n\
movdqu    (%2), %%xmm0   # Load 16 Y    y7 y6 y5 y4 y3 y2 y1 y0        \n\
movdqu    (%3), %%xmm3   # Load 16 Y    Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0        \n\
prefetchnta (%0)         # Tell CPU not to cache output YVYU data      \n\
prefetchnta (%1)         # Tell CPU not to cache output YVYU data      \n\
punpcklbw %%xmm1, %%xmm2 #              u3 v3 u2 v2 u1 v1 u0 v0        \n\
movdqu    %%xmm0, %%xmm1 #              y7 y6 y5 y4 y3 y2 y1 y0        \n\
punpcklbw %%xmm2, %%xmm1 #              u1 y3 v1 y2 u0 y1 v0 y0        \n\
movdqu    %%xmm1, (%0)   # Store low YUYV                              \n\
punpckhbw %%xmm2, %%xmm0 #              u3 y7 v3 y6 u2 y5 v2 y4        \n\
movdqu    %%xmm0, 16(%0) # Store high YUYV                             \n\
movdqu    %%xmm3, %%xmm4 #              Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0        \n\
punpcklbw %%xmm2, %%xmm4 #              u1 Y3 v1 Y2 u0 Y1 v0 Y0        \n\
movdqu    %%xmm4, (%1)   # Store low YUYV                              \n\
punpckhbw %%xmm2, %%xmm3 #              u3 Y7 v3 Y6 u2 Y5 v2 Y4        \n\
movdqu    %%xmm3, 16(%1) # Store high YUYV                             \n\
"

#define SSE2_YUV420_UYVY_ALIGNED "                                      \n\
movdqa    (%2), %%xmm0   # Load 16 Y    y7 y6 y5 y4 y3 y2 y1 y0        \n\
movdqa    (%3), %%xmm3   # Load 16 Y    Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0        \n\
punpcklbw %%xmm2, %%xmm1 #              v3 u3 v2 u2 v1 u1 v0 u0        \n\
movdqa    %%xmm1, %%xmm2 #              v3 u3 v2 u2 v1 u1 v0 u0        \n\
punpcklbw %%xmm0, %%xmm2 #              y3 v1 y2 u1 y1 v0 y0 u0        \n\
movntdq   %%xmm2, (%0)   # Store low UYVY                              \n\
movdqa    %%xmm1, %%xmm2 #              v3 u3 v2 u2 v1 u1 v0 u0        \n\
punpckhbw %%xmm0, %%xmm2 #              y7 v3 y6 u3 y5 v2 y4 u2        \n\
movntdq   %%xmm2, 16(%0) # Store high UYVY                             \n\
movdqa    %%xmm1, %%xmm4 #              v3 u3 v2 u2 v1 u1 v0 u0        \n\
punpcklbw %%xmm3, %%xmm4 #              Y3 v1 Y2 u1 Y1 v0 Y0 u0        \n\
movntdq   %%xmm4, (%1)   # Store low UYVY                              \n\
punpckhbw %%xmm3, %%xmm1 #              Y7 v3 Y6 u3 Y5 v2 Y4 u2        \n\
movntdq   %%xmm1, 16(%1) # Store high UYVY                             \n\
"

#define SSE2_YUV420_UYVY_UNALIGNED "                                    \n\
movdqu    (%2), %%xmm0   # Load 16 Y    y7 y6 y5 y4 y3 y2 y1 y0        \n\
movdqu    (%3), %%xmm3   # Load 16 Y    Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0        \n\
prefetchnta (%0)         # Tell CPU not to cache output UYVY data      \n\
prefetchnta (%1)         # Tell CPU not to cache output UYVY data      \n\
punpcklbw %%xmm2, %%xmm1 #              v3 u3 v2 u2 v1 u1 v0 u0        \n\
movdqu    %%xmm1, %%xmm2 #              v3 u3 v2 u2 v1 u1 v0 u0        \n\
punpcklbw %%xmm0, %%xmm2 #              y3 v1 y2 u1 y1 v0 y0 u0        \n\
movdqu    %%xmm2, (%0)   # Store low UYVY                              \n\
movdqu    %%xmm1, %%xmm2 #              v3 u3 v2 u2 v1 u1 v0 u0        \n\
punpckhbw %%xmm0, %%xmm2 #              y7 v3 y6 u3 y5 v2 y4 u2        \n\
movdqu    %%xmm2, 16(%0) # Store high UYVY                             \n\
movdqu    %%xmm1, %%xmm4 #              v3 u3 v2 u2 v1 u1 v0 u0        \n\
punpcklbw %%xmm3, %%xmm4 #              Y3 v1 Y2 u1 Y1 v0 Y0 u0        \n\
movdqu    %%xmm4, (%1)   # Store low UYVY                              \n\
punpckhbw %%xmm3, %%xmm1 #              Y7 v3 Y6 u3 Y5 v2 Y4 u2        \n\
movdqu    %%xmm1, 16(%1) # Store high UYVY                             \n\
"

#elif defined(HAVE_SSE2_INTRINSICS)

/* SSE2 intrinsics */

#include <emmintrin.h>

#define SSE2_CALL(SSE2_INSTRUCTIONS)                \
    do {                                            \
        __m128i xmm0, xmm1, xmm2, xmm3, xmm4;       \
        SSE2_INSTRUCTIONS                           \
        p_line1 += 32; p_line2 += 32;               \
        p_y1 += 16; p_y2 += 16;                     \
        p_u += 8; p_v += 8;                         \
    } while(0)

#define SSE2_END _mm_sfence()

#define SSE2_YUV420_YUYV_ALIGNED                        \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);             \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);             \
    xmm0 = _mm_load_si128((__m128i *)p_y1);             \
    xmm3 = _mm_load_si128((__m128i *)p_y2);             \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);               \
    xmm2 = xmm0;                                        \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);               \
    _mm_stream_si128((__m128i*)(p_line1), xmm2);        \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);               \
    _mm_stream_si128((__m128i*)(p_line1+16), xmm0);     \
    xmm4 = xmm3;                                        \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);               \
    _mm_stream_si128((__m128i*)(p_line2), xmm4);        \
    xmm3 = _mm_unpackhi_epi8(xmm3, xmm1);               \
    _mm_stream_si128((__m128i*)(p_line2+16), xmm3);

#define SSE2_YUV420_YUYV_UNALIGNED                      \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);             \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);             \
    xmm0 = _mm_loadu_si128((__m128i *)p_y1);            \
    xmm3 = _mm_loadu_si128((__m128i *)p_y2);            \
    _mm_prefetch(p_line1, _MM_HINT_NTA);                \
    _mm_prefetch(p_line2, _MM_HINT_NTA);                \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);               \
    xmm2 = xmm0;                                        \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);               \
    _mm_storeu_si128((__m128i*)(p_line1), xmm2);        \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);               \
    _mm_storeu_si128((__m128i*)(p_line1+16), xmm0);     \
    xmm4 = xmm3;                                        \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);               \
    _mm_storeu_si128((__m128i*)(p_line2), xmm4);        \
    xmm3 = _mm_unpackhi_epi8(xmm3, xmm1);               \
    _mm_storeu_si128((__m128i*)(p_line2+16), xmm3);

#define SSE2_YUV420_YVYU_ALIGNED                        \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v);             \
    xmm2 = _mm_loadl_epi64((__m128i *)p_u);             \
    xmm0 = _mm_load_si128((__m128i *)p_y1);             \
    xmm3 = _mm_load_si128((__m128i *)p_y2);             \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);               \
    xmm2 = xmm0;                                        \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);               \
    _mm_stream_si128((__m128i*)(p_line1), xmm2);        \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);               \
    _mm_stream_si128((__m128i*)(p_line1+16), xmm0);     \
    xmm4 = xmm3;                                        \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);               \
    _mm_stream_si128((__m128i*)(p_line2), xmm4);        \
    xmm3 = _mm_unpackhi_epi8(xmm3, xmm1);               \
    _mm_stream_si128((__m128i*)(p_line2+16), xmm3);

#define SSE2_YUV420_YVYU_UNALIGNED                      \
    xmm1 = _mm_loadl_epi64((__m128i *)p_v);             \
    xmm2 = _mm_loadl_epi64((__m128i *)p_u);             \
    xmm0 = _mm_loadu_si128((__m128i *)p_y1);            \
    xmm3 = _mm_loadu_si128((__m128i *)p_y2);            \
    _mm_prefetch(p_line1, _MM_HINT_NTA);                \
    _mm_prefetch(p_line2, _MM_HINT_NTA);                \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);               \
    xmm2 = xmm0;                                        \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm1);               \
    _mm_storeu_si128((__m128i*)(p_line1), xmm2);        \
    xmm0 = _mm_unpackhi_epi8(xmm0, xmm1);               \
    _mm_storeu_si128((__m128i*)(p_line1+16), xmm0);     \
    xmm4 = xmm3;                                        \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm1);               \
    _mm_storeu_si128((__m128i*)(p_line2), xmm4);        \
    xmm3 = _mm_unpackhi_epi8(xmm3, xmm1);               \
    _mm_storeu_si128((__m128i*)(p_line2+16), xmm3);

#define SSE2_YUV420_UYVY_ALIGNED                        \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);             \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);             \
    xmm0 = _mm_load_si128((__m128i *)p_y1);             \
    xmm3 = _mm_load_si128((__m128i *)p_y2);             \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);               \
    xmm2 = xmm1;                                        \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm0);               \
    _mm_stream_si128((__m128i*)(p_line1), xmm2);        \
    xmm2 = xmm1;                                        \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm0);               \
    _mm_stream_si128((__m128i*)(p_line1+16), xmm2);     \
    xmm4 = xmm1;                                        \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm3);               \
    _mm_stream_si128((__m128i*)(p_line2), xmm4);        \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3);               \
    _mm_stream_si128((__m128i*)(p_line2+16), xmm1);

#define SSE2_YUV420_UYVY_UNALIGNED                      \
    xmm1 = _mm_loadl_epi64((__m128i *)p_u);             \
    xmm2 = _mm_loadl_epi64((__m128i *)p_v);             \
    xmm0 = _mm_loadu_si128((__m128i *)p_y1);            \
    xmm3 = _mm_loadu_si128((__m128i *)p_y2);            \
    _mm_prefetch(p_line1, _MM_HINT_NTA);                \
    _mm_prefetch(p_line2, _MM_HINT_NTA);                \
    xmm1 = _mm_unpacklo_epi8(xmm1, xmm2);               \
    xmm2 = xmm1;                                        \
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm0);               \
    _mm_storeu_si128((__m128i*)(p_line1), xmm2);        \
    xmm2 = xmm1;                                        \
    xmm2 = _mm_unpackhi_epi8(xmm2, xmm0);               \
    _mm_storeu_si128((__m128i*)(p_line1+16), xmm2);     \
    xmm4 = xmm1;                                        \
    xmm4 = _mm_unpacklo_epi8(xmm4, xmm3);               \
    _mm_storeu_si128((__m128i*)(p_line2), xmm4);        \
    xmm1 = _mm_unpackhi_epi8(xmm1, xmm3);               \
    _mm_storeu_si128((__m128i*)(p_line2+16), xmm1);

#endif

#endif

/* Used in both accelerated and C modules */

#define C_YUV420_YVYU( )                                    \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;     \
    *(p_line1)++ = *(p_line2)++ = *(p_v)++;                 \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;     \
    *(p_line1)++ = *(p_line2)++ = *(p_u)++;

#define C_YUV420_Y211( )                                    \
    *(p_line1)++ = *(p_y1); p_y1 += 2;                      \
    *(p_line2)++ = *(p_y2); p_y2 += 2;                      \
    *(p_line1)++ = *(p_line2)++ = *(p_u) - 0x80; p_u += 2;  \
    *(p_line1)++ = *(p_y1); p_y1 += 2;                      \
    *(p_line2)++ = *(p_y2); p_y2 += 2;                      \
    *(p_line1)++ = *(p_line2)++ = *(p_v) - 0x80; p_v += 2;

#define C_YUV420_YUYV( )                                    \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;     \
    *(p_line1)++ = *(p_line2)++ = *(p_u)++;                 \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;     \
    *(p_line1)++ = *(p_line2)++ = *(p_v)++;

#define C_YUV420_UYVY( )                                    \
    *(p_line1)++ = *(p_line2)++ = *(p_u)++;                 \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;     \
    *(p_line1)++ = *(p_line2)++ = *(p_v)++;                 \
    *(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++;
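
/*
 * Editorial usage sketch, not part of the original header: each C macro
 * emits one 2x2 block of pixels (four output bytes per line), so the plain C
 * fallback simply repeats it across a pair of source rows.  Hypothetical
 * example for the YUYV layout:
 *
 *     for( int i_y = i_height / 2 ; i_y-- ; )
 *     {
 *         for( int i_x = i_width / 2 ; i_x-- ; )
 *             C_YUV420_YUYV( );
 *         // caller adjusts the pointers for line pitch/padding here;
 *         // p_u and p_v advance by one chroma row per pair of luma rows
 *     }
 */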