let gcc choose how to reference memory addresses in i420_rgb mmx asm
[vlc.git] / modules / video_chroma / i420_rgb_mmx.h
blob e9c32fbfdcad52694e9ff9bb52f0dedd7a9a111a
1 /*****************************************************************************
2 * transforms_yuvmmx.h: MMX YUV transformation assembly
3 *****************************************************************************
4 * Copyright (C) 1999-2007 the VideoLAN team
5 * $Id$
7 * Authors: Olie Lho <ollie@sis.com.tw>
8 * Gaël Hendryckx <jimmy@via.ecp.fr>
9 * Samuel Hocevar <sam@zoy.org>
10 * Damien Fouilleul <damienf@videolan.org>
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
25 *****************************************************************************/
27 #ifdef MODULE_NAME_IS_i420_rgb_mmx
29 /* hope these constant values are cache line aligned */
30 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
31 #define USED_U64(foo) \
32 static const uint64_t foo __asm__ (#foo) __attribute__((used))
33 #else
34 #define USED_U64(foo) \
35 static const uint64_t foo __asm__ (#foo) __attribute__((unused))
36 #endif
37 USED_U64(mmx_80w) = 0x0080008000800080ULL; /* Will be referenced as %4
38 * in inline asm */
39 USED_U64(mmx_10w) = 0x1010101010101010ULL; /* -- as %5 */
40 USED_U64(mmx_00ffw) = 0x00ff00ff00ff00ffULL; /* -- as %6 */
41 USED_U64(mmx_Y_coeff) = 0x253f253f253f253fULL; /* -- as %7 */
43 USED_U64(mmx_U_green) = 0xf37df37df37df37dULL; /* -- as %8 */
44 USED_U64(mmx_U_blue) = 0x4093409340934093ULL; /* -- as %9 */
45 USED_U64(mmx_V_red) = 0x3312331233123312ULL; /* -- as %10 */
46 USED_U64(mmx_V_green) = 0xe5fce5fce5fce5fcULL; /* -- as %11 */
48 USED_U64(mmx_mask_f8) = 0xf8f8f8f8f8f8f8f8ULL; /* -- as %12 */
49 USED_U64(mmx_mask_fc) = 0xfcfcfcfcfcfcfcfcULL; /* -- as %13 */
50 #undef USED_U64
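/* The constants above are the usual ITU-R BT.601 YCbCr -> RGB factors in
 * signed Q3.13 fixed point (factor * 8192): 0x253f = 9535 ~ 1.164 * 8192,
 * 0x3312 = 13074 ~ 1.596 * 8192, 0x4093 = 16531 ~ 2.018 * 8192,
 * 0xe5fc = -6660 ~ -0.813 * 8192 and 0xf37d = -3203 ~ -0.391 * 8192.
 * As an illustration only (this is not part of the module), the scalar
 * per-pixel equivalent of the MMX code below would be roughly:
 *
 *   int y = (p_y[i] - 16) * 9535;            // mmx_10w, mmx_Y_coeff
 *   int u = p_u[i / 2] - 128;                // mmx_80w
 *   int v = p_v[i / 2] - 128;
 *   uint8_t r = clip((y + 13074 * v) >> 13);            // mmx_V_red
 *   uint8_t g = clip((y - 3203 * u - 6660 * v) >> 13);  // mmx_U_green, mmx_V_green
 *   uint8_t b = clip((y + 16531 * u) >> 13);            // mmx_U_blue
 *
 * where clip() saturates to 0..255 (packuswb in the real code) and the
 * rounding differs slightly, since the MMX version shifts each term before
 * adding. */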
52 #if defined(CAN_COMPILE_MMX)
54 /* MMX assembly */
56 #define MMX_CALL(MMX_INSTRUCTIONS) \
57 do { \
58 __asm__ __volatile__( \
59 ".p2align 3 \n\t" \
60 MMX_INSTRUCTIONS \
61 : \
62 : "r" (p_y), "r" (p_u), \
63 "r" (p_v), "r" (p_buffer), \
64 "m" (mmx_80w), "m" (mmx_10w), \
65 "m" (mmx_00ffw), "m" (mmx_Y_coeff), \
66 "m" (mmx_U_green), "m" (mmx_U_blue), \
67 "m" (mmx_V_red), "m" (mmx_V_green), \
68 "m" (mmx_mask_f8), "m" (mmx_mask_fc) ); \
69 } while(0)
71 #define MMX_END __asm__ __volatile__ ( "emms" )
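/* Illustration only: a caller that includes this header is expected to drive
 * these macros roughly like this, 8 pixels (8 Y, 4 Cb, 4 Cr) per iteration --
 * the real conversion loops add their own pointer and scaling logic:
 *
 *   while( i_pixels >= 8 )               // i_pixels is hypothetical
 *   {
 *       MMX_CALL( MMX_INIT_16
 *                 MMX_YUV_MUL
 *                 MMX_YUV_ADD
 *                 MMX_UNPACK_16 );       // adjacent string literals concatenate
 *       p_y += 8; p_u += 4; p_v += 4;
 *       p_buffer += 8;                   // 8 16-bit pixels written
 *       i_pixels -= 8;
 *   }
 *   MMX_END;                             // emms before any float code runs
 */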
73 #define MMX_INIT_16 " \n\
74 movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
75 movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
76 pxor %%mm4, %%mm4 # zero mm4 \n\
77 movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
80 #define MMX_INIT_16_GRAY " \n\
81 movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
82 #movl $0, (%3) # cache preload for image \n\
85 #define MMX_INIT_32 " \n\
86 movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0 \n\
87 movl $0, (%3) # cache preload for image \n\
88 movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0 \n\
89 pxor %%mm4, %%mm4 # zero mm4 \n\
90 movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
94 * Do the multiply part of the conversion for even and odd pixels,
95 * register usage:
96 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
97 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
98 * mm6 -> Y even, mm7 -> Y odd
101 #define MMX_YUV_MUL " \n\
102 # convert the chroma part \n\
103 punpcklbw %%mm4, %%mm0 # scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
104 punpcklbw %%mm4, %%mm1 # scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
105 psubsw %4, %%mm0 # Cb -= 128 \n\
106 psubsw %4, %%mm1 # Cr -= 128 \n\
107 psllw $3, %%mm0 # Promote precision \n\
108 psllw $3, %%mm1 # Promote precision \n\
109 movq %%mm0, %%mm2 # Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 \n\
110 movq %%mm1, %%mm3 # Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 \n\
111 pmulhw %8, %%mm2 # Mul Cb with green coeff -> Cb green \n\
112 pmulhw %11, %%mm3 # Mul Cr with green coeff -> Cr green \n\
113 pmulhw %9, %%mm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 \n\
114 pmulhw %10, %%mm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 \n\
115 paddsw %%mm3, %%mm2 # Cb green + Cr green -> Cgreen \n\
117 # convert the luma part \n\
118 psubusb %5, %%mm6 # Y -= 16 \n\
119 movq %%mm6, %%mm7 # Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
120 pand %6, %%mm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0 \n\
121 psrlw $8, %%mm7 # get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 \n\
122 psllw $3, %%mm6 # Promote precision \n\
123 psllw $3, %%mm7 # Promote precision \n\
124 pmulhw %7, %%mm6 # Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 \n\
125 pmulhw %7, %%mm7 # Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 \n\
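/* pmulhw keeps the high 16 bits of the signed 16x16 product, so together
 * with the "psllw $3" promotion above each term is effectively
 * (value * coeff) >> 13, i.e. a multiply by the Q3.13 factor. For instance,
 * for the luma term:
 *
 *   (((y - 16) << 3) * 0x253f) >> 16  ==  ((y - 16) * 9535) >> 13
 *                                     ~=  (y - 16) * 1.164
 */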
129 * Do the addition part of the conversion for even and odd pixels,
130 * register usage:
131 * mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
132 * mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
133 * mm6 -> Y even, mm7 -> Y odd
136 #define MMX_YUV_ADD " \n\
137 # Do horizontal and vertical scaling \n\
138 movq %%mm0, %%mm3 # Copy Cblue \n\
139 movq %%mm1, %%mm4 # Copy Cred \n\
140 movq %%mm2, %%mm5 # Copy Cgreen \n\
141 paddsw %%mm6, %%mm0 # Y even + Cblue 00 B6 00 B4 00 B2 00 B0 \n\
142 paddsw %%mm7, %%mm3 # Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 \n\
143 paddsw %%mm6, %%mm1 # Y even + Cred 00 R6 00 R4 00 R2 00 R0 \n\
144 paddsw %%mm7, %%mm4 # Y odd + Cred 00 R7 00 R5 00 R3 00 R1 \n\
145 paddsw %%mm6, %%mm2 # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 \n\
146 paddsw %%mm7, %%mm5 # Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 \n\
148 # Limit RGB even to 0..255 \n\
149 packuswb %%mm0, %%mm0 # B6 B4 B2 B0 / B6 B4 B2 B0 \n\
150 packuswb %%mm1, %%mm1 # R6 R4 R2 R0 / R6 R4 R2 R0 \n\
151 packuswb %%mm2, %%mm2 # G6 G4 G2 G0 / G6 G4 G2 G0 \n\
153 # Limit RGB odd to 0..255 \n\
154 packuswb %%mm3, %%mm3 # B7 B5 B3 B1 / B7 B5 B3 B1 \n\
155 packuswb %%mm4, %%mm4 # R7 R5 R3 R1 / R7 R5 R3 R1 \n\
156 packuswb %%mm5, %%mm5 # G7 G5 G3 G1 / G7 G5 G3 G1 \n\
158 # Interleave RGB even and odd \n\
159 punpcklbw %%mm3, %%mm0 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
160 punpcklbw %%mm4, %%mm1 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
161 punpcklbw %%mm5, %%mm2 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
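/* In scalar terms this block computes, for every pixel (sketch, with the
 * clipping done by packuswb):
 *
 *   B = clip(Y + Cblue);  R = clip(Y + Cred);  G = clip(Y + Cgreen);
 *
 * separately for the even and odd halves, then re-interleaves them so that
 * mm0, mm1 and mm2 end up holding the 8 B, R and G bytes in pixel order. */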
165 * Grayscale case, only use Y
168 #define MMX_YUV_GRAY " \n\
169 # convert the luma part \n\
170 psubusb %5, %%mm6 \n\
171 movq %%mm6, %%mm7 \n\
172 pand %6, %%mm6 \n\
173 psrlw $8, %%mm7 \n\
174 psllw $3, %%mm6 \n\
175 psllw $3, %%mm7 \n\
176 pmulhw %7, %%mm6 \n\
177 pmulhw %7, %%mm7 \n\
178 packuswb %%mm6, %%mm6 \n\
179 packuswb %%mm7, %%mm7 \n\
180 punpcklbw %%mm7, %%mm6 \n\
183 #define MMX_UNPACK_16_GRAY " \n\
184 movq %%mm6, %%mm5 \n\
185 pand %12, %%mm6 \n\
186 pand %13, %%mm5 \n\
187 movq %%mm6, %%mm7 \n\
188 psrlw $3, %%mm7 \n\
189 pxor %%mm3, %%mm3 \n\
190 movq %%mm7, %%mm2 \n\
191 movq %%mm5, %%mm0 \n\
192 punpcklbw %%mm3, %%mm5 \n\
193 punpcklbw %%mm6, %%mm7 \n\
194 psllw $3, %%mm5 \n\
195 por %%mm5, %%mm7 \n\
196 movq %%mm7, (%3) \n\
197 punpckhbw %%mm3, %%mm0 \n\
198 punpckhbw %%mm6, %%mm2 \n\
199 psllw $3, %%mm0 \n\
200 movq 8(%0), %%mm6 \n\
201 por %%mm0, %%mm2 \n\
202 movq %%mm2, 8(%3) \n\
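/* Scalar sketch of the grayscale 16 bpp path: the rescaled luma byte y from
 * MMX_YUV_GRAY is simply replicated into the three RGB565 fields,
 *
 *   uint16_t pixel = ((y >> 3) << 11) | ((y >> 2) << 5) | (y >> 3);
 *
 * for 8 pixels at a time, with the next 8 Y samples preloaded into mm6. */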
207 * convert RGB plane to RGB 15 bits,
208 * mm0 -> B, mm1 -> R, mm2 -> G,
209 * mm0 -> 4 packed pixels 0-3 on output, mm5 -> 4 packed pixels 4-7,
210 * mm6 -> next 8 Y samples, reloaded at the end of the block
213 #define MMX_UNPACK_15 " \n\
214 # mask unneeded bits off \n\
215 pand %12, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
216 psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
217 pand %12, %%mm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
218 pand %12, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
219 psrlw $1,%%mm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
220 pxor %%mm4, %%mm4 # zero mm4 \n\
221 movq %%mm0, %%mm5 # Copy B7-B0 \n\
222 movq %%mm2, %%mm7 # Copy G7-G0 \n\
224 # convert rgb24 plane to rgb15 pack for pixel 0-3 \n\
225 punpcklbw %%mm4, %%mm2 # ________ ________ g7g6g5g4 g3______ \n\
226 punpcklbw %%mm1, %%mm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
227 psllw $2,%%mm2 # ________ ____g7g6 g5g4g3__ ________ \n\
228 por %%mm2, %%mm0 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
229 movq 8(%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
230 movq %%mm0, (%3) # store pixel 0-3 \n\
232 # convert rgb24 plane to rgb15 pack for pixel 4-7 \n\
233 punpckhbw %%mm4, %%mm7 # ________ ________ g7g6g5g4 g3______ \n\
234 punpckhbw %%mm1, %%mm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
235 psllw $2,%%mm7 # ________ ____g7g6 g5g4g3__ ________ \n\
236 movd 4(%1), %%mm0 # Load 4 Cb __ __ __ __ u3 u2 u1 u0 \n\
237 por %%mm7, %%mm5 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
238 movd 4(%2), %%mm1 # Load 4 Cr __ __ __ __ v3 v2 v1 v0 \n\
239 movq %%mm5, 8(%3) # store pixel 4-7 \n\
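/* Scalar equivalent of the 15 bpp packing above (sketch):
 *
 *   uint16_t pixel = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);
 *
 * i.e. 0RRRRRGG GGGBBBBB, written little-endian to p_buffer. */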
243 * convert RGB plane to RGB 16 bits,
244 * mm0 -> B, mm1 -> R, mm2 -> G,
245 * mm0 -> 4 packed pixels 0-3 on output, mm5 -> 4 packed pixels 4-7,
246 * mm6 -> next 8 Y samples, reloaded at the end of the block
249 #define MMX_UNPACK_16 " \n\
250 # mask unneeded bits off \n\
251 pand %12, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
252 pand %13, %%mm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
253 pand %12, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
254 psrlw $3,%%mm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
255 pxor %%mm4, %%mm4 # zero mm4 \n\
256 movq %%mm0, %%mm5 # Copy B7-B0 \n\
257 movq %%mm2, %%mm7 # Copy G7-G0 \n\
259 # convert rgb24 plane to rgb16 pack for pixel 0-3 \n\
260 punpcklbw %%mm4, %%mm2 # ________ ________ g7g6g5g4 g3g2____ \n\
261 punpcklbw %%mm1, %%mm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
262 psllw $3,%%mm2 # ________ __g7g6g5 g4g3g2__ ________ \n\
263 por %%mm2, %%mm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
264 movq 8(%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
265 movq %%mm0, (%3) # store pixel 0-3 \n\
267 # convert rgb24 plane to rgb16 pack for pixel 4-7 \n\
268 punpckhbw %%mm4, %%mm7 # ________ ________ g7g6g5g4 g3g2____ \n\
269 punpckhbw %%mm1, %%mm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
270 psllw $3,%%mm7 # ________ __g7g6g5 g4g3g2__ ________ \n\
271 movd 4(%1), %%mm0 # Load 4 Cb __ __ __ __ u3 u2 u1 u0 \n\
272 por %%mm7, %%mm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
273 movd 4(%2), %%mm1 # Load 4 Cr __ __ __ __ v3 v2 v1 v0 \n\
274 movq %%mm5, 8(%3) # store pixel 4-7 \n\
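/* Scalar equivalent of the 16 bpp packing above (sketch):
 *
 *   uint16_t pixel = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);
 *
 * i.e. RRRRRGGG GGGBBBBB. */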
278 * convert RGB plane to RGB packed format,
279 * mm0 -> B, mm1 -> R, mm2 -> G
282 #define MMX_UNPACK_32_ARGB " \n\
283 pxor %%mm3, %%mm3 # zero mm3 \n\
284 movq %%mm0, %%mm4 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
285 punpcklbw %%mm2, %%mm4 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
286 movq %%mm1, %%mm5 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
287 punpcklbw %%mm3, %%mm5 # 00 R3 00 R2 00 R1 00 R0 \n\
288 movq %%mm4, %%mm6 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
289 punpcklwd %%mm5, %%mm4 # 00 R1 G1 B1 00 R0 G0 B0 \n\
290 movq %%mm4, (%3) # Store ARGB1 ARGB0 \n\
291 punpckhwd %%mm5, %%mm6 # 00 R3 G3 B3 00 R2 G2 B2 \n\
292 movq %%mm6, 8(%3) # Store ARGB3 ARGB2 \n\
293 punpckhbw %%mm2, %%mm0 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
294 punpckhbw %%mm3, %%mm1 # 00 R7 00 R6 00 R5 00 R4 \n\
295 movq %%mm0, %%mm5 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
296 punpcklwd %%mm1, %%mm5 # 00 R5 G5 B5 00 R4 G4 B4 \n\
297 movq %%mm5, 16(%3) # Store ARGB5 ARGB4 \n\
298 punpckhwd %%mm1, %%mm0 # 00 R7 G7 B7 00 R6 G6 B6 \n\
299 movq %%mm0, 24(%3) # Store ARGB7 ARGB6 \n\
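/* Sanity check of the byte order (sketch): on the little-endian CPUs this
 * code targets, each 32-bit pixel is stored as B, G, R, 0 in memory, which
 * read back as a uint32_t is
 *
 *   uint32_t pixel = ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;  // 0x00RRGGBB
 *
 * with the alpha byte left at zero; the RGBA/BGRA/ABGR variants below differ
 * only in which plane is interleaved into which byte lane. */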
302 #define MMX_UNPACK_32_RGBA " \n\
303 pxor %%mm3, %%mm3 # zero mm3 \n\
304 movq %%mm2, %%mm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
305 punpcklbw %%mm1, %%mm4 # R3 G3 R2 G2 R1 G1 R0 G0 \n\
306 punpcklbw %%mm0, %%mm3 # B3 00 B2 00 B1 00 B0 00 \n\
307 movq %%mm3, %%mm5 # B3 00 B2 00 B1 00 B0 00 \n\
308 punpcklwd %%mm4, %%mm3 # R1 G1 B1 00 R0 G0 B0 00 \n\
309 movq %%mm3, (%3) # Store RGBA1 RGBA0 \n\
310 punpckhwd %%mm4, %%mm5 # R3 G3 B3 00 R2 G2 B2 00 \n\
311 movq %%mm5, 8(%3) # Store RGBA3 RGBA2 \n\
312 pxor %%mm6, %%mm6 # zero mm6 \n\
313 punpckhbw %%mm1, %%mm2 # R7 G7 R6 G6 R5 G5 R4 G4 \n\
314 punpckhbw %%mm0, %%mm6 # B7 00 B6 00 B5 00 B4 00 \n\
315 movq %%mm6, %%mm0 # B7 00 B6 00 B5 00 B4 00 \n\
316 punpcklwd %%mm2, %%mm6 # R5 G5 B5 00 R4 G4 B4 00 \n\
317 movq %%mm6, 16(%3) # Store RGBA5 RGBA4 \n\
318 punpckhwd %%mm2, %%mm0 # R7 G7 B7 00 R6 G6 B6 00 \n\
319 movq %%mm0, 24(%3) # Store RGBA7 RGBA6 \n\
322 #define MMX_UNPACK_32_BGRA " \n\
323 pxor %%mm3, %%mm3 # zero mm3 \n\
324 movq %%mm2, %%mm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
325 punpcklbw %%mm0, %%mm4 # B3 G3 B2 G2 B1 G1 B0 G0 \n\
326 punpcklbw %%mm1, %%mm3 # R3 00 R2 00 R1 00 R0 00 \n\
327 movq %%mm3, %%mm5 # R3 00 R2 00 R1 00 R0 00 \n\
328 punpcklwd %%mm4, %%mm3 # B1 G1 R1 00 B0 G0 R0 00 \n\
329 movq %%mm3, (%3) # Store BGRA1 BGRA0 \n\
330 punpckhwd %%mm4, %%mm5 # B3 G3 R3 00 B2 G2 R2 00 \n\
331 movq %%mm5, 8(%3) # Store BGRA3 BGRA2 \n\
332 pxor %%mm6, %%mm6 # zero mm6 \n\
333 punpckhbw %%mm0, %%mm2 # B7 G7 B6 G6 B5 G5 B4 G4 \n\
334 punpckhbw %%mm1, %%mm6 # R7 00 R6 00 R5 00 R4 00 \n\
335 movq %%mm6, %%mm0 # R7 00 R6 00 R5 00 R4 00 \n\
336 punpcklwd %%mm2, %%mm6 # B5 G5 R5 00 B4 G4 R4 00 \n\
337 movq %%mm6, 16(%3) # Store BGRA5 BGRA4 \n\
338 punpckhwd %%mm2, %%mm0 # B7 G7 R7 00 B6 G6 R6 00 \n\
339 movq %%mm0, 24(%3) # Store BGRA7 BGRA6 \n\
342 #define MMX_UNPACK_32_ABGR " \n\
343 pxor %%mm3, %%mm3 # zero mm3 \n\
344 movq %%mm1, %%mm4 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
345 punpcklbw %%mm2, %%mm4 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
346 movq %%mm0, %%mm5 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
347 punpcklbw %%mm3, %%mm5 # 00 B3 00 B2 00 B1 00 B0 \n\
348 movq %%mm4, %%mm6 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
349 punpcklwd %%mm5, %%mm4 # 00 B1 G1 R1 00 B0 G0 R0 \n\
350 movq %%mm4, (%3) # Store ABGR1 ABGR0 \n\
351 punpckhwd %%mm5, %%mm6 # 00 B3 G3 R3 00 B2 G2 R2 \n\
352 movq %%mm6, 8(%3) # Store ABGR3 ABGR2 \n\
353 punpckhbw %%mm2, %%mm1 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
354 punpckhbw %%mm3, %%mm0 # 00 B7 00 B6 00 B5 00 B4 \n\
355 movq %%mm1, %%mm2 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
356 punpcklwd %%mm0, %%mm1 # 00 B5 G5 R5 00 B4 G4 R4 \n\
357 movq %%mm1, 16(%3) # Store ABGR5 ABGR4 \n\
358 punpckhwd %%mm0, %%mm2 # 00 B7 G7 R7 00 B6 G6 R6 \n\
359 movq %%mm2, 24(%3) # Store ABGR7 ABGR6 \n\
362 #elif defined(HAVE_MMX_INTRINSICS)
364 /* MMX intrinsics */
366 #include <mmintrin.h>
368 #define MMX_CALL(MMX_INSTRUCTIONS) \
369 do { \
370 __m64 mm0, mm1, mm2, mm3, \
371 mm4, mm5, mm6, mm7; \
372 MMX_INSTRUCTIONS \
373 } while(0)
375 #define MMX_END _mm_empty()
377 #define MMX_INIT_16 \
378 mm0 = _mm_cvtsi32_si64(*(int*)p_u); \
379 mm1 = _mm_cvtsi32_si64(*(int*)p_v); \
380 mm4 = _mm_setzero_si64(); \
381 mm6 = (__m64)*(uint64_t *)p_y;
383 #define MMX_INIT_32 \
384 mm0 = _mm_cvtsi32_si64(*(int*)p_u); \
385 *(uint16_t *)p_buffer = 0; \
386 mm1 = _mm_cvtsi32_si64(*(int*)p_v); \
387 mm4 = _mm_setzero_si64(); \
388 mm6 = (__m64)*(uint64_t *)p_y;
390 #define MMX_YUV_MUL \
391 mm0 = _mm_unpacklo_pi8(mm0, mm4); \
392 mm1 = _mm_unpacklo_pi8(mm1, mm4); \
393 mm0 = _mm_subs_pi16(mm0, (__m64)mmx_80w); \
394 mm1 = _mm_subs_pi16(mm1, (__m64)mmx_80w); \
395 mm0 = _mm_slli_pi16(mm0, 3); \
396 mm1 = _mm_slli_pi16(mm1, 3); \
397 mm2 = mm0; \
398 mm3 = mm1; \
399 mm2 = _mm_mulhi_pi16(mm2, (__m64)mmx_U_green); \
400 mm3 = _mm_mulhi_pi16(mm3, (__m64)mmx_V_green); \
401 mm0 = _mm_mulhi_pi16(mm0, (__m64)mmx_U_blue); \
402 mm1 = _mm_mulhi_pi16(mm1, (__m64)mmx_V_red); \
403 mm2 = _mm_adds_pi16(mm2, mm3); \
405 mm6 = _mm_subs_pu8(mm6, (__m64)mmx_10w); \
406 mm7 = mm6; \
407 mm6 = _mm_and_si64(mm6, (__m64)mmx_00ffw); \
408 mm7 = _mm_srli_pi16(mm7, 8); \
409 mm6 = _mm_slli_pi16(mm6, 3); \
410 mm7 = _mm_slli_pi16(mm7, 3); \
411 mm6 = _mm_mulhi_pi16(mm6, (__m64)mmx_Y_coeff); \
412 mm7 = _mm_mulhi_pi16(mm7, (__m64)mmx_Y_coeff);
414 #define MMX_YUV_ADD \
415 mm3 = mm0; \
416 mm4 = mm1; \
417 mm5 = mm2; \
418 mm0 = _mm_adds_pi16(mm0, mm6); \
419 mm3 = _mm_adds_pi16(mm3, mm7); \
420 mm1 = _mm_adds_pi16(mm1, mm6); \
421 mm4 = _mm_adds_pi16(mm4, mm7); \
422 mm2 = _mm_adds_pi16(mm2, mm6); \
423 mm5 = _mm_adds_pi16(mm5, mm7); \
425 mm0 = _mm_packs_pu16(mm0, mm0); \
426 mm1 = _mm_packs_pu16(mm1, mm1); \
427 mm2 = _mm_packs_pu16(mm2, mm2); \
429 mm3 = _mm_packs_pu16(mm3, mm3); \
430 mm4 = _mm_packs_pu16(mm4, mm4); \
431 mm5 = _mm_packs_pu16(mm5, mm5); \
433 mm0 = _mm_unpacklo_pi8(mm0, mm3); \
434 mm1 = _mm_unpacklo_pi8(mm1, mm4); \
435 mm2 = _mm_unpacklo_pi8(mm2, mm5);
437 #define MMX_UNPACK_15 \
438 mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
439 mm0 = _mm_srli_pi16(mm0, 3); \
440 mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_f8); \
441 mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
442 mm1 = _mm_srli_pi16(mm1, 1); \
443 mm4 = _mm_setzero_si64(); \
444 mm5 = mm0; \
445 mm7 = mm2; \
447 mm2 = _mm_unpacklo_pi8(mm2, mm4); \
448 mm0 = _mm_unpacklo_pi8(mm0, mm1); \
449 mm2 = _mm_slli_pi16(mm2, 2); \
450 mm0 = _mm_or_si64(mm0, mm2); \
451 mm6 = (__m64)*(uint64_t *)(p_y + 8); \
452 *(uint64_t *)p_buffer = (uint64_t)mm0; \
454 mm7 = _mm_unpackhi_pi8(mm7, mm4); \
455 mm5 = _mm_unpackhi_pi8(mm5, mm1); \
456 mm7 = _mm_slli_pi16(mm7, 2); \
457 mm0 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_u + 4)); \
458 mm5 = _mm_or_si64(mm5, mm7); \
459 mm1 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_v + 4)); \
460 *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
462 #define MMX_UNPACK_16 \
463 mm0 = _mm_and_si64(mm0, (__m64)mmx_mask_f8); \
464 mm2 = _mm_and_si64(mm2, (__m64)mmx_mask_fc); \
465 mm1 = _mm_and_si64(mm1, (__m64)mmx_mask_f8); \
466 mm0 = _mm_srli_pi16(mm0, 3); \
467 mm4 = _mm_setzero_si64(); \
468 mm5 = mm0; \
469 mm7 = mm2; \
471 mm2 = _mm_unpacklo_pi8(mm2, mm4); \
472 mm0 = _mm_unpacklo_pi8(mm0, mm1); \
473 mm2 = _mm_slli_pi16(mm2, 3); \
474 mm0 = _mm_or_si64(mm0, mm2); \
475 mm6 = (__m64)*(uint64_t *)(p_y + 8); \
476 *(uint64_t *)p_buffer = (uint64_t)mm0; \
478 mm7 = _mm_unpackhi_pi8(mm7, mm4); \
479 mm5 = _mm_unpackhi_pi8(mm5, mm1); \
480 mm7 = _mm_slli_pi16(mm7, 3); \
481 mm0 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_u + 4)); \
482 mm5 = _mm_or_si64(mm5, mm7); \
483 mm1 = _mm_cvtsi32_si64((int)*(uint32_t *)(p_v + 4)); \
484 *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;
486 #define MMX_UNPACK_32_ARGB \
487 mm3 = _mm_setzero_si64(); \
488 mm4 = mm0; \
489 mm4 = _mm_unpacklo_pi8(mm4, mm2); \
490 mm5 = mm1; \
491 mm5 = _mm_unpacklo_pi8(mm5, mm3); \
492 mm6 = mm4; \
493 mm4 = _mm_unpacklo_pi16(mm4, mm5); \
494 *(uint64_t *)p_buffer = (uint64_t)mm4; \
495 mm6 = _mm_unpackhi_pi16(mm6, mm5); \
496 *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6;\
497 mm0 = _mm_unpackhi_pi8(mm0, mm2); \
498 mm1 = _mm_unpackhi_pi8(mm1, mm3); \
499 mm5 = mm0; \
500 mm5 = _mm_unpacklo_pi16(mm5, mm1); \
501 *(uint64_t *)(p_buffer + 4) = (uint64_t)mm5;\
502 mm0 = _mm_unpackhi_pi16(mm0, mm1); \
503 *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;
505 #define MMX_UNPACK_32_RGBA \
506 mm3 = _mm_setzero_si64(); \
507 mm4 = mm2; \
508 mm4 = _mm_unpacklo_pi8(mm4, mm1); \
509 mm3 = _mm_unpacklo_pi8(mm3, mm0); \
510 mm5 = mm3; \
511 mm3 = _mm_unpacklo_pi16(mm3, mm4); \
512 *(uint64_t *)p_buffer = (uint64_t)mm3; \
513 mm5 = _mm_unpackhi_pi16(mm5, mm4); \
514 *(uint64_t *)(p_buffer + 2) = (uint64_t)mm5;\
515 mm6 = _mm_setzero_si64(); \
516 mm2 = _mm_unpackhi_pi8(mm2, mm1); \
517 mm6 = _mm_unpackhi_pi8(mm6, mm0); \
518 mm0 = mm6; \
519 mm6 = _mm_unpacklo_pi16(mm6, mm2); \
520 *(uint64_t *)(p_buffer + 4) = (uint64_t)mm6;\
521 mm0 = _mm_unpackhi_pi16(mm0, mm2); \
522 *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;
524 #define MMX_UNPACK_32_BGRA \
525 mm3 = _mm_setzero_si64(); \
526 mm4 = mm2; \
527 mm4 = _mm_unpacklo_pi8(mm4, mm0); \
528 mm3 = _mm_unpacklo_pi8(mm3, mm1); \
529 mm5 = mm3; \
530 mm3 = _mm_unpacklo_pi16(mm3, mm4); \
531 *(uint64_t *)p_buffer = (uint64_t)mm3; \
532 mm5 = _mm_unpackhi_pi16(mm5, mm4); \
533 *(uint64_t *)(p_buffer + 2) = (uint64_t)mm5;\
534 mm6 = _mm_setzero_si64(); \
535 mm2 = _mm_unpackhi_pi8(mm2, mm0); \
536 mm6 = _mm_unpackhi_pi8(mm6, mm1); \
537 mm0 = mm6; \
538 mm6 = _mm_unpacklo_pi16(mm6, mm2); \
539 *(uint64_t *)(p_buffer + 4) = (uint64_t)mm6;\
540 mm0 = _mm_unpackhi_pi16(mm0, mm2); \
541 *(uint64_t *)(p_buffer + 6) = (uint64_t)mm0;
543 #define MMX_UNPACK_32_ABGR \
544 mm3 = _mm_setzero_si64(); \
545 mm4 = mm1; \
546 mm4 = _mm_unpacklo_pi8(mm4, mm2); \
547 mm5 = mm0; \
548 mm5 = _mm_unpacklo_pi8(mm5, mm3); \
549 mm6 = mm4; \
550 mm4 = _mm_unpacklo_pi16(mm4, mm5); \
551 *(uint64_t *)p_buffer = (uint64_t)mm4; \
552 mm6 = _mm_unpackhi_pi16(mm6, mm5); \
553 *(uint64_t *)(p_buffer + 2) = (uint64_t)mm6;\
554 mm1 = _mm_unpackhi_pi8(mm1, mm2); \
555 mm0 = _mm_unpackhi_pi8(mm0, mm3); \
556 mm2 = mm1; \
557 mm1 = _mm_unpacklo_pi16(mm1, mm0); \
558 *(uint64_t *)(p_buffer + 4) = (uint64_t)mm1;\
559 mm2 = _mm_unpackhi_pi16(mm2, mm0); \
560 *(uint64_t *)(p_buffer + 6) = (uint64_t)mm2;
562 #endif
564 #elif defined( MODULE_NAME_IS_i420_rgb_sse2 )
566 #if defined(CAN_COMPILE_SSE2)
568 /* SSE2 assembly */
570 #define SSE2_CALL(SSE2_INSTRUCTIONS) \
571 do { \
572 __asm__ __volatile__( \
573 ".p2align 3 \n\t" \
574 SSE2_INSTRUCTIONS \
576 : "r" (p_y), "r" (p_u), \
577 "r" (p_v), "r" (p_buffer) \
578 : "eax" ); \
579 } while(0)
581 #define SSE2_END __asm__ __volatile__ ( "sfence" ::: "memory" )
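/* Illustration only: the SSE2 path processes 16 pixels (16 Y, 8 Cb, 8 Cr) per
 * iteration and comes in *_ALIGNED variants (movdqa/movntdq, 16-byte aligned
 * pointers required) and *_UNALIGNED variants (movdqu). A caller would look
 * roughly like:
 *
 *   SSE2_CALL( SSE2_INIT_16_ALIGNED
 *              SSE2_YUV_MUL
 *              SSE2_YUV_ADD
 *              SSE2_UNPACK_16_ALIGNED );
 *   p_y += 16; p_u += 8; p_v += 8;
 *   p_buffer += 16;                  // 16 16-bit pixels written
 *   ...
 *   SSE2_END;                        // sfence after the non-temporal stores
 */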
583 #define SSE2_INIT_16_ALIGNED " \n\
584 movq (%1), %%xmm0 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
585 movq (%2), %%xmm1 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
586 pxor %%xmm4, %%xmm4 # zero mm4 \n\
587 movdqa (%0), %%xmm6 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
590 #define SSE2_INIT_16_UNALIGNED " \n\
591 movq (%1), %%xmm0 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
592 movq (%2), %%xmm1 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
593 pxor %%xmm4, %%xmm4 # zero mm4 \n\
594 movdqu (%0), %%xmm6 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
595 prefetchnta (%3) # Tell CPU not to cache output RGB data \n\
598 #define SSE2_INIT_32_ALIGNED " \n\
599 movq (%1), %%xmm0 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
600 movq (%2), %%xmm1 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
601 pxor %%xmm4, %%xmm4 # zero mm4 \n\
602 movdqa (%0), %%xmm6 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
605 #define SSE2_INIT_32_UNALIGNED " \n\
606 movq (%1), %%xmm0 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
607 movq (%2), %%xmm1 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
608 pxor %%xmm4, %%xmm4 # zero mm4 \n\
609 movdqu (%0), %%xmm6 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
610 prefetchnta (%3) # Tell CPU not to cache output RGB data \n\
613 #define SSE2_YUV_MUL " \n\
614 # convert the chroma part \n\
615 punpcklbw %%xmm4, %%xmm0 # scatter 8 Cb 00 u3 00 u2 00 u1 00 u0 \n\
616 punpcklbw %%xmm4, %%xmm1 # scatter 8 Cr 00 v3 00 v2 00 v1 00 v0 \n\
617 movl $0x00800080, %%eax # \n\
618 movd %%eax, %%xmm5 # \n\
619 pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 0080 0080 ... 0080 0080 \n\
620 psubsw %%xmm5, %%xmm0 # Cb -= 128 \n\
621 psubsw %%xmm5, %%xmm1 # Cr -= 128 \n\
622 psllw $3, %%xmm0 # Promote precision \n\
623 psllw $3, %%xmm1 # Promote precision \n\
624 movdqa %%xmm0, %%xmm2 # Copy 8 Cb 00 u3 00 u2 00 u1 00 u0 \n\
625 movdqa %%xmm1, %%xmm3 # Copy 8 Cr 00 v3 00 v2 00 v1 00 v0 \n\
626 movl $0xf37df37d, %%eax # \n\
627 movd %%eax, %%xmm5 # \n\
628 pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to f37d f37d ... f37d f37d \n\
629 pmulhw %%xmm5, %%xmm2 # Mul Cb with green coeff -> Cb green \n\
630 movl $0xe5fce5fc, %%eax # \n\
631 movd %%eax, %%xmm5 # \n\
632 pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to e5fc e5fc ... e5fc e5fc \n\
633 pmulhw %%xmm5, %%xmm3 # Mul Cr with green coeff -> Cr green \n\
634 movl $0x40934093, %%eax # \n\
635 movd %%eax, %%xmm5 # \n\
636 pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 4093 4093 ... 4093 4093 \n\
637 pmulhw %%xmm5, %%xmm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 \n\
638 movl $0x33123312, %%eax # \n\
639 movd %%eax, %%xmm5 # \n\
640 pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 3312 3312 ... 3312 3312 \n\
641 pmulhw %%xmm5, %%xmm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 \n\
642 paddsw %%xmm3, %%xmm2 # Cb green + Cr green -> Cgreen \n\
644 # convert the luma part \n\
645 movl $0x10101010, %%eax # \n\
646 movd %%eax, %%xmm5 # \n\
647 pshufd $0, %%xmm5, %%xmm5 # Set xmm5 to 1010 1010 ... 1010 1010 \n\
648 psubusb %%xmm5, %%xmm6 # Y -= 16 \n\
649 movdqa %%xmm6, %%xmm7 # Copy 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
650 movl $0x00ff00ff, %%eax # \n\
651 movd %%eax, %%xmm5 # \n\
652 pshufd $0, %%xmm5, %%xmm5 # set xmm5 to 00ff 00ff ... 00ff 00ff \n\
653 pand %%xmm5, %%xmm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0 \n\
654 psrlw $8, %%xmm7 # get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 \n\
655 psllw $3, %%xmm6 # Promote precision \n\
656 psllw $3, %%xmm7 # Promote precision \n\
657 movl $0x253f253f, %%eax # \n\
658 movd %%eax, %%xmm5 # \n\
659 pshufd $0, %%xmm5, %%xmm5 # set xmm5 to 253f 253f ... 253f 253f \n\
660 pmulhw %%xmm5, %%xmm6 # Mul 8 Y even 00 y6 00 y4 00 y2 00 y0 \n\
661 pmulhw %%xmm5, %%xmm7 # Mul 8 Y odd 00 y7 00 y5 00 y3 00 y1 \n\
664 #define SSE2_YUV_ADD " \n\
665 # Do horizontal and vertical scaling \n\
666 movdqa %%xmm0, %%xmm3 # Copy Cblue \n\
667 movdqa %%xmm1, %%xmm4 # Copy Cred \n\
668 movdqa %%xmm2, %%xmm5 # Copy Cgreen \n\
669 paddsw %%xmm6, %%xmm0 # Y even + Cblue 00 B6 00 B4 00 B2 00 B0 \n\
670 paddsw %%xmm7, %%xmm3 # Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 \n\
671 paddsw %%xmm6, %%xmm1 # Y even + Cred 00 R6 00 R4 00 R2 00 R0 \n\
672 paddsw %%xmm7, %%xmm4 # Y odd + Cred 00 R7 00 R5 00 R3 00 R1 \n\
673 paddsw %%xmm6, %%xmm2 # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 \n\
674 paddsw %%xmm7, %%xmm5 # Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 \n\
676 # Limit RGB even to 0..255 \n\
677 packuswb %%xmm0, %%xmm0 # B6 B4 B2 B0 / B6 B4 B2 B0 \n\
678 packuswb %%xmm1, %%xmm1 # R6 R4 R2 R0 / R6 R4 R2 R0 \n\
679 packuswb %%xmm2, %%xmm2 # G6 G4 G2 G0 / G6 G4 G2 G0 \n\
681 # Limit RGB odd to 0..255 \n\
682 packuswb %%xmm3, %%xmm3 # B7 B5 B3 B1 / B7 B5 B3 B1 \n\
683 packuswb %%xmm4, %%xmm4 # R7 R5 R3 R1 / R7 R5 R3 R1 \n\
684 packuswb %%xmm5, %%xmm5 # G7 G5 G3 G1 / G7 G5 G3 G1 \n\
686 # Interleave RGB even and odd \n\
687 punpcklbw %%xmm3, %%xmm0 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
688 punpcklbw %%xmm4, %%xmm1 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
689 punpcklbw %%xmm5, %%xmm2 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
692 #define SSE2_UNPACK_15_ALIGNED " \n\
693 # mask unneeded bits off \n\
694 movl $0xf8f8f8f8, %%eax # \n\
695 movd %%eax, %%xmm5 # \n\
696 pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
697 pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
698 psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
699 pand %%xmm5, %%xmm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
700 pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
701 psrlw $1,%%xmm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
702 pxor %%xmm4, %%xmm4 # zero mm4 \n\
703 movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
704 movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
706 # convert rgb24 plane to rgb15 pack for pixel 0-7 \n\
707 punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3______ \n\
708 punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
709 psllw $2,%%xmm2 # ________ ____g7g6 g5g4g3__ ________ \n\
710 por %%xmm2, %%xmm0 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
711 movntdq %%xmm0, (%3) # store pixel 0-7 \n\
713 # convert rgb24 plane to rgb15 pack for pixel 8-15 \n\
714 punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3______ \n\
715 punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
716 psllw $2,%%xmm7 # ________ ____g7g6 g5g4g3__ ________ \n\
717 por %%xmm7, %%xmm5 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
718 movntdq %%xmm5, 16(%3) # store pixel 8-15 \n\
721 #define SSE2_UNPACK_15_UNALIGNED " \n\
722 # mask unneeded bits off \n\
723 movl $0xf8f8f8f8, %%eax # \n\
724 movd %%eax, %%xmm5 # \n\
725 pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
726 pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
727 psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
728 pand %%xmm5, %%xmm2 # g7g6g5g4 g3______ g7g6g5g4 g3______ \n\
729 pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
730 psrlw $1,%%xmm1 # __r7r6r5 r4r3____ __r7r6r5 r4r3____ \n\
731 pxor %%xmm4, %%xmm4 # zero mm4 \n\
732 movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
733 movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
735 # convert rgb24 plane to rgb15 pack for pixel 0-7 \n\
736 punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3______ \n\
737 punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
738 psllw $2,%%xmm2 # ________ ____g7g6 g5g4g3__ ________ \n\
739 por %%xmm2, %%xmm0 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
740 movdqu %%xmm0, (%3) # store pixel 0-7 \n\
742 # convert rgb24 plane to rgb15 pack for pixel 8-15 \n\
743 punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3______ \n\
744 punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
745 psllw $2,%%xmm7 # ________ ____g7g6 g5g4g3__ ________ \n\
746 por %%xmm7, %%xmm5 # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3 \n\
747 movdqu %%xmm5, 16(%3) # store pixel 8-15 \n\
750 #define SSE2_UNPACK_16_ALIGNED " \n\
751 # mask unneeded bits off \n\
752 movl $0xf8f8f8f8, %%eax # \n\
753 movd %%eax, %%xmm5 # \n\
754 pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
755 pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
756 pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
757 movl $0xfcfcfcfc, %%eax # \n\
758 movd %%eax, %%xmm5 # \n\
759 pshufd $0, %%xmm5, %%xmm5 # set xmm5 to fcfc fcfc ... fcfc fcfc \n\
760 pand %%xmm5, %%xmm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
761 psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
762 pxor %%xmm4, %%xmm4 # zero mm4 \n\
763 movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
764 movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
766 # convert rgb24 plane to rgb16 pack for pixel 0-7 \n\
767 punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3g2____ \n\
768 punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
769 psllw $3,%%xmm2 # ________ __g7g6g5 g4g3g2__ ________ \n\
770 por %%xmm2, %%xmm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
771 movntdq %%xmm0, (%3) # store pixel 0-7 \n\
773 # convert rgb24 plane to rgb16 pack for pixel 8-15 \n\
774 punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3g2____ \n\
775 punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
776 psllw $3,%%xmm7 # ________ __g7g6g5 g4g3g2__ ________ \n\
777 por %%xmm7, %%xmm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
778 movntdq %%xmm5, 16(%3) # store pixel 8-15 \n\
781 #define SSE2_UNPACK_16_UNALIGNED " \n\
782 # mask unneeded bits off \n\
783 movl $0xf8f8f8f8, %%eax # \n\
784 movd %%eax, %%xmm5 # \n\
785 pshufd $0, %%xmm5, %%xmm5 # set xmm5 to f8f8 f8f8 ... f8f8 f8f8 \n\
786 pand %%xmm5, %%xmm0 # b7b6b5b4 b3______ b7b6b5b4 b3______ \n\
787 pand %%xmm5, %%xmm1 # r7r6r5r4 r3______ r7r6r5r4 r3______ \n\
788 movl $0xfcfcfcfc, %%eax # \n\
789 movd %%eax, %%xmm5 # \n\
790 pshufd $0, %%xmm5, %%xmm5 # set xmm5 to fcfc fcfc ... fcfc fcfc \n\
791 pand %%xmm5, %%xmm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____ \n\
792 psrlw $3,%%xmm0 # ______b7 b6b5b4b3 ______b7 b6b5b4b3 \n\
793 pxor %%xmm4, %%xmm4 # zero mm4 \n\
794 movdqa %%xmm0, %%xmm5 # Copy B15-B0 \n\
795 movdqa %%xmm2, %%xmm7 # Copy G15-G0 \n\
797 # convert rgb24 plane to rgb16 pack for pixel 0-7 \n\
798 punpcklbw %%xmm4, %%xmm2 # ________ ________ g7g6g5g4 g3g2____ \n\
799 punpcklbw %%xmm1, %%xmm0 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
800 psllw $3,%%xmm2 # ________ __g7g6g5 g4g3g2__ ________ \n\
801 por %%xmm2, %%xmm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
802 movdqu %%xmm0, (%3) # store pixel 0-7 \n\
804 # convert rgb24 plane to rgb16 pack for pixel 8-15 \n\
805 punpckhbw %%xmm4, %%xmm7 # ________ ________ g7g6g5g4 g3g2____ \n\
806 punpckhbw %%xmm1, %%xmm5 # r7r6r5r4 r3______ ______b7 b6b5b4b3 \n\
807 psllw $3,%%xmm7 # ________ __g7g6g5 g4g3g2__ ________ \n\
808 por %%xmm7, %%xmm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 \n\
809 movdqu %%xmm5, 16(%3) # store pixel 8-15 \n\
812 #define SSE2_UNPACK_32_ARGB_ALIGNED " \n\
813 pxor %%xmm3, %%xmm3 # zero xmm3 \n\
814 movdqa %%xmm0, %%xmm4 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
815 punpcklbw %%xmm2, %%xmm4 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
816 movdqa %%xmm1, %%xmm5 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
817 punpcklbw %%xmm3, %%xmm5 # 00 R3 00 R2 00 R1 00 R0 \n\
818 movdqa %%xmm4, %%xmm6 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
819 punpcklwd %%xmm5, %%xmm4 # 00 R1 G1 B1 00 R0 G0 B0 \n\
820 movntdq %%xmm4, (%3) # Store ARGB3 ARGB2 ARGB1 ARGB0 \n\
821 punpckhwd %%xmm5, %%xmm6 # 00 R3 G3 B3 00 R2 G2 B2 \n\
822 movntdq %%xmm6, 16(%3) # Store ARGB7 ARGB6 ARGB5 ARGB4 \n\
823 punpckhbw %%xmm2, %%xmm0 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
824 punpckhbw %%xmm3, %%xmm1 # 00 R7 00 R6 00 R5 00 R4 \n\
825 movdqa %%xmm0, %%xmm5 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
826 punpcklwd %%xmm1, %%xmm5 # 00 R5 G5 B5 00 R4 G4 B4 \n\
827 movntdq %%xmm5, 32(%3) # Store ARGB11 ARGB10 ARGB9 ARGB8 \n\
828 punpckhwd %%xmm1, %%xmm0 # 00 R7 G7 B7 00 R6 G6 B6 \n\
829 movntdq %%xmm0, 48(%3) # Store ARGB15 ARGB14 ARGB13 ARGB12 \n\
832 #define SSE2_UNPACK_32_ARGB_UNALIGNED " \n\
833 pxor %%xmm3, %%xmm3 # zero xmm3 \n\
834 movdqa %%xmm0, %%xmm4 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
835 punpcklbw %%xmm2, %%xmm4 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
836 movdqa %%xmm1, %%xmm5 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
837 punpcklbw %%xmm3, %%xmm5 # 00 R3 00 R2 00 R1 00 R0 \n\
838 movdqa %%xmm4, %%xmm6 # G3 B3 G2 B2 G1 B1 G0 B0 \n\
839 punpcklwd %%xmm5, %%xmm4 # 00 R1 G1 B1 00 R0 G0 B0 \n\
840 movdqu %%xmm4, (%3) # Store ARGB3 ARGB2 ARGB1 ARGB0 \n\
841 punpckhwd %%xmm5, %%xmm6 # 00 R3 G3 B3 00 R2 G2 B2 \n\
842 movdqu %%xmm6, 16(%3) # Store ARGB7 ARGB6 ARGB5 ARGB4 \n\
843 punpckhbw %%xmm2, %%xmm0 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
844 punpckhbw %%xmm3, %%xmm1 # 00 R7 00 R6 00 R5 00 R4 \n\
845 movdqa %%xmm0, %%xmm5 # G7 B7 G6 B6 G5 B5 G4 B4 \n\
846 punpcklwd %%xmm1, %%xmm5 # 00 R5 G5 B5 00 R4 G4 B4 \n\
847 movdqu %%xmm5, 32(%3) # Store ARGB11 ARGB10 ARGB9 ARGB8 \n\
848 punpckhwd %%xmm1, %%xmm0 # 00 R7 G7 B7 00 R6 G6 B6 \n\
849 movdqu %%xmm0, 48(%3) # Store ARGB15 ARGB14 ARGB13 ARGB12 \n\
852 #define SSE2_UNPACK_32_RGBA_ALIGNED " \n\
853 pxor %%xmm3, %%xmm3 # zero mm3 \n\
854 movdqa %%xmm2, %%xmm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
855 punpcklbw %%xmm1, %%xmm4 # R3 G3 R2 G2 R1 G1 R0 G0 \n\
856 punpcklbw %%xmm0, %%xmm3 # B3 00 B2 00 B1 00 B0 00 \n\
857 movdqa %%xmm3, %%xmm5 # B3 00 B2 00 B1 00 B0 00 \n\
858 punpcklwd %%xmm4, %%xmm3 # R1 G1 B1 00 R0 G0 B0 00 \n\
859 movntdq %%xmm3, (%3) # Store RGBA3 RGBA2 RGBA1 RGBA0 \n\
860 punpckhwd %%xmm4, %%xmm5 # R3 G3 B3 00 R2 G2 B2 00 \n\
861 movntdq %%xmm5, 16(%3) # Store RGBA7 RGBA6 RGBA5 RGBA4 \n\
862 pxor %%xmm6, %%xmm6 # zero mm6 \n\
863 punpckhbw %%xmm1, %%xmm2 # R7 G7 R6 G6 R5 G5 R4 G4 \n\
864 punpckhbw %%xmm0, %%xmm6 # B7 00 B6 00 B5 00 B4 00 \n\
865 movdqa %%xmm6, %%xmm0 # B7 00 B6 00 B5 00 B4 00 \n\
866 punpcklwd %%xmm2, %%xmm6 # R5 G5 B5 00 R4 G4 B4 00 \n\
867 movntdq %%xmm6, 32(%3) # Store RGBA11 RGBA10 RGBA9 RGBA8 \n\
868 punpckhwd %%xmm2, %%xmm0 # R7 G7 B7 00 R6 G6 B6 00 \n\
869 movntdq %%xmm0, 48(%3) # Store RGBA15 RGBA14 RGBA13 RGBA12 \n\
872 #define SSE2_UNPACK_32_RGBA_UNALIGNED " \n\
873 pxor %%xmm3, %%xmm3 # zero mm3 \n\
874 movdqa %%xmm2, %%xmm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
875 punpcklbw %%xmm1, %%xmm4 # R3 G3 R2 G2 R1 G1 R0 G0 \n\
876 punpcklbw %%xmm0, %%xmm3 # B3 00 B2 00 B1 00 B0 00 \n\
877 movdqa %%xmm3, %%xmm5 # B3 00 B2 00 B1 00 B0 00 \n\
878 punpcklwd %%xmm4, %%xmm3 # R1 G1 B1 00 R0 G0 B0 00 \n\
879 movdqu %%xmm3, (%3) # Store RGBA3 RGBA2 RGBA1 RGBA0 \n\
880 punpckhwd %%xmm4, %%xmm5 # R3 G3 B3 00 R2 G2 B2 00 \n\
881 movdqu %%xmm5, 16(%3) # Store RGBA7 RGBA6 RGBA5 RGBA4 \n\
882 pxor %%xmm6, %%xmm6 # zero mm6 \n\
883 punpckhbw %%xmm1, %%xmm2 # R7 G7 R6 G6 R5 G5 R4 G4 \n\
884 punpckhbw %%xmm0, %%xmm6 # B7 00 B6 00 B5 00 B4 00 \n\
885 movdqa %%xmm6, %%xmm0 # B7 00 B6 00 B5 00 B4 00 \n\
886 punpcklwd %%xmm2, %%xmm6 # R5 G5 B5 00 R4 G4 B4 00 \n\
887 movdqu %%xmm6, 32(%3) # Store RGBA11 RGBA10 RGBA9 RGBA8 \n\
888 punpckhwd %%xmm2, %%xmm0 # R7 G7 B7 00 R6 G6 B6 00 \n\
889 movdqu %%xmm0, 48(%3) # Store RGBA15 RGBA14 RGBA13 RGBA12 \n\
892 #define SSE2_UNPACK_32_BGRA_ALIGNED " \n\
893 pxor %%xmm3, %%xmm3 # zero mm3 \n\
894 movdqa %%xmm2, %%xmm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
895 punpcklbw %%xmm0, %%xmm4 # B3 G3 B2 G2 B1 G1 B0 G0 \n\
896 punpcklbw %%xmm1, %%xmm3 # R3 00 R2 00 R1 00 R0 00 \n\
897 movdqa %%xmm3, %%xmm5 # R3 00 R2 00 R1 00 R0 00 \n\
898 punpcklwd %%xmm4, %%xmm3 # B1 G1 R1 00 B0 G0 R0 00 \n\
899 movntdq %%xmm3, (%3) # Store BGRA3 BGRA2 BGRA1 BGRA0 \n\
900 punpckhwd %%xmm4, %%xmm5 # B3 G3 R3 00 B2 G2 R2 00 \n\
901 movntdq %%xmm5, 16(%3) # Store BGRA7 BGRA6 BGRA5 BGRA4 \n\
902 pxor %%xmm6, %%xmm6 # zero mm6 \n\
903 punpckhbw %%xmm0, %%xmm2 # B7 G7 B6 G6 B5 G5 B4 G4 \n\
904 punpckhbw %%xmm1, %%xmm6 # R7 00 R6 00 R5 00 R4 00 \n\
905 movdqa %%xmm6, %%xmm0 # R7 00 R6 00 R5 00 R4 00 \n\
906 punpcklwd %%xmm2, %%xmm6 # B5 G5 R5 00 B4 G4 R4 00 \n\
907 movntdq %%xmm6, 32(%3) # Store BGRA11 BGRA10 BGRA9 BGRA8 \n\
908 punpckhwd %%xmm2, %%xmm0 # B7 G7 R7 00 B6 G6 R6 00 \n\
909 movntdq %%xmm0, 48(%3) # Store BGRA15 BGRA14 BGRA13 BGRA12 \n\
912 #define SSE2_UNPACK_32_BGRA_UNALIGNED " \n\
913 pxor %%xmm3, %%xmm3 # zero mm3 \n\
914 movdqa %%xmm2, %%xmm4 # G7 G6 G5 G4 G3 G2 G1 G0 \n\
915 punpcklbw %%xmm0, %%xmm4 # B3 G3 B2 G2 B1 G1 B0 G0 \n\
916 punpcklbw %%xmm1, %%xmm3 # R3 00 R2 00 R1 00 R0 00 \n\
917 movdqa %%xmm3, %%xmm5 # R3 00 R2 00 R1 00 R0 00 \n\
918 punpcklwd %%xmm4, %%xmm3 # B1 G1 R1 00 B0 G0 R0 00 \n\
919 movdqu %%xmm3, (%3) # Store BGRA3 BGRA2 BGRA1 BGRA0 \n\
920 punpckhwd %%xmm4, %%xmm5 # B3 G3 R3 00 B2 G2 R2 00 \n\
921 movdqu %%xmm5, 16(%3) # Store BGRA7 BGRA6 BGRA5 BGRA4 \n\
922 pxor %%xmm6, %%xmm6 # zero mm6 \n\
923 punpckhbw %%xmm0, %%xmm2 # B7 G7 B6 G6 B5 G5 B4 G4 \n\
924 punpckhbw %%xmm1, %%xmm6 # R7 00 R6 00 R5 00 R4 00 \n\
925 movdqa %%xmm6, %%xmm0 # R7 00 R6 00 R5 00 R4 00 \n\
926 punpcklwd %%xmm2, %%xmm6 # B5 G5 R5 00 B4 G4 R4 00 \n\
927 movdqu %%xmm6, 32(%3) # Store BGRA11 BGRA10 BGRA9 BGRA8 \n\
928 punpckhwd %%xmm2, %%xmm0 # B7 G7 R7 00 B6 G6 R6 00 \n\
929 movdqu %%xmm0, 48(%3) # Store BGRA15 BGRA14 BGRA13 BGRA12 \n\
932 #define SSE2_UNPACK_32_ABGR_ALIGNED " \n\
933 pxor %%xmm3, %%xmm3 # zero mm3 \n\
934 movdqa %%xmm1, %%xmm4 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
935 punpcklbw %%xmm2, %%xmm4 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
936 movdqa %%xmm0, %%xmm5 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
937 punpcklbw %%xmm3, %%xmm5 # 00 B3 00 B2 00 B1 00 B0 \n\
938 movdqa %%xmm4, %%xmm6 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
939 punpcklwd %%xmm5, %%xmm4 # 00 B1 G1 R1 00 B0 G0 R0 \n\
940 movntdq %%xmm4, (%3) # Store ABGR3 ABGR2 ABGR1 ABGR0 \n\
941 punpckhwd %%xmm5, %%xmm6 # 00 B3 G3 R3 00 B2 G2 R2 \n\
942 movntdq %%xmm6, 16(%3) # Store ABGR7 ABGR6 ABGR5 ABGR4 \n\
943 punpckhbw %%xmm2, %%xmm1 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
944 punpckhbw %%xmm3, %%xmm0 # 00 B7 00 B6 00 B5 00 B4 \n\
945 movdqa %%xmm1, %%xmm2 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
946 punpcklwd %%xmm0, %%xmm1 # 00 B5 G5 R5 00 B4 G4 R4 \n\
947 movntdq %%xmm1, 32(%3) # Store ABGR11 ABGR10 ABGR9 ABGR8 \n\
948 punpckhwd %%xmm0, %%xmm2 # 00 B7 G7 R7 00 B6 G6 R6 \n\
949 movntdq %%xmm2, 48(%3) # Store ABGR15 ABGR14 ABGR13 ABGR12 \n\
952 #define SSE2_UNPACK_32_ABGR_UNALIGNED " \n\
953 pxor %%xmm3, %%xmm3 # zero mm3 \n\
954 movdqa %%xmm1, %%xmm4 # R7 R6 R5 R4 R3 R2 R1 R0 \n\
955 punpcklbw %%xmm2, %%xmm4 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
956 movdqa %%xmm0, %%xmm5 # B7 B6 B5 B4 B3 B2 B1 B0 \n\
957 punpcklbw %%xmm3, %%xmm5 # 00 B3 00 B2 00 B1 00 B0 \n\
958 movdqa %%xmm4, %%xmm6 # G3 R3 G2 R2 G1 R1 G0 R0 \n\
959 punpcklwd %%xmm5, %%xmm4 # 00 B1 G1 R1 00 B0 G0 R0 \n\
960 movdqu %%xmm4, (%3) # Store ABGR3 ABGR2 ABGR1 ABGR0 \n\
961 punpckhwd %%xmm5, %%xmm6 # 00 B3 G3 R3 00 B2 G2 R2 \n\
962 movdqu %%xmm6, 16(%3) # Store ABGR7 ABGR6 ABGR5 ABGR4 \n\
963 punpckhbw %%xmm2, %%xmm1 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
964 punpckhbw %%xmm3, %%xmm0 # 00 B7 00 B6 00 B5 00 B4 \n\
965 movdqa %%xmm1, %%xmm2 # G7 R7 G6 R6 G5 R5 G4 R4 \n\
966 punpcklwd %%xmm0, %%xmm1 # 00 B5 G5 R5 00 B4 G4 R4 \n\
967 movdqu %%xmm1, 32(%3) # Store ABGR11 ABGR10 ABGR9 ABGR8 \n\
968 punpckhwd %%xmm0, %%xmm2 # 00 B7 G7 R7 00 B6 G6 R6 \n\
969 movdqu %%xmm2, 48(%3) # Store ABGR15 ABGR14 ABGR13 ABGR12 \n\
972 #elif defined(HAVE_SSE2_INTRINSICS)
974 /* SSE2 intrinsics */
976 #include <emmintrin.h>
978 #define SSE2_CALL(SSE2_INSTRUCTIONS) \
979 do { \
980 __m128i xmm0, xmm1, xmm2, xmm3, \
981 xmm4, xmm5, xmm6, xmm7; \
982 SSE2_INSTRUCTIONS \
983 } while(0)
985 #define SSE2_END _mm_sfence()
987 #define SSE2_INIT_16_ALIGNED \
988 xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
989 xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
990 xmm4 = _mm_setzero_si128(); \
991 xmm6 = _mm_load_si128((__m128i *)p_y);
993 #define SSE2_INIT_16_UNALIGNED \
994 xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
995 xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
996 xmm4 = _mm_setzero_si128(); \
997 xmm6 = _mm_loadu_si128((__m128i *)p_y); \
998 _mm_prefetch(p_buffer, _MM_HINT_NTA);
1000 #define SSE2_INIT_32_ALIGNED \
1001 xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
1002 xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
1003 xmm4 = _mm_setzero_si128(); \
1004 xmm6 = _mm_load_si128((__m128i *)p_y);
1006 #define SSE2_INIT_32_UNALIGNED \
1007 xmm0 = _mm_loadl_epi64((__m128i *)p_u); \
1008 xmm1 = _mm_loadl_epi64((__m128i *)p_v); \
1009 xmm4 = _mm_setzero_si128(); \
1010 xmm6 = _mm_loadu_si128((__m128i *)p_y); \
1011 _mm_prefetch(p_buffer, _MM_HINT_NTA);
1013 #define SSE2_YUV_MUL \
1014 xmm0 = _mm_unpacklo_epi8(xmm0, xmm4); \
1015 xmm1 = _mm_unpacklo_epi8(xmm1, xmm4); \
1016 xmm5 = _mm_set1_epi32(0x00800080UL); \
1017 xmm0 = _mm_subs_epi16(xmm0, xmm5); \
1018 xmm1 = _mm_subs_epi16(xmm1, xmm5); \
1019 xmm0 = _mm_slli_epi16(xmm0, 3); \
1020 xmm1 = _mm_slli_epi16(xmm1, 3); \
1021 xmm2 = xmm0; \
1022 xmm3 = xmm1; \
1023 xmm5 = _mm_set1_epi32(0xf37df37dUL); \
1024 xmm2 = _mm_mulhi_epi16(xmm2, xmm5); \
1025 xmm5 = _mm_set1_epi32(0xe5fce5fcUL); \
1026 xmm3 = _mm_mulhi_epi16(xmm3, xmm5); \
1027 xmm5 = _mm_set1_epi32(0x40934093UL); \
1028 xmm0 = _mm_mulhi_epi16(xmm0, xmm5); \
1029 xmm5 = _mm_set1_epi32(0x33123312UL); \
1030 xmm1 = _mm_mulhi_epi16(xmm1, xmm5); \
1031 xmm2 = _mm_adds_epi16(xmm2, xmm3); \
1033 xmm5 = _mm_set1_epi32(0x10101010UL); \
1034 xmm6 = _mm_subs_epu8(xmm6, xmm5); \
1035 xmm7 = xmm6; \
1036 xmm5 = _mm_set1_epi32(0x00ff00ffUL); \
1037 xmm6 = _mm_and_si128(xmm6, xmm5); \
1038 xmm7 = _mm_srli_epi16(xmm7, 8); \
1039 xmm6 = _mm_slli_epi16(xmm6, 3); \
1040 xmm7 = _mm_slli_epi16(xmm7, 3); \
1041 xmm5 = _mm_set1_epi32(0x253f253fUL); \
1042 xmm6 = _mm_mulhi_epi16(xmm6, xmm5); \
1043 xmm7 = _mm_mulhi_epi16(xmm7, xmm5);
1045 #define SSE2_YUV_ADD \
1046 xmm3 = xmm0; \
1047 xmm4 = xmm1; \
1048 xmm5 = xmm2; \
1049 xmm0 = _mm_adds_epi16(xmm0, xmm6); \
1050 xmm3 = _mm_adds_epi16(xmm3, xmm7); \
1051 xmm1 = _mm_adds_epi16(xmm1, xmm6); \
1052 xmm4 = _mm_adds_epi16(xmm4, xmm7); \
1053 xmm2 = _mm_adds_epi16(xmm2, xmm6); \
1054 xmm5 = _mm_adds_epi16(xmm5, xmm7); \
1056 xmm0 = _mm_packus_epi16(xmm0, xmm0); \
1057 xmm1 = _mm_packus_epi16(xmm1, xmm1); \
1058 xmm2 = _mm_packus_epi16(xmm2, xmm2); \
1060 xmm3 = _mm_packus_epi16(xmm3, xmm3); \
1061 xmm4 = _mm_packus_epi16(xmm4, xmm4); \
1062 xmm5 = _mm_packus_epi16(xmm5, xmm5); \
1064 xmm0 = _mm_unpacklo_epi8(xmm0, xmm3); \
1065 xmm1 = _mm_unpacklo_epi8(xmm1, xmm4); \
1066 xmm2 = _mm_unpacklo_epi8(xmm2, xmm5);
1068 #define SSE2_UNPACK_15_ALIGNED \
1069 xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
1070 xmm0 = _mm_and_si128(xmm0, xmm5); \
1071 xmm0 = _mm_srli_epi16(xmm0, 3); \
1072 xmm2 = _mm_and_si128(xmm2, xmm5); \
1073 xmm1 = _mm_and_si128(xmm1, xmm5); \
1074 xmm1 = _mm_srli_epi16(xmm1, 1); \
1075 xmm4 = _mm_setzero_si128(); \
1076 xmm5 = xmm0; \
1077 xmm7 = xmm2; \
1079 xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
1080 xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
1081 xmm2 = _mm_slli_epi16(xmm2, 2); \
1082 xmm0 = _mm_or_si128(xmm0, xmm2); \
1083 _mm_stream_si128((__m128i*)p_buffer, xmm0); \
1085 xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
1086 xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
1087 xmm7 = _mm_slli_epi16(xmm7, 2); \
1088 xmm5 = _mm_or_si128(xmm5, xmm7); \
1089 _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);
1091 #define SSE2_UNPACK_15_UNALIGNED \
1092 xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
1093 xmm0 = _mm_and_si128(xmm0, xmm5); \
1094 xmm0 = _mm_srli_epi16(xmm0, 3); \
1095 xmm2 = _mm_and_si128(xmm2, xmm5); \
1096 xmm1 = _mm_and_si128(xmm1, xmm5); \
1097 xmm1 = _mm_srli_epi16(xmm1, 1); \
1098 xmm4 = _mm_setzero_si128(); \
1099 xmm5 = xmm0; \
1100 xmm7 = xmm2; \
1102 xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
1103 xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
1104 xmm2 = _mm_slli_epi16(xmm2, 2); \
1105 xmm0 = _mm_or_si128(xmm0, xmm2); \
1106 _mm_storeu_si128((__m128i*)p_buffer, xmm0); \
1108 xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
1109 xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
1110 xmm7 = _mm_slli_epi16(xmm7, 2); \
1111 xmm5 = _mm_or_si128(xmm5, xmm7); \
1112 _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);
1114 #define SSE2_UNPACK_16_ALIGNED \
1115 xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
1116 xmm0 = _mm_and_si128(xmm0, xmm5); \
1117 xmm1 = _mm_and_si128(xmm1, xmm5); \
1118 xmm5 = _mm_set1_epi32(0xfcfcfcfcUL); \
1119 xmm2 = _mm_and_si128(xmm2, xmm5); \
1120 xmm0 = _mm_srli_epi16(xmm0, 3); \
1121 xmm4 = _mm_setzero_si128(); \
1122 xmm5 = xmm0; \
1123 xmm7 = xmm2; \
1125 xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
1126 xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
1127 xmm2 = _mm_slli_epi16(xmm2, 3); \
1128 xmm0 = _mm_or_si128(xmm0, xmm2); \
1129 _mm_stream_si128((__m128i*)p_buffer, xmm0); \
1131 xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
1132 xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
1133 xmm7 = _mm_slli_epi16(xmm7, 3); \
1134 xmm5 = _mm_or_si128(xmm5, xmm7); \
1135 _mm_stream_si128((__m128i*)(p_buffer+8), xmm5);
1137 #define SSE2_UNPACK_16_UNALIGNED \
1138 xmm5 = _mm_set1_epi32(0xf8f8f8f8UL); \
1139 xmm0 = _mm_and_si128(xmm0, xmm5); \
1140 xmm1 = _mm_and_si128(xmm1, xmm5); \
1141 xmm5 = _mm_set1_epi32(0xfcfcfcfcUL); \
1142 xmm2 = _mm_and_si128(xmm2, xmm5); \
1143 xmm0 = _mm_srli_epi16(xmm0, 3); \
1144 xmm4 = _mm_setzero_si128(); \
1145 xmm5 = xmm0; \
1146 xmm7 = xmm2; \
1148 xmm2 = _mm_unpacklo_epi8(xmm2, xmm4); \
1149 xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \
1150 xmm2 = _mm_slli_epi16(xmm2, 3); \
1151 xmm0 = _mm_or_si128(xmm0, xmm2); \
1152 _mm_storeu_si128((__m128i*)p_buffer, xmm0); \
1154 xmm7 = _mm_unpackhi_epi8(xmm7, xmm4); \
1155 xmm5 = _mm_unpackhi_epi8(xmm5, xmm1); \
1156 xmm7 = _mm_slli_epi16(xmm7, 3); \
1157 xmm5 = _mm_or_si128(xmm5, xmm7); \
1158 _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5);
1160 #define SSE2_UNPACK_32_ARGB_ALIGNED \
1161 xmm3 = _mm_setzero_si128(); \
1162 xmm4 = xmm0; \
1163 xmm4 = _mm_unpacklo_epi8(xmm4, xmm2); \
1164 xmm5 = xmm1; \
1165 xmm5 = _mm_unpacklo_epi8(xmm5, xmm3); \
1166 xmm6 = xmm4; \
1167 xmm4 = _mm_unpacklo_epi16(xmm4, xmm5); \
1168 _mm_stream_si128((__m128i*)(p_buffer), xmm4); \
1169 xmm6 = _mm_unpackhi_epi16(xmm6, xmm5); \
1170 _mm_stream_si128((__m128i*)(p_buffer+4), xmm6); \
1171 xmm0 = _mm_unpackhi_epi8(xmm0, xmm2); \
1172 xmm1 = _mm_unpackhi_epi8(xmm1, xmm3); \
1173 xmm5 = xmm0; \
1174 xmm5 = _mm_unpacklo_epi16(xmm5, xmm1); \
1175 _mm_stream_si128((__m128i*)(p_buffer+8), xmm5); \
1176 xmm0 = _mm_unpackhi_epi16(xmm0, xmm1); \
1177 _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);
1179 #define SSE2_UNPACK_32_ARGB_UNALIGNED \
1180 xmm3 = _mm_setzero_si128(); \
1181 xmm4 = xmm0; \
1182 xmm4 = _mm_unpacklo_epi8(xmm4, xmm2); \
1183 xmm5 = xmm1; \
1184 xmm5 = _mm_unpacklo_epi8(xmm5, xmm3); \
1185 xmm6 = xmm4; \
1186 xmm4 = _mm_unpacklo_epi16(xmm4, xmm5); \
1187 _mm_storeu_si128((__m128i*)(p_buffer), xmm4); \
1188 xmm6 = _mm_unpackhi_epi16(xmm6, xmm5); \
1189 _mm_storeu_si128((__m128i*)(p_buffer+4), xmm6); \
1190 xmm0 = _mm_unpackhi_epi8(xmm0, xmm2); \
1191 xmm1 = _mm_unpackhi_epi8(xmm1, xmm3); \
1192 xmm5 = xmm0; \
1193 xmm5 = _mm_unpacklo_epi16(xmm5, xmm1); \
1194 _mm_storeu_si128((__m128i*)(p_buffer+8), xmm5); \
1195 xmm0 = _mm_unpackhi_epi16(xmm0, xmm1); \
1196 _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);
1198 #define SSE2_UNPACK_32_RGBA_ALIGNED \
1199 xmm3 = _mm_setzero_si128(); \
1200 xmm4 = xmm2; \
1201 xmm4 = _mm_unpacklo_epi8(xmm4, xmm1); \
1202 xmm3 = _mm_unpacklo_epi8(xmm3, xmm0); \
1203 xmm5 = xmm3; \
1204 xmm3 = _mm_unpacklo_epi16(xmm3, xmm4); \
1205 _mm_stream_si128((__m128i*)(p_buffer), xmm3); \
1206 xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
1207 _mm_stream_si128((__m128i*)(p_buffer+4), xmm5); \
1208 xmm6 = _mm_setzero_si128(); \
1209 xmm2 = _mm_unpackhi_epi8(xmm2, xmm1); \
1210 xmm6 = _mm_unpackhi_epi8(xmm6, xmm0); \
1211 xmm0 = xmm6; \
1212 xmm6 = _mm_unpacklo_epi16(xmm6, xmm2); \
1213 _mm_stream_si128((__m128i*)(p_buffer+8), xmm6); \
1214 xmm0 = _mm_unpackhi_epi16(xmm0, xmm2); \
1215 _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);
1217 #define SSE2_UNPACK_32_RGBA_UNALIGNED \
1218 xmm3 = _mm_setzero_si128(); \
1219 xmm4 = xmm2; \
1220 xmm4 = _mm_unpacklo_epi8(xmm4, xmm1); \
1221 xmm3 = _mm_unpacklo_epi8(xmm3, xmm0); \
1222 xmm5 = xmm3; \
1223 xmm3 = _mm_unpacklo_epi16(xmm3, xmm4); \
1224 _mm_storeu_si128((__m128i*)(p_buffer), xmm3); \
1225 xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
1226 _mm_storeu_si128((__m128i*)(p_buffer+4), xmm5); \
1227 xmm6 = _mm_setzero_si128(); \
1228 xmm2 = _mm_unpackhi_epi8(xmm2, xmm1); \
1229 xmm6 = _mm_unpackhi_epi8(xmm6, xmm0); \
1230 xmm0 = xmm6; \
1231 xmm6 = _mm_unpacklo_epi16(xmm6, xmm2); \
1232 _mm_storeu_si128((__m128i*)(p_buffer+8), xmm6); \
1233 xmm0 = _mm_unpackhi_epi16(xmm0, xmm2); \
1234 _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);
1236 #define SSE2_UNPACK_32_BGRA_ALIGNED \
1237 xmm3 = _mm_setzero_si128(); \
1238 xmm4 = xmm2; \
1239 xmm4 = _mm_unpacklo_epi8(xmm4, xmm0); \
1240 xmm3 = _mm_unpacklo_epi8(xmm3, xmm1); \
1241 xmm5 = xmm3; \
1242 xmm3 = _mm_unpacklo_epi16(xmm3, xmm4); \
1243 _mm_stream_si128((__m128i*)(p_buffer), xmm3); \
1244 xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
1245 _mm_stream_si128((__m128i*)(p_buffer+4), xmm5); \
1246 xmm6 = _mm_setzero_si128(); \
1247 xmm2 = _mm_unpackhi_epi8(xmm2, xmm0); \
1248 xmm6 = _mm_unpackhi_epi8(xmm6, xmm1); \
1249 xmm0 = xmm6; \
1250 xmm6 = _mm_unpacklo_epi16(xmm6, xmm2); \
1251 _mm_stream_si128((__m128i*)(p_buffer+8), xmm6); \
1252 xmm0 = _mm_unpackhi_epi16(xmm0, xmm2); \
1253 _mm_stream_si128((__m128i*)(p_buffer+12), xmm0);
1255 #define SSE2_UNPACK_32_BGRA_UNALIGNED \
1256 xmm3 = _mm_setzero_si128(); \
1257 xmm4 = xmm2; \
1258 xmm4 = _mm_unpacklo_epi8(xmm4, xmm0); \
1259 xmm3 = _mm_unpacklo_epi8(xmm3, xmm1); \
1260 xmm5 = xmm3; \
1261 xmm3 = _mm_unpacklo_epi16(xmm3, xmm4); \
1262 _mm_storeu_si128((__m128i*)(p_buffer), xmm3); \
1263 xmm5 = _mm_unpackhi_epi16(xmm5, xmm4); \
1264 _mm_storeu_si128((__m128i*)(p_buffer+4), xmm5); \
1265 xmm6 = _mm_setzero_si128(); \
1266 xmm2 = _mm_unpackhi_epi8(xmm2, xmm0); \
1267 xmm6 = _mm_unpackhi_epi8(xmm6, xmm1); \
1268 xmm0 = xmm6; \
1269 xmm6 = _mm_unpacklo_epi16(xmm6, xmm2); \
1270 _mm_storeu_si128((__m128i*)(p_buffer+8), xmm6); \
1271 xmm0 = _mm_unpackhi_epi16(xmm0, xmm2); \
1272 _mm_storeu_si128((__m128i*)(p_buffer+12), xmm0);
1274 #define SSE2_UNPACK_32_ABGR_ALIGNED \
1275 xmm3 = _mm_setzero_si128(); \
1276 xmm4 = xmm1; \
1277 xmm4 = _mm_unpacklo_epi8(xmm4, xmm2); \
1278 xmm5 = xmm0; \
1279 xmm5 = _mm_unpacklo_epi8(xmm5, xmm3); \
1280 xmm6 = xmm4; \
1281 xmm4 = _mm_unpacklo_epi16(xmm4, xmm5); \
1282 _mm_stream_si128((__m128i*)(p_buffer), xmm4); \
1283 xmm6 = _mm_unpackhi_epi16(xmm6, xmm5); \
1284 _mm_stream_si128((__m128i*)(p_buffer+4), xmm6); \
1285 xmm1 = _mm_unpackhi_epi8(xmm1, xmm2); \
1286 xmm0 = _mm_unpackhi_epi8(xmm0, xmm3); \
1287 xmm2 = xmm1; \
1288 xmm1 = _mm_unpacklo_epi16(xmm1, xmm0); \
1289 _mm_stream_si128((__m128i*)(p_buffer+8), xmm1); \
1290 xmm2 = _mm_unpackhi_epi16(xmm2, xmm0); \
1291 _mm_stream_si128((__m128i*)(p_buffer+12), xmm2);
1293 #define SSE2_UNPACK_32_ABGR_UNALIGNED \
1294 xmm3 = _mm_setzero_si128(); \
1295 xmm4 = xmm1; \
1296 xmm4 = _mm_unpacklo_epi8(xmm4, xmm2); \
1297 xmm5 = xmm0; \
1298 xmm5 = _mm_unpacklo_epi8(xmm5, xmm3); \
1299 xmm6 = xmm4; \
1300 xmm4 = _mm_unpacklo_epi16(xmm4, xmm5); \
1301 _mm_storeu_si128((__m128i*)(p_buffer), xmm4); \
1302 xmm6 = _mm_unpackhi_epi16(xmm6, xmm5); \
1303 _mm_storeu_si128((__m128i*)(p_buffer+4), xmm6); \
1304 xmm1 = _mm_unpackhi_epi8(xmm1, xmm2); \
1305 xmm0 = _mm_unpackhi_epi8(xmm0, xmm3); \
1306 xmm2 = xmm1; \
1307 xmm1 = _mm_unpacklo_epi16(xmm1, xmm0); \
1308 _mm_storeu_si128((__m128i*)(p_buffer+8), xmm1); \
1309 xmm2 = _mm_unpackhi_epi16(xmm2, xmm0); \
1310 _mm_storeu_si128((__m128i*)(p_buffer+12), xmm2);
1312 #endif
1314 #endif