/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * The C code (not assembly, MMX, ...) of this file can be used
 * under the LGPL license.
 */

#undef REAL_MOVNTQ
#undef MOVNTQ
#undef PAVGB
#undef PREFETCH
#undef PREFETCHW

#if HAVE_AMD3DNOW
#define PREFETCH "prefetch"
#define PREFETCHW "prefetchw"
#elif HAVE_MMX2
#define PREFETCH "prefetchnta"
#define PREFETCHW "prefetcht0"
#else
#define PREFETCH " # nop"
#define PREFETCHW " # nop"
#endif

#if HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif HAVE_AMD3DNOW
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

#if HAVE_MMX2
#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
#define MOVNTQ(a,b) REAL_MOVNTQ(a,b)

#if HAVE_ALTIVEC
#include "ppc/swscale_altivec_template.c"
#endif

#define YSCALEYUV2YV12X(x, offset, dest, width) \
    __asm__ volatile(\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    ASMALIGN(4) /* FIXME Unroll? */\
    "1: \n\t"\
    "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
    "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* srcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm3 \n\t"\
    "paddw %%mm5, %%mm4 \n\t"\
    " jnz 1b \n\t"\
    "psraw $3, %%mm3 \n\t"\
    "psraw $3, %%mm4 \n\t"\
    "packuswb %%mm4, %%mm3 \n\t"\
    MOVNTQ(%%mm3, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "cmp %2, %%"REG_a" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "jb 1b \n\t"\
    :: "r" (&c->redDither),\
       "r" (dest), "g" (width)\
    : "%"REG_a, "%"REG_d, "%"REG_S\
    );
#define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
    __asm__ volatile(\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    ASMALIGN(4) \
    "1: \n\t"\
    "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* srcData */\
    "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
    "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
    "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm1 \n\t" /* srcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm1, %%mm0 \n\t"\
    "punpckhwd %%mm1, %%mm3 \n\t"\
    "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm3 \n\t"\
    "paddd %%mm0, %%mm4 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* srcData */\
    "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
    "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm2 \n\t"\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "paddd %%mm2, %%mm6 \n\t"\
    "paddd %%mm0, %%mm7 \n\t"\
    " jnz 1b \n\t"\
    "psrad $16, %%mm4 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm4 \n\t"\
    "packssdw %%mm7, %%mm6 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm0, %%mm6 \n\t"\
    "psraw $3, %%mm4 \n\t"\
    "psraw $3, %%mm6 \n\t"\
    "packuswb %%mm6, %%mm4 \n\t"\
    MOVNTQ(%%mm4, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "cmp %2, %%"REG_a" \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "jb 1b \n\t"\
    :: "r" (&c->redDither),\
       "r" (dest), "g" (width)\
    : "%"REG_a, "%"REG_d, "%"REG_S\
    );

#define YSCALEYUV2YV121 \
    "mov %2, %%"REG_a" \n\t"\
    ASMALIGN(4) /* FIXME Unroll? */\
    "1: \n\t"\
    "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
    "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
    "psraw $7, %%mm0 \n\t"\
    "psraw $7, %%mm1 \n\t"\
    "packuswb %%mm1, %%mm0 \n\t"\
    MOVNTQ(%%mm0, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "jnc 1b \n\t"

#define YSCALEYUV2YV121_ACCURATE \
    "mov %2, %%"REG_a" \n\t"\
    "pcmpeqw %%mm7, %%mm7 \n\t"\
    "psrlw $15, %%mm7 \n\t"\
    "psllw $6, %%mm7 \n\t"\
    ASMALIGN(4) /* FIXME Unroll? */\
    "1: \n\t"\
    "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
    "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
    "paddsw %%mm7, %%mm0 \n\t"\
    "paddsw %%mm7, %%mm1 \n\t"\
    "psraw $7, %%mm0 \n\t"\
    "psraw $7, %%mm1 \n\t"\
    "packuswb %%mm1, %%mm0 \n\t"\
    MOVNTQ(%%mm0, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "jnc 1b \n\t"

/*
    :: "m" (-lumFilterSize), "m" (-chrFilterSize),
       "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
       "r" (dest), "m" (dstW),
       "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
    : "%eax", "%ebx", "%ecx", "%edx", "%esi"
*/
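
/* The two YSCALEYUV2YV121* macros above handle the unscaled 1:1 vertical
 * case: the 16-bit intermediates (7 fractional bits) are shifted back down
 * to 8 bits, with the _ACCURATE variant adding 64 first so the result is
 * rounded rather than truncated. As a scalar sketch:
 *
 *     dest[i] = av_clip_uint8((src[i] + 64) >> 7);   // _ACCURATE
 *     dest[i] = av_clip_uint8( src[i]       >> 7);   // fast variant
 *
 * packuswb performs the clipping in the MMX code, and the
 * pcmpeqw/psrlw/psllw sequence merely materializes the constant 0x0040
 * in %%mm7 without a memory load. */
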
#define YSCALEYUV2PACKEDX_UV \
    __asm__ volatile(\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    ASMALIGN(4)\
    "nop \n\t"\
    "1: \n\t"\
    "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
    "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm3 \n\t"\
    "paddw %%mm5, %%mm4 \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    " jnz 2b \n\t"

#define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
    "lea "offset"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), "#dst1" \n\t"\
    "movq "#dst1", "#dst2" \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq 8(%%"REG_d"), "#coeff" \n\t" /* filterCoeff */\
    "movq (%%"REG_S", %%"REG_a", 2), "#src1" \n\t" /* Y1srcData */\
    "movq 8(%%"REG_S", %%"REG_a", 2), "#src2" \n\t" /* Y2srcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pmulhw "#coeff", "#src1" \n\t"\
    "pmulhw "#coeff", "#src2" \n\t"\
    "paddw "#src1", "#dst1" \n\t"\
    "paddw "#src2", "#dst2" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    " jnz 2b \n\t"

#define YSCALEYUV2PACKEDX \
    YSCALEYUV2PACKEDX_UV \
    YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7)

#define YSCALEYUV2PACKEDX_END \
    :: "r" (&c->redDither), \
       "m" (dummy), "m" (dummy), "m" (dummy),\
       "r" (dest), "m" (dstW) \
    : "%"REG_a, "%"REG_d, "%"REG_S \
    );

#define YSCALEYUV2PACKEDX_ACCURATE_UV \
    __asm__ volatile(\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    ASMALIGN(4)\
    "nop \n\t"\
    "1: \n\t"\
    "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
    "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
    "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
    "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm1, %%mm0 \n\t"\
    "punpckhwd %%mm1, %%mm3 \n\t"\
    "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1 \n\t" /* filterCoeff */\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm3 \n\t"\
    "paddd %%mm0, %%mm4 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
    "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
    "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm2 \n\t"\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "paddd %%mm2, %%mm6 \n\t"\
    "paddd %%mm0, %%mm7 \n\t"\
    " jnz 2b \n\t"\
    "psrad $16, %%mm4 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm4 \n\t"\
    "packssdw %%mm7, %%mm6 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm0, %%mm6 \n\t"\
    "movq %%mm4, "U_TEMP"(%0) \n\t"\
    "movq %%mm6, "V_TEMP"(%0) \n\t"

#define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
    "lea "offset"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pxor %%mm1, %%mm1 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
    "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
    "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm4, %%mm0 \n\t"\
    "punpckhwd %%mm4, %%mm3 \n\t"\
    "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
    "pmaddwd %%mm4, %%mm0 \n\t"\
    "pmaddwd %%mm4, %%mm3 \n\t"\
    "paddd %%mm0, %%mm1 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
    "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
    "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm4, %%mm2 \n\t"\
    "pmaddwd %%mm4, %%mm0 \n\t"\
    "paddd %%mm2, %%mm7 \n\t"\
    "paddd %%mm0, %%mm6 \n\t"\
    " jnz 2b \n\t"\
    "psrad $16, %%mm1 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm1 \n\t"\
    "packssdw %%mm6, %%mm7 \n\t"\
    "paddw %%mm0, %%mm1 \n\t"\
    "paddw %%mm0, %%mm7 \n\t"\
    "movq "U_TEMP"(%0), %%mm3 \n\t"\
    "movq "V_TEMP"(%0), %%mm4 \n\t"

#define YSCALEYUV2PACKEDX_ACCURATE \
    YSCALEYUV2PACKEDX_ACCURATE_UV \
    YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)

#define YSCALEYUV2RGBX \
    "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
    "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"
#define REAL_YSCALEYUV2PACKED(index, c) \
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
    "psraw $3, %%mm0 \n\t"\
    "psraw $3, %%mm1 \n\t"\
    "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
    "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $7, %%mm3 \n\t" /* uvbuf1[eax] >>7*/\
    "psraw $7, %%mm4 \n\t" /* uvbuf1[eax+2048] >>7*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]*uvalpha1 + uvbuf1[eax]*(1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]*uvalpha1 + uvbuf1[eax+2048]*(1-uvalpha1)*/\
    "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
    "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $7, %%mm1 \n\t" /* buf1[eax] >>7*/\
    "psraw $7, %%mm7 \n\t" /* buf1[eax] >>7*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/

#define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
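
/* YSCALEYUV2PACKED blends two adjacent source lines with a 12-bit weight
 * before the write-out; in scalar terms (yalpha1 = 4095 - yalpha, see
 * RENAME(yuv2packed2) below):
 *
 *     Y = (buf0[i]*yalpha1 + buf1[i]*yalpha) >> 19;
 *
 * and likewise for U/V with uvalpha at half horizontal resolution. The MMX
 * code rewrites the blend as buf1 + (buf0 - buf1)*alpha so that a single
 * pmulhw per register is enough. */
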
#define REAL_YSCALEYUV2RGB_UV(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf1[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf1[eax+2048] >>4*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]*uvalpha1 + uvbuf1[eax]*(1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]*uvalpha1 + uvbuf1[eax+2048]*(1-uvalpha1)*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */

#define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
    "movq ("#b1", "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq ("#b2", "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8("#b1", "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
    "movq 8("#b2", "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $4, %%mm1 \n\t" /* buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf1[eax] >>4*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/

#define REAL_YSCALEYUV2RGB_COEFF(c) \
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"

#define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)

#define YSCALEYUV2RGB(index, c) \
    REAL_YSCALEYUV2RGB_UV(index, c) \
    REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
    REAL_YSCALEYUV2RGB_COEFF(c)

#define REAL_YSCALEYUV2PACKED1(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "psraw $7, %%mm3 \n\t" \
    "psraw $7, %%mm4 \n\t" \
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t" \
    "psraw $7, %%mm7 \n\t"

#define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)

#define REAL_YSCALEYUV2RGB1(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] >>4*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"

#define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)

#define REAL_YSCALEYUV2PACKED1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $8, %%mm3 \n\t" \
    "psrlw $8, %%mm4 \n\t" \
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t" \
    "psraw $7, %%mm7 \n\t"
#define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)

// do vertical chrominance interpolation
#define REAL_YSCALEYUV2RGB1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
    "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"

#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)

#define REAL_YSCALEYUV2RGB1_ALPHA(index) \
    "movq (%1, "#index", 2), %%mm7 \n\t" /* abuf0[index ] */\
    "movq 8(%1, "#index", 2), %%mm1 \n\t" /* abuf0[index+4] */\
    "psraw $7, %%mm7 \n\t" /* abuf0[index ] >>7 */\
    "psraw $7, %%mm1 \n\t" /* abuf0[index+4] >>7 */\
    "packuswb %%mm1, %%mm7 \n\t"
#define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)

#define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
    "movq "#b", "#q2" \n\t" /* B */\
    "movq "#r", "#t" \n\t" /* R */\
    "punpcklbw "#g", "#b" \n\t" /* GBGBGBGB 0 */\
    "punpcklbw "#a", "#r" \n\t" /* ARARARAR 0 */\
    "punpckhbw "#g", "#q2" \n\t" /* GBGBGBGB 2 */\
    "punpckhbw "#a", "#t" \n\t" /* ARARARAR 2 */\
    "movq "#b", "#q0" \n\t" /* GBGBGBGB 0 */\
    "movq "#q2", "#q3" \n\t" /* GBGBGBGB 2 */\
    "punpcklwd "#r", "#q0" \n\t" /* ARGBARGB 0 */\
    "punpckhwd "#r", "#b" \n\t" /* ARGBARGB 1 */\
    "punpcklwd "#t", "#q2" \n\t" /* ARGBARGB 2 */\
    "punpckhwd "#t", "#q3" \n\t" /* ARGBARGB 3 */\
\
    MOVNTQ( q0, (dst, index, 4))\
    MOVNTQ( b, 8(dst, index, 4))\
    MOVNTQ( q2, 16(dst, index, 4))\
    MOVNTQ( q3, 24(dst, index, 4))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)

#define REAL_WRITERGB16(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
    "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
    "psrlq $3, %%mm2 \n\t"\
\
    "movq %%mm2, %%mm1 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
\
    "punpcklbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm5, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm5, %%mm1 \n\t"\
\
    "psllq $3, %%mm3 \n\t"\
    "psllq $3, %%mm4 \n\t"\
\
    "por %%mm3, %%mm2 \n\t"\
    "por %%mm4, %%mm1 \n\t"\
\
    MOVNTQ(%%mm2, (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)
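
/* WRITERGB16 packs eight B,G,R byte triples into RGB565: B and R are
 * masked down to their top five bits (bF8), G to its top six (bFC), and
 * the fields are shifted into place. Per pixel this is simply:
 *
 *     dst16 = ((R >> 3) << 11) | ((G >> 2) << 5) | (B >> 3);
 *
 * e.g. mid-grey (R=G=B=0x80) becomes 0x8410. WRITERGB15 below is the same
 * scheme with five green bits and bit 15 left clear. */
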
#define REAL_WRITERGB15(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
    "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
    "psrlq $3, %%mm2 \n\t"\
    "psrlq $1, %%mm5 \n\t"\
\
    "movq %%mm2, %%mm1 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
\
    "punpcklbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm5, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm5, %%mm1 \n\t"\
\
    "psllq $2, %%mm3 \n\t"\
    "psllq $2, %%mm4 \n\t"\
\
    "por %%mm3, %%mm2 \n\t"\
    "por %%mm4, %%mm1 \n\t"\
\
    MOVNTQ(%%mm2, (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)

#define WRITEBGR24OLD(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq %%mm2, %%mm1 \n\t" /* B */\
    "movq %%mm5, %%mm6 \n\t" /* R */\
    "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
    "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
    "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
    "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
    "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
    "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
    "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
    "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
    "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
    "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
    "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 0 */\
    "pand "MANGLE(bm11111000)", %%mm0 \n\t" /* 00RGB000 0.5 */\
    "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
    "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
    "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
    "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
\
    "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
    "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
    "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
    "pand "MANGLE(bm00001111)", %%mm2 \n\t" /* 0000RGBR 1 */\
    "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
    "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
    "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 2 */\
    "pand "MANGLE(bm11111000)", %%mm1 \n\t" /* 00RGB000 2.5 */\
    "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
    "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
    "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
    "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
\
    "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
    "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
    "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
    "pand "MANGLE(bm00000111)", %%mm5 \n\t" /* 00000RGB 3 */\
    "pand "MANGLE(bm11111000)", %%mm3 \n\t" /* 00RGB000 3.5 */\
    "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
    "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
    "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
\
    MOVNTQ(%%mm0, (dst))\
    MOVNTQ(%%mm2, 8(dst))\
    MOVNTQ(%%mm3, 16(dst))\
    "add $24, "#dst" \n\t"\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEBGR24MMX(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq %%mm2, %%mm1 \n\t" /* B */\
    "movq %%mm5, %%mm6 \n\t" /* R */\
    "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
    "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
    "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
    "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
    "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
    "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
    "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
    "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
    "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
    "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
    "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
    "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
\
    "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
    "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
    "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
    "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
\
    "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
    "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
    "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
    "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
\
    "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
    "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
    "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
    "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
    MOVNTQ(%%mm0, (dst))\
\
    "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
    "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
    "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
    "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
    MOVNTQ(%%mm6, 8(dst))\
\
    "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
    "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
    "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
    MOVNTQ(%%mm5, 16(dst))\
\
    "add $24, "#dst" \n\t"\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEBGR24MMX2(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
    "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
    "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
    "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
    "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
\
    "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
    "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
    "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
\
    "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
    "por %%mm1, %%mm6 \n\t"\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, (dst))\
\
    "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
    "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
    "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
    "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
\
    "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
    "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
    "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
\
    "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, 8(dst))\
\
    "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B7 B6 */\
    "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
    "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
\
    "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
    "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
    "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
\
    "por %%mm1, %%mm3 \n\t"\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, 16(dst))\
\
    "add $24, "#dst" \n\t"\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#if HAVE_MMX2
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
#else
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
#endif

#define REAL_WRITEYUY2(dst, dstw, index) \
    "packuswb %%mm3, %%mm3 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm4, %%mm3 \n\t"\
    "movq %%mm1, %%mm7 \n\t"\
    "punpcklbw %%mm3, %%mm1 \n\t"\
    "punpckhbw %%mm3, %%mm7 \n\t"\
\
    MOVNTQ(%%mm1, (dst, index, 2))\
    MOVNTQ(%%mm7, 8(dst, index, 2))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)

static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                                    int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, int16_t **alpSrc,
                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
{
#if HAVE_MMX
    if(!(c->flags & SWS_BITEXACT)){
        if (c->flags & SWS_ACCURATE_RND){
            if (uDest){
                YSCALEYUV2YV12X_ACCURATE( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
                YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
            }
            if (CONFIG_SWSCALE_ALPHA && aDest){
                YSCALEYUV2YV12X_ACCURATE( "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
            }

            YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
        }else{
            if (uDest){
                YSCALEYUV2YV12X( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
                YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
            }
            if (CONFIG_SWSCALE_ALPHA && aDest){
                YSCALEYUV2YV12X( "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
            }

            YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
        }
        return;
    }
#endif
#if HAVE_ALTIVEC
    yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
                          chrFilter, chrSrc, chrFilterSize,
                          dest, uDest, vDest, dstW, chrDstW);
#else //HAVE_ALTIVEC
    yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
                chrFilter, chrSrc, chrFilterSize,
                alpSrc, dest, uDest, vDest, aDest, dstW, chrDstW);
#endif //!HAVE_ALTIVEC
}

static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                                     int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                                     uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
{
    yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
                 chrFilter, chrSrc, chrFilterSize,
                 dest, uDest, dstW, chrDstW, dstFormat);
}

static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chrSrc, int16_t *alpSrc,
                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
{
    int i;
#if HAVE_MMX
    if(!(c->flags & SWS_BITEXACT)){
        long p= 4;
        uint8_t *src[4]= {alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
        uint8_t *dst[4]= {aDest, dest, uDest, vDest};
        x86_reg counter[4]= {dstW, dstW, chrDstW, chrDstW};

        if (c->flags & SWS_ACCURATE_RND){
            while(p--){
                if (dst[p]){
                    __asm__ volatile(
                        YSCALEYUV2YV121_ACCURATE
                        :: "r" (src[p]), "r" (dst[p] + counter[p]),
                           "g" (-counter[p])
                        : "%"REG_a
                    );
                }
            }
        }else{
            while(p--){
                if (dst[p]){
                    __asm__ volatile(
                        YSCALEYUV2YV121
                        :: "r" (src[p]), "r" (dst[p] + counter[p]),
                           "g" (-counter[p])
                        : "%"REG_a
                    );
                }
            }
        }
        return;
    }
#endif
    for (i=0; i<dstW; i++)
    {
        int val= (lumSrc[i]+64)>>7;

        if (val&256){
            if (val<0) val=0;
            else val=255;
        }

        dest[i]= val;
    }

    if (uDest)
        for (i=0; i<chrDstW; i++)
        {
            int u=(chrSrc[i ]+64)>>7;
            int v=(chrSrc[i + VOFW]+64)>>7;

            if ((u|v)&256){
                if (u<0) u=0;
                else if (u>255) u=255;
                if (v<0) v=0;
                else if (v>255) v=255;
            }

            uDest[i]= u;
            vDest[i]= v;
        }

    if (CONFIG_SWSCALE_ALPHA && aDest)
        for (i=0; i<dstW; i++){
            int val= (alpSrc[i]+64)>>7;
            aDest[i]= av_clip_uint8(val);
        }
}

/**
 * vertical scale YV12 to RGB
 */
static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                                       int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                                       int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
{
#if HAVE_MMX
    x86_reg dummy=0;
    if(!(c->flags & SWS_BITEXACT)){
        if (c->flags & SWS_ACCURATE_RND){
            switch(c->dstFormat){
            case PIX_FMT_RGB32:
                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
                    YSCALEYUV2PACKEDX_ACCURATE
                    YSCALEYUV2RGBX
                    "movq %%mm2, "U_TEMP"(%0) \n\t"
                    "movq %%mm4, "V_TEMP"(%0) \n\t"
                    "movq %%mm5, "Y_TEMP"(%0) \n\t"
                    YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
                    "movq "Y_TEMP"(%0), %%mm5 \n\t"
                    "psraw $3, %%mm1 \n\t"
                    "psraw $3, %%mm7 \n\t"
                    "packuswb %%mm7, %%mm1 \n\t"
                    WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)

                    YSCALEYUV2PACKEDX_END
                }else{
                    YSCALEYUV2PACKEDX_ACCURATE
                    YSCALEYUV2RGBX
                    "pcmpeqd %%mm7, %%mm7 \n\t"
                    WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)

                    YSCALEYUV2PACKEDX_END
                }
                return;
            case PIX_FMT_BGR24:
                YSCALEYUV2PACKEDX_ACCURATE
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
                "add %4, %%"REG_c" \n\t"
                WRITEBGR24(%%REGc, %5, %%REGa)

                :: "r" (&c->redDither),
                   "m" (dummy), "m" (dummy), "m" (dummy),
                   "r" (dest), "m" (dstW)
                : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
                );
                return;
            case PIX_FMT_RGB555:
                YSCALEYUV2PACKEDX_ACCURATE
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
                "paddusb "RED_DITHER"(%0), %%mm5\n\t"
#endif

                WRITERGB15(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_RGB565:
                YSCALEYUV2PACKEDX_ACCURATE
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
                "paddusb "RED_DITHER"(%0), %%mm5\n\t"
#endif

                WRITERGB16(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_YUYV422:
                YSCALEYUV2PACKEDX_ACCURATE
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */

                "psraw $3, %%mm3 \n\t"
                "psraw $3, %%mm4 \n\t"
                "psraw $3, %%mm1 \n\t"
                "psraw $3, %%mm7 \n\t"
                WRITEYUY2(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            }
        }else{
            switch(c->dstFormat)
            {
            case PIX_FMT_RGB32:
                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
                    YSCALEYUV2PACKEDX
                    YSCALEYUV2RGBX
                    YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
                    "psraw $3, %%mm1 \n\t"
                    "psraw $3, %%mm7 \n\t"
                    "packuswb %%mm7, %%mm1 \n\t"
                    WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
                    YSCALEYUV2PACKEDX_END
                }else{
                    YSCALEYUV2PACKEDX
                    YSCALEYUV2RGBX
                    "pcmpeqd %%mm7, %%mm7 \n\t"
                    WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                    YSCALEYUV2PACKEDX_END
                }
                return;
            case PIX_FMT_BGR24:
                YSCALEYUV2PACKEDX
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize
                "add %4, %%"REG_c" \n\t"
                WRITEBGR24(%%REGc, %5, %%REGa)

                :: "r" (&c->redDither),
                   "m" (dummy), "m" (dummy), "m" (dummy),
                   "r" (dest), "m" (dstW)
                : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
                );
                return;
            case PIX_FMT_RGB555:
                YSCALEYUV2PACKEDX
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
#endif

                WRITERGB15(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_RGB565:
                YSCALEYUV2PACKEDX
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
#endif

                WRITERGB16(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_YUYV422:
                YSCALEYUV2PACKEDX
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */

                "psraw $3, %%mm3 \n\t"
                "psraw $3, %%mm4 \n\t"
                "psraw $3, %%mm1 \n\t"
                "psraw $3, %%mm7 \n\t"
                WRITEYUY2(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            }
        }
    }
#endif /* HAVE_MMX */
#if HAVE_ALTIVEC
    /* The following list of supported dstFormat values should
       match what's found in the body of ff_yuv2packedX_altivec() */
    if (!(c->flags & SWS_BITEXACT) && !c->alpPixBuf &&
        (c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
         c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
         c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB))
        ff_yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize,
                               chrFilter, chrSrc, chrFilterSize,
                               dest, dstW, dstY);
    else
#endif
        yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
                       chrFilter, chrSrc, chrFilterSize,
                       alpSrc, dest, dstW, dstY);
}

/**
 * vertical bilinear scale YV12 to RGB
 */
static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                                       uint16_t *abuf0, uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
{
    int yalpha1=4095- yalpha;
    int uvalpha1=4095-uvalpha;
    int i;

#if HAVE_MMX
    if(!(c->flags & SWS_BITEXACT)){
        switch(c->dstFormat)
        {
        //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
        case PIX_FMT_RGB32:
            if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
#if ARCH_X86_64
                __asm__ volatile(
                    YSCALEYUV2RGB(%%REGBP, %5)
                    YSCALEYUV2RGB_YA(%%REGBP, %5, %6, %7)
                    "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "packuswb %%mm7, %%mm1 \n\t"
                    WRITEBGR32(%4, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "r" (dest),
                       "a" (&c->redDither)
                       ,"r" (abuf0), "r" (abuf1)
                    : "%"REG_BP
                );
#else
                *(uint16_t **)(&c->u_temp)=abuf0;
                *(uint16_t **)(&c->v_temp)=abuf1;
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB(%%REGBP, %5)
                    "push %0 \n\t"
                    "push %1 \n\t"
                    "mov "U_TEMP"(%5), %0 \n\t"
                    "mov "V_TEMP"(%5), %1 \n\t"
                    YSCALEYUV2RGB_YA(%%REGBP, %5, %0, %1)
                    "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "packuswb %%mm7, %%mm1 \n\t"
                    "pop %1 \n\t"
                    "pop %0 \n\t"
                    WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
#endif
            }else{
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB(%%REGBP, %5)
                    "pcmpeqd %%mm7, %%mm7 \n\t"
                    WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
            }
            return;
        case PIX_FMT_BGR24:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB(%%REGBP, %5)
                "pxor %%mm7, %%mm7 \n\t"
                WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        case PIX_FMT_RGB555:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB(%%REGBP, %5)
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif

                WRITERGB15(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        case PIX_FMT_RGB565:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB(%%REGBP, %5)
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif

                WRITERGB16(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        case PIX_FMT_YUYV422:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2PACKED(%%REGBP, %5)
                WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        default: break;
        }
    }
#endif //HAVE_MMX
    YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C(void,0), YSCALE_YUV_2_GRAY16_2_C, YSCALE_YUV_2_MONO2_C)
}

/**
 * YV12 to RGB without scaling or interpolating
 */
static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
                                       uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
{
    const int yalpha1=0;
    int i;

    uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
    const int yalpha= 4096; //FIXME ...

    if (flags&SWS_FULL_CHR_H_INT)
    {
        RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, abuf0, abuf0, dest, dstW, 0, uvalpha, y);
        return;
    }

#if HAVE_MMX
    if(!(flags & SWS_BITEXACT)){
        if (uvalpha < 2048) // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
        {
            switch(dstFormat)
            {
            case PIX_FMT_RGB32:
                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
                    __asm__ volatile(
                        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                        "mov %4, %%"REG_b" \n\t"
                        "push %%"REG_BP" \n\t"
                        YSCALEYUV2RGB1(%%REGBP, %5)
                        YSCALEYUV2RGB1_ALPHA(%%REGBP)
                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                        "pop %%"REG_BP" \n\t"
                        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                        :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                           "a" (&c->redDither)
                    );
                }else{
                    __asm__ volatile(
                        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                        "mov %4, %%"REG_b" \n\t"
                        "push %%"REG_BP" \n\t"
                        YSCALEYUV2RGB1(%%REGBP, %5)
                        "pcmpeqd %%mm7, %%mm7 \n\t"
                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                        "pop %%"REG_BP" \n\t"
                        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                        :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                           "a" (&c->redDither)
                    );
                }
                return;
            case PIX_FMT_BGR24:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB555:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
                    WRITERGB15(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB565:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif

                    WRITERGB16(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_YUYV422:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2PACKED1(%%REGBP, %5)
                    WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            }
        }
        else
        {
            switch(dstFormat)
            {
            case PIX_FMT_RGB32:
                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
                    __asm__ volatile(
                        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                        "mov %4, %%"REG_b" \n\t"
                        "push %%"REG_BP" \n\t"
                        YSCALEYUV2RGB1b(%%REGBP, %5)
                        YSCALEYUV2RGB1_ALPHA(%%REGBP)
                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                        "pop %%"REG_BP" \n\t"
                        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                        :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                           "a" (&c->redDither)
                    );
                }else{
                    __asm__ volatile(
                        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                        "mov %4, %%"REG_b" \n\t"
                        "push %%"REG_BP" \n\t"
                        YSCALEYUV2RGB1b(%%REGBP, %5)
                        "pcmpeqd %%mm7, %%mm7 \n\t"
                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                        "pop %%"REG_BP" \n\t"
                        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                        :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                           "a" (&c->redDither)
                    );
                }
                return;
            case PIX_FMT_BGR24:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1b(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB555:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1b(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
                    WRITERGB15(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_RGB565:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB1b(%%REGBP, %5)
                    "pxor %%mm7, %%mm7 \n\t"
                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                    "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                    "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                    "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif

                    WRITERGB16(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            case PIX_FMT_YUYV422:
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2PACKED1b(%%REGBP, %5)
                    WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"

                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
                return;
            }
        }
    }
#endif /* HAVE_MMX */
    if (uvalpha < 2048)
    {
        YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C(void,0), YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
    }else{
        YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C(void,0), YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
    }
}

//FIXME yuy2* can read up to 7 samples too much

static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
{
#if HAVE_MMX
    __asm__ volatile(
    "movq "MANGLE(bm01010101)", %%mm2 \n\t"
    "mov %0, %%"REG_a" \n\t"
    "1: \n\t"
    "movq (%1, %%"REG_a",2), %%mm0 \n\t"
    "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
    "pand %%mm2, %%mm0 \n\t"
    "pand %%mm2, %%mm1 \n\t"
    "packuswb %%mm1, %%mm0 \n\t"
    "movq %%mm0, (%2, %%"REG_a") \n\t"
    "add $8, %%"REG_a" \n\t"
    " js 1b \n\t"
    : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
    : "%"REG_a
    );
#else
    int i;
    for (i=0; i<width; i++)
        dst[i]= src[2*i];
#endif
}

static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
{
#if HAVE_MMX
    __asm__ volatile(
    "movq "MANGLE(bm01010101)", %%mm4 \n\t"
    "mov %0, %%"REG_a" \n\t"
    "1: \n\t"
    "movq (%1, %%"REG_a",4), %%mm0 \n\t"
    "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
    "psrlw $8, %%mm0 \n\t"
    "psrlw $8, %%mm1 \n\t"
    "packuswb %%mm1, %%mm0 \n\t"
    "movq %%mm0, %%mm1 \n\t"
    "psrlw $8, %%mm0 \n\t"
    "pand %%mm4, %%mm1 \n\t"
    "packuswb %%mm0, %%mm0 \n\t"
    "packuswb %%mm1, %%mm1 \n\t"
    "movd %%mm0, (%3, %%"REG_a") \n\t"
    "movd %%mm1, (%2, %%"REG_a") \n\t"
    "add $4, %%"REG_a" \n\t"
    " js 1b \n\t"
    : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
    : "%"REG_a
    );
#else
    int i;
    for (i=0; i<width; i++)
    {
        dstU[i]= src1[4*i + 1];
        dstV[i]= src1[4*i + 3];
    }
#endif
    assert(src1 == src2);
}

1674 /* This is almost identical to the previous, and exists only because
1675  * yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses. */
1676 static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
1677 {
1678 #if HAVE_MMX
1679 __asm__ volatile(
1680 "mov %0, %%"REG_a" \n\t"
1681 "1: \n\t"
1682 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
1683 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
1684 "psrlw $8, %%mm0 \n\t"
1685 "psrlw $8, %%mm1 \n\t"
1686 "packuswb %%mm1, %%mm0 \n\t"
1687 "movq %%mm0, (%2, %%"REG_a") \n\t"
1688 "add $8, %%"REG_a" \n\t"
1689 " js 1b \n\t"
1690 : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
1691 : "%"REG_a
1693 #else
1694 int i;
1695 for (i=0; i<width; i++)
1696 dst[i]= src[2*i+1];
1697 #endif
1698 }
1700 static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
1701 {
1702 #if HAVE_MMX
1703 __asm__ volatile(
1704 "movq "MANGLE(bm01010101)", %%mm4 \n\t"
1705 "mov %0, %%"REG_a" \n\t"
1706 "1: \n\t"
1707 "movq (%1, %%"REG_a",4), %%mm0 \n\t"
1708 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
1709 "pand %%mm4, %%mm0 \n\t"
1710 "pand %%mm4, %%mm1 \n\t"
1711 "packuswb %%mm1, %%mm0 \n\t"
1712 "movq %%mm0, %%mm1 \n\t"
1713 "psrlw $8, %%mm0 \n\t"
1714 "pand %%mm4, %%mm1 \n\t"
1715 "packuswb %%mm0, %%mm0 \n\t"
1716 "packuswb %%mm1, %%mm1 \n\t"
1717 "movd %%mm0, (%3, %%"REG_a") \n\t"
1718 "movd %%mm1, (%2, %%"REG_a") \n\t"
1719 "add $4, %%"REG_a" \n\t"
1720 " js 1b \n\t"
1721 : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
1722 : "%"REG_a
1724 #else
1725 int i;
1726 for (i=0; i<width; i++)
1728 dstU[i]= src1[4*i + 0];
1729 dstV[i]= src1[4*i + 2];
1731 #endif
1732 assert(src1 == src2);
1733 }
1735 #define BGR2Y(type, name, shr, shg, shb, maskr, maskg, maskb, RY, GY, BY, S)\
1736 static inline void RENAME(name)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)\
1737 {\
1738 int i;\
1739 for (i=0; i<width; i++)\
1740 {\
1741 int b= (((type*)src)[i]>>shb)&maskb;\
1742 int g= (((type*)src)[i]>>shg)&maskg;\
1743 int r= (((type*)src)[i]>>shr)&maskr;\
1744 \
1745 dst[i]= (((RY)*r + (GY)*g + (BY)*b + (33<<((S)-1)))>>(S));\
1746 }\
1747 }
1749 BGR2Y(uint32_t, bgr32ToY,16, 0, 0, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY , BY<< 8, RGB2YUV_SHIFT+8)
1750 BGR2Y(uint32_t, rgb32ToY, 0, 0,16, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY , BY<< 8, RGB2YUV_SHIFT+8)
1751 BGR2Y(uint16_t, bgr16ToY, 0, 0, 0, 0x001F, 0x07E0, 0xF800, RY<<11, GY<<5, BY , RGB2YUV_SHIFT+8)
1752 BGR2Y(uint16_t, bgr15ToY, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RY<<10, GY<<5, BY , RGB2YUV_SHIFT+7)
1753 BGR2Y(uint16_t, rgb16ToY, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RY , GY<<5, BY<<11, RGB2YUV_SHIFT+8)
1754 BGR2Y(uint16_t, rgb15ToY, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RY , GY<<5, BY<<10, RGB2YUV_SHIFT+7)
1756 static inline void RENAME(abgrToA)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused){
1757 int i;
1758 for (i=0; i<width; i++){
1759 dst[i]= src[4*i];
1760 }
1761 }
1763 #define BGR2UV(type, name, shr, shg, shb, maska, maskr, maskg, maskb, RU, GU, BU, RV, GV, BV, S)\
1764 static inline void RENAME(name)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, uint8_t *dummy, long width, uint32_t *unused)\
1765 {\
1766 int i;\
1767 for (i=0; i<width; i++)\
1768 {\
1769 int b= (((type*)src)[i]&maskb)>>shb;\
1770 int g= (((type*)src)[i]&maskg)>>shg;\
1771 int r= (((type*)src)[i]&maskr)>>shr;\
1772 \
1773 dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<((S)-1)))>>(S);\
1774 dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<((S)-1)))>>(S);\
1775 }\
1776 }\
1777 static inline void RENAME(name ## _half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, uint8_t *dummy, long width, uint32_t *unused)\
1778 {\
1779 int i;\
1780 for (i=0; i<width; i++)\
1781 {\
1782 int pix0= ((type*)src)[2*i+0];\
1783 int pix1= ((type*)src)[2*i+1];\
1784 int g= (pix0&~(maskr|maskb))+(pix1&~(maskr|maskb));\
1785 int b= ((pix0+pix1-g)&(maskb|(2*maskb)))>>shb;\
1786 int r= ((pix0+pix1-g)&(maskr|(2*maskr)))>>shr;\
1787 g&= maskg|(2*maskg);\
1788 \
1789 g>>=shg;\
1790 \
1791 dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<(S)))>>((S)+1);\
1792 dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<(S)))>>((S)+1);\
1793 }\
1794 }
1796 BGR2UV(uint32_t, bgr32ToUV,16, 0, 0, 0xFF000000, 0xFF0000, 0xFF00, 0x00FF, RU<< 8, GU , BU<< 8, RV<< 8, GV , BV<< 8, RGB2YUV_SHIFT+8)
1797 BGR2UV(uint32_t, rgb32ToUV, 0, 0,16, 0xFF000000, 0x00FF, 0xFF00, 0xFF0000, RU<< 8, GU , BU<< 8, RV<< 8, GV , BV<< 8, RGB2YUV_SHIFT+8)
1798 BGR2UV(uint16_t, bgr16ToUV, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, RU<<11, GU<<5, BU , RV<<11, GV<<5, BV , RGB2YUV_SHIFT+8)
1799 BGR2UV(uint16_t, bgr15ToUV, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RU<<10, GU<<5, BU , RV<<10, GV<<5, BV , RGB2YUV_SHIFT+7)
1800 BGR2UV(uint16_t, rgb16ToUV, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RU , GU<<5, BU<<11, RV , GV<<5, BV<<11, RGB2YUV_SHIFT+8)
1801 BGR2UV(uint16_t, rgb15ToUV, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RU , GU<<5, BU<<10, RV , GV<<5, BV<<10, RGB2YUV_SHIFT+7)
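/* Illustrative sketch (not part of the build, masks hard-coded for RGB565):
 * the *_half readers average two horizontally adjacent packed pixels without
 * unpacking them first. Summing two pixels makes each field one bit wider,
 * which is why the masks above are widened to maskX|(2*maskX) before use. */
#if 0
static void example_rgb565_half(void)
{
    int pix0 = 0xF800, pix1 = 0xF800;                /* two pure-red pixels, r=31 each  */
    int g = (pix0&0x07E0) + (pix1&0x07E0);           /* summed green, still in place: 0 */
    int r = ((pix0+pix1-g)&(0xF800|(2*0xF800)))>>11; /* 0x1F000>>11 == 62 == r0+r1      */
    /* the /2 of the average is folded into the final >>((S)+1) in BGR2UV    */
}
#endif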
1803 #if HAVE_MMX
1804 static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, uint8_t *src, long width, int srcFormat)
1805 {
1807 if(srcFormat == PIX_FMT_BGR24){
1808 __asm__ volatile(
1809 "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t"
1810 "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t"
1813 }else{
1814 __asm__ volatile(
1815 "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t"
1816 "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t"
1821 __asm__ volatile(
1822 "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t"
1823 "mov %2, %%"REG_a" \n\t"
1824 "pxor %%mm7, %%mm7 \n\t"
1825 "1: \n\t"
1826 PREFETCH" 64(%0) \n\t"
1827 "movd (%0), %%mm0 \n\t"
1828 "movd 2(%0), %%mm1 \n\t"
1829 "movd 6(%0), %%mm2 \n\t"
1830 "movd 8(%0), %%mm3 \n\t"
1831 "add $12, %0 \n\t"
1832 "punpcklbw %%mm7, %%mm0 \n\t"
1833 "punpcklbw %%mm7, %%mm1 \n\t"
1834 "punpcklbw %%mm7, %%mm2 \n\t"
1835 "punpcklbw %%mm7, %%mm3 \n\t"
1836 "pmaddwd %%mm5, %%mm0 \n\t"
1837 "pmaddwd %%mm6, %%mm1 \n\t"
1838 "pmaddwd %%mm5, %%mm2 \n\t"
1839 "pmaddwd %%mm6, %%mm3 \n\t"
1840 "paddd %%mm1, %%mm0 \n\t"
1841 "paddd %%mm3, %%mm2 \n\t"
1842 "paddd %%mm4, %%mm0 \n\t"
1843 "paddd %%mm4, %%mm2 \n\t"
1844 "psrad $15, %%mm0 \n\t"
1845 "psrad $15, %%mm2 \n\t"
1846 "packssdw %%mm2, %%mm0 \n\t"
1847 "packuswb %%mm0, %%mm0 \n\t"
1848 "movd %%mm0, (%1, %%"REG_a") \n\t"
1849 "add $4, %%"REG_a" \n\t"
1850 " js 1b \n\t"
1851 : "+r" (src)
1852 : "r" (dst+width), "g" ((x86_reg)-width)
1853 : "%"REG_a
1857 static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, long width, int srcFormat)
1858 {
1859 __asm__ volatile(
1860 "movq 24+%4, %%mm6 \n\t"
1861 "mov %3, %%"REG_a" \n\t"
1862 "pxor %%mm7, %%mm7 \n\t"
1863 "1: \n\t"
1864 PREFETCH" 64(%0) \n\t"
1865 "movd (%0), %%mm0 \n\t"
1866 "movd 2(%0), %%mm1 \n\t"
1867 "punpcklbw %%mm7, %%mm0 \n\t"
1868 "punpcklbw %%mm7, %%mm1 \n\t"
1869 "movq %%mm0, %%mm2 \n\t"
1870 "movq %%mm1, %%mm3 \n\t"
1871 "pmaddwd %4, %%mm0 \n\t"
1872 "pmaddwd 8+%4, %%mm1 \n\t"
1873 "pmaddwd 16+%4, %%mm2 \n\t"
1874 "pmaddwd %%mm6, %%mm3 \n\t"
1875 "paddd %%mm1, %%mm0 \n\t"
1876 "paddd %%mm3, %%mm2 \n\t"
1878 "movd 6(%0), %%mm1 \n\t"
1879 "movd 8(%0), %%mm3 \n\t"
1880 "add $12, %0 \n\t"
1881 "punpcklbw %%mm7, %%mm1 \n\t"
1882 "punpcklbw %%mm7, %%mm3 \n\t"
1883 "movq %%mm1, %%mm4 \n\t"
1884 "movq %%mm3, %%mm5 \n\t"
1885 "pmaddwd %4, %%mm1 \n\t"
1886 "pmaddwd 8+%4, %%mm3 \n\t"
1887 "pmaddwd 16+%4, %%mm4 \n\t"
1888 "pmaddwd %%mm6, %%mm5 \n\t"
1889 "paddd %%mm3, %%mm1 \n\t"
1890 "paddd %%mm5, %%mm4 \n\t"
1892 "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3 \n\t"
1893 "paddd %%mm3, %%mm0 \n\t"
1894 "paddd %%mm3, %%mm2 \n\t"
1895 "paddd %%mm3, %%mm1 \n\t"
1896 "paddd %%mm3, %%mm4 \n\t"
1897 "psrad $15, %%mm0 \n\t"
1898 "psrad $15, %%mm2 \n\t"
1899 "psrad $15, %%mm1 \n\t"
1900 "psrad $15, %%mm4 \n\t"
1901 "packssdw %%mm1, %%mm0 \n\t"
1902 "packssdw %%mm4, %%mm2 \n\t"
1903 "packuswb %%mm0, %%mm0 \n\t"
1904 "packuswb %%mm2, %%mm2 \n\t"
1905 "movd %%mm0, (%1, %%"REG_a") \n\t"
1906 "movd %%mm2, (%2, %%"REG_a") \n\t"
1907 "add $4, %%"REG_a" \n\t"
1908 " js 1b \n\t"
1909 : "+r" (src)
1910 : "r" (dstU+width), "r" (dstV+width), "g" ((x86_reg)-width), "m"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24][0])
1911 : "%"REG_a
1914 #endif
1916 static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
1917 {
1918 #if HAVE_MMX
1919 RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
1920 #else
1921 int i;
1922 for (i=0; i<width; i++)
1924 int b= src[i*3+0];
1925 int g= src[i*3+1];
1926 int r= src[i*3+2];
1928 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
1930 #endif /* HAVE_MMX */
1931 }
1933 static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
1934 {
1935 #if HAVE_MMX
1936 RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
1937 #else
1938 int i;
1939 for (i=0; i<width; i++)
1941 int b= src1[3*i + 0];
1942 int g= src1[3*i + 1];
1943 int r= src1[3*i + 2];
1945 dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
1946 dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
1948 #endif /* HAVE_MMX */
1949 assert(src1 == src2);
1950 }
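/* A reading aid for the rounding constants used above (my interpretation):
 * with S = RGB2YUV_SHIFT, 33 = 2*16+1 and 257 = 2*128+1, so the addends fold
 * the +16 / +128 output offsets together with a +0.5 rounding term. */
#if 0
static void example_rounding_constants(void)
{
    const int S = RGB2YUV_SHIFT;
    assert((33 <<(S-1)) == (16 <<S) + (1<<(S-1)));  /* luma:   +16 offset, +0.5 round  */
    assert((257<<(S-1)) == (128<<S) + (1<<(S-1)));  /* chroma: +128 offset, +0.5 round */
}
#endif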
1952 static inline void RENAME(bgr24ToUV_half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
1953 {
1954 int i;
1955 for (i=0; i<width; i++)
1956 {
1957 int b= src1[6*i + 0] + src1[6*i + 3];
1958 int g= src1[6*i + 1] + src1[6*i + 4];
1959 int r= src1[6*i + 2] + src1[6*i + 5];
1961 dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
1962 dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
1963 }
1964 assert(src1 == src2);
1965 }
1967 static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
1968 {
1969 #if HAVE_MMX
1970 RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
1971 #else
1972 int i;
1973 for (i=0; i<width; i++)
1974 {
1975 int r= src[i*3+0];
1976 int g= src[i*3+1];
1977 int b= src[i*3+2];
1979 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
1980 }
1981 #endif
1982 }
1984 static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
1985 {
1986 #if HAVE_MMX
1987 assert(src1==src2);
1988 RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
1989 #else
1990 int i;
1991 assert(src1==src2);
1992 for (i=0; i<width; i++)
1993 {
1994 int r= src1[3*i + 0];
1995 int g= src1[3*i + 1];
1996 int b= src1[3*i + 2];
1998 dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
1999 dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
2000 }
2001 #endif
2002 }
2004 static inline void RENAME(rgb24ToUV_half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
2005 {
2006 int i;
2007 assert(src1==src2);
2008 for (i=0; i<width; i++)
2009 {
2010 int r= src1[6*i + 0] + src1[6*i + 3];
2011 int g= src1[6*i + 1] + src1[6*i + 4];
2012 int b= src1[6*i + 2] + src1[6*i + 5];
2014 dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
2015 dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
2016 }
2017 }
2020 static inline void RENAME(palToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *pal)
2021 {
2022 int i;
2023 for (i=0; i<width; i++)
2024 {
2025 int d= src[i];
2027 dst[i]= pal[d] & 0xFF;
2028 }
2029 }
2031 static inline void RENAME(palToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *pal)
2032 {
2033 int i;
2034 assert(src1 == src2);
2035 for (i=0; i<width; i++)
2036 {
2037 int p= pal[src1[i]];
2039 dstU[i]= p>>8;  // U lives in bits 8..15 of the packed YUV palette entry
2040 dstV[i]= p>>16; // V in bits 16..23
2041 }
2042 }
2044 static inline void RENAME(monowhite2Y)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
2045 {
2046 int i, j;
2047 for (i=0; i<width/8; i++){
2048 int d= ~src[i];
2049 for(j=0; j<8; j++)
2050 dst[8*i+j]= ((d>>(7-j))&1)*255;
2051 }
2052 }
2054 static inline void RENAME(monoblack2Y)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
2055 {
2056 int i, j;
2057 for (i=0; i<width/8; i++){
2058 int d= src[i];
2059 for(j=0; j<8; j++)
2060 dst[8*i+j]= ((d>>(7-j))&1)*255;
2061 }
2062 }
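/* Illustrative sketch (not part of the build): each input byte carries 8
 * pixels, MSB first; ((d>>(7-j))&1)*255 expands bit j to 0 or 255. */
#if 0
static void example_bit_expand(void)
{
    int d = 0xA0;                  /* pixel bits: 1 0 1 0 0 0 0 0 */
    int y2 = ((d>>(7-2))&1)*255;   /* third pixel  -> 255         */
    int y1 = ((d>>(7-1))&1)*255;   /* second pixel -> 0           */
}
#endif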
2064 // bilinear / bicubic scaling
2065 static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
2066 int16_t *filter, int16_t *filterPos, long filterSize)
2067 {
2068 #if HAVE_MMX
2069 assert(filterSize % 4 == 0 && filterSize>0);
2070 if (filterSize==4) // Always true for upscaling, sometimes for down, too.
2071 {
2072 x86_reg counter= -2*dstW;
2073 filter-= counter*2;
2074 filterPos-= counter/2;
2075 dst-= counter/2;
2076 __asm__ volatile(
2077 #if defined(PIC)
2078 "push %%"REG_b" \n\t"
2079 #endif
2080 "pxor %%mm7, %%mm7 \n\t"
2081 "push %%"REG_BP" \n\t" // we use 7 regs here ...
2082 "mov %%"REG_a", %%"REG_BP" \n\t"
2083 ASMALIGN(4)
2084 "1: \n\t"
2085 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
2086 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
2087 "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
2088 "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
2089 "movd (%3, %%"REG_a"), %%mm0 \n\t"
2090 "movd (%3, %%"REG_b"), %%mm2 \n\t"
2091 "punpcklbw %%mm7, %%mm0 \n\t"
2092 "punpcklbw %%mm7, %%mm2 \n\t"
2093 "pmaddwd %%mm1, %%mm0 \n\t"
2094 "pmaddwd %%mm2, %%mm3 \n\t"
2095 "movq %%mm0, %%mm4 \n\t"
2096 "punpckldq %%mm3, %%mm0 \n\t"
2097 "punpckhdq %%mm3, %%mm4 \n\t"
2098 "paddd %%mm4, %%mm0 \n\t"
2099 "psrad $7, %%mm0 \n\t"
2100 "packssdw %%mm0, %%mm0 \n\t"
2101 "movd %%mm0, (%4, %%"REG_BP") \n\t"
2102 "add $4, %%"REG_BP" \n\t"
2103 " jnc 1b \n\t"
2105 "pop %%"REG_BP" \n\t"
2106 #if defined(PIC)
2107 "pop %%"REG_b" \n\t"
2108 #endif
2109 : "+a" (counter)
2110 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
2111 #if !defined(PIC)
2112 : "%"REG_b
2113 #endif
2114 );
2115 }
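/* A reading aid (my interpretation): counter starts at -2*dstW and the
 * filter/filterPos/dst pointers were biased by the same amount above, so
 * indexing with the negative counter walks the arrays forward while
 * "add $4 ; jnc 1b" terminates exactly when the counter wraps through zero;
 * no separate compare instruction is needed in the inner loop. */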
2116 else if (filterSize==8)
2117 {
2118 x86_reg counter= -2*dstW;
2119 filter-= counter*4;
2120 filterPos-= counter/2;
2121 dst-= counter/2;
2122 __asm__ volatile(
2123 #if defined(PIC)
2124 "push %%"REG_b" \n\t"
2125 #endif
2126 "pxor %%mm7, %%mm7 \n\t"
2127 "push %%"REG_BP" \n\t" // we use 7 regs here ...
2128 "mov %%"REG_a", %%"REG_BP" \n\t"
2129 ASMALIGN(4)
2130 "1: \n\t"
2131 "movzwl (%2, %%"REG_BP"), %%eax \n\t"
2132 "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
2133 "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
2134 "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
2135 "movd (%3, %%"REG_a"), %%mm0 \n\t"
2136 "movd (%3, %%"REG_b"), %%mm2 \n\t"
2137 "punpcklbw %%mm7, %%mm0 \n\t"
2138 "punpcklbw %%mm7, %%mm2 \n\t"
2139 "pmaddwd %%mm1, %%mm0 \n\t"
2140 "pmaddwd %%mm2, %%mm3 \n\t"
2142 "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
2143 "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
2144 "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
2145 "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
2146 "punpcklbw %%mm7, %%mm4 \n\t"
2147 "punpcklbw %%mm7, %%mm2 \n\t"
2148 "pmaddwd %%mm1, %%mm4 \n\t"
2149 "pmaddwd %%mm2, %%mm5 \n\t"
2150 "paddd %%mm4, %%mm0 \n\t"
2151 "paddd %%mm5, %%mm3 \n\t"
2152 "movq %%mm0, %%mm4 \n\t"
2153 "punpckldq %%mm3, %%mm0 \n\t"
2154 "punpckhdq %%mm3, %%mm4 \n\t"
2155 "paddd %%mm4, %%mm0 \n\t"
2156 "psrad $7, %%mm0 \n\t"
2157 "packssdw %%mm0, %%mm0 \n\t"
2158 "movd %%mm0, (%4, %%"REG_BP") \n\t"
2159 "add $4, %%"REG_BP" \n\t"
2160 " jnc 1b \n\t"
2162 "pop %%"REG_BP" \n\t"
2163 #if defined(PIC)
2164 "pop %%"REG_b" \n\t"
2165 #endif
2166 : "+a" (counter)
2167 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
2168 #if !defined(PIC)
2169 : "%"REG_b
2170 #endif
2171 );
2172 }
2173 else
2174 {
2175 uint8_t *offset = src+filterSize;
2176 x86_reg counter= -2*dstW;
2177 //filter-= counter*filterSize/2;
2178 filterPos-= counter/2;
2179 dst-= counter/2;
2180 __asm__ volatile(
2181 "pxor %%mm7, %%mm7 \n\t"
2182 ASMALIGN(4)
2183 "1: \n\t"
2184 "mov %2, %%"REG_c" \n\t"
2185 "movzwl (%%"REG_c", %0), %%eax \n\t"
2186 "movzwl 2(%%"REG_c", %0), %%edx \n\t"
2187 "mov %5, %%"REG_c" \n\t"
2188 "pxor %%mm4, %%mm4 \n\t"
2189 "pxor %%mm5, %%mm5 \n\t"
2190 "2: \n\t"
2191 "movq (%1), %%mm1 \n\t"
2192 "movq (%1, %6), %%mm3 \n\t"
2193 "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t"
2194 "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t"
2195 "punpcklbw %%mm7, %%mm0 \n\t"
2196 "punpcklbw %%mm7, %%mm2 \n\t"
2197 "pmaddwd %%mm1, %%mm0 \n\t"
2198 "pmaddwd %%mm2, %%mm3 \n\t"
2199 "paddd %%mm3, %%mm5 \n\t"
2200 "paddd %%mm0, %%mm4 \n\t"
2201 "add $8, %1 \n\t"
2202 "add $4, %%"REG_c" \n\t"
2203 "cmp %4, %%"REG_c" \n\t"
2204 " jb 2b \n\t"
2205 "add %6, %1 \n\t"
2206 "movq %%mm4, %%mm0 \n\t"
2207 "punpckldq %%mm5, %%mm4 \n\t"
2208 "punpckhdq %%mm5, %%mm0 \n\t"
2209 "paddd %%mm0, %%mm4 \n\t"
2210 "psrad $7, %%mm4 \n\t"
2211 "packssdw %%mm4, %%mm4 \n\t"
2212 "mov %3, %%"REG_a" \n\t"
2213 "movd %%mm4, (%%"REG_a", %0) \n\t"
2214 "add $4, %0 \n\t"
2215 " jnc 1b \n\t"
2217 : "+r" (counter), "+r" (filter)
2218 : "m" (filterPos), "m" (dst), "m"(offset),
2219 "m" (src), "r" ((x86_reg)filterSize*2)
2220 : "%"REG_a, "%"REG_c, "%"REG_d
2223 #else
2224 #if HAVE_ALTIVEC
2225 hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
2226 #else
2227 int i;
2228 for (i=0; i<dstW; i++)
2230 int j;
2231 int srcPos= filterPos[i];
2232 int val=0;
2233 //printf("filterPos: %d\n", filterPos[i]);
2234 for (j=0; j<filterSize; j++)
2236 //printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
2237 val += ((int)src[srcPos + j])*filter[filterSize*i + j];
2239 //filter += hFilterSize;
2240 dst[i] = FFMIN(val>>7, (1<<15)-1); // the cubic equation does overflow ...
2241 //dst[i] = val>>7;
2243 #endif /* HAVE_ALTIVEC */
2244 #endif /* HAVE_MMX */
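/* Illustrative sketch (not part of the build): hScale is a plain FIR filter
 * per output sample. Assuming 4 taps whose coefficients sum to 1<<14 (the
 * usual scaling here), one output value is formed like this: */
#if 0
static void example_hscale_tap(void)
{
    static const uint8_t src[6]   = {10, 20, 30, 40, 50, 60};
    static const int16_t coeff[4] = {1024, 7168, 7168, 1024}; /* sums to 16384 */
    int val = 0, j;
    for (j = 0; j < 4; j++)
        val += src[1+j]*coeff[j];  /* srcPos == 1 */
    /* val>>7 leaves a 15-bit (<<7 scaled) sample for the vertical pass */
}
#endif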
2247 static inline void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst,
2248 int dstWidth, uint8_t *src, int srcW,
2249 int xInc)
2250 {
2251 int i;
2252 unsigned int xpos=0;
2253 for (i=0;i<dstWidth;i++)
2254 {
2255 register unsigned int xx=xpos>>16;
2256 register unsigned int xalpha=(xpos&0xFFFF)>>9;
2257 dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
2258 xpos+=xInc;
2259 }
2260 }
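/* Illustrative sketch (not part of the build): xpos is a 16.16 fixed-point
 * source position; xx takes the integer part and the fraction is reduced to
 * a 7-bit blend weight. */
#if 0
static void example_fixed_point_step(void)
{
    unsigned xpos   = 0x00018000;        /* 1.5 in 16.16 fixed point     */
    unsigned xx     = xpos>>16;          /* integer sample index: 1      */
    unsigned xalpha = (xpos&0xFFFF)>>9;  /* 0x8000>>9 == 64, of 0..127   */
    /* dst = (src[1]<<7) + (src[2]-src[1])*64: the halfway blend, <<7 scaled */
}
#endif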
2262 // *** horizontal scale Y line to temp buffer
2263 static inline void RENAME(hyscale)(SwsContext *c, uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
2264 int flags, int16_t *hLumFilter,
2265 int16_t *hLumFilterPos, int hLumFilterSize,
2266 int srcFormat, uint8_t *formatConvBuffer,
2267 uint32_t *pal, int isAlpha)
2268 {
2269 int32_t *mmx2FilterPos = c->lumMmx2FilterPos;
2270 int16_t *mmx2Filter = c->lumMmx2Filter;
2271 int canMMX2BeUsed = c->canMMX2BeUsed;
2272 void *funnyYCode = c->funnyYCode;
2274 if (srcFormat==PIX_FMT_YUYV422 || srcFormat==PIX_FMT_GRAY16BE)
2275 {
2276 RENAME(yuy2ToY)(formatConvBuffer, src, srcW, pal);
2277 src= formatConvBuffer;
2278 }
2279 else if (srcFormat==PIX_FMT_UYVY422 || srcFormat==PIX_FMT_GRAY16LE)
2280 {
2281 RENAME(uyvyToY)(formatConvBuffer, src, srcW, pal);
2282 src= formatConvBuffer;
2283 }
2284 else if (srcFormat==PIX_FMT_RGB32)
2285 {
2286 if (isAlpha)
2287 RENAME(abgrToA)(formatConvBuffer, src+3, srcW, pal);
2288 else
2289 RENAME(bgr32ToY)(formatConvBuffer, src, srcW, pal);
2290 src= formatConvBuffer;
2291 }
2292 else if (srcFormat==PIX_FMT_RGB32_1)
2293 {
2294 if (isAlpha)
2295 RENAME(abgrToA)(formatConvBuffer, src, srcW, pal);
2296 else
2297 RENAME(bgr32ToY)(formatConvBuffer, src+ALT32_CORR, srcW, pal);
2298 src= formatConvBuffer;
2299 }
2300 else if (srcFormat==PIX_FMT_BGR24)
2301 {
2302 RENAME(bgr24ToY)(formatConvBuffer, src, srcW, pal);
2303 src= formatConvBuffer;
2304 }
2305 else if (srcFormat==PIX_FMT_BGR565)
2306 {
2307 RENAME(bgr16ToY)(formatConvBuffer, src, srcW, pal);
2308 src= formatConvBuffer;
2309 }
2310 else if (srcFormat==PIX_FMT_BGR555)
2311 {
2312 RENAME(bgr15ToY)(formatConvBuffer, src, srcW, pal);
2313 src= formatConvBuffer;
2314 }
2315 else if (srcFormat==PIX_FMT_BGR32)
2316 {
2317 if (isAlpha)
2318 RENAME(abgrToA)(formatConvBuffer, src+3, srcW, pal);
2319 else
2320 RENAME(rgb32ToY)(formatConvBuffer, src, srcW, pal);
2321 src= formatConvBuffer;
2322 }
2323 else if (srcFormat==PIX_FMT_BGR32_1)
2324 {
2325 if (isAlpha)
2326 RENAME(abgrToA)(formatConvBuffer, src, srcW, pal);
2327 else
2328 RENAME(rgb32ToY)(formatConvBuffer, src+ALT32_CORR, srcW, pal);
2329 src= formatConvBuffer;
2330 }
2331 else if (srcFormat==PIX_FMT_RGB24)
2332 {
2333 RENAME(rgb24ToY)(formatConvBuffer, src, srcW, pal);
2334 src= formatConvBuffer;
2335 }
2336 else if (srcFormat==PIX_FMT_RGB565)
2337 {
2338 RENAME(rgb16ToY)(formatConvBuffer, src, srcW, pal);
2339 src= formatConvBuffer;
2340 }
2341 else if (srcFormat==PIX_FMT_RGB555)
2342 {
2343 RENAME(rgb15ToY)(formatConvBuffer, src, srcW, pal);
2344 src= formatConvBuffer;
2345 }
2346 else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
2347 {
2348 RENAME(palToY)(formatConvBuffer, src, srcW, pal);
2349 src= formatConvBuffer;
2350 }
2351 else if (srcFormat==PIX_FMT_MONOBLACK)
2352 {
2353 RENAME(monoblack2Y)(formatConvBuffer, src, srcW, pal);
2354 src= formatConvBuffer;
2355 }
2356 else if (srcFormat==PIX_FMT_MONOWHITE)
2357 {
2358 RENAME(monowhite2Y)(formatConvBuffer, src, srcW, pal);
2359 src= formatConvBuffer;
2360 }
2362 #if HAVE_MMX
2363 // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
2364 if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
2365 #else
2366 if (!(flags&SWS_FAST_BILINEAR))
2367 #endif
2369 RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
2371 else // fast bilinear upscale / crap downscale
2373 #if ARCH_X86 && CONFIG_GPL
2374 #if HAVE_MMX2
2375 int i;
2376 #if defined(PIC)
2377 uint64_t ebxsave __attribute__((aligned(8)));
2378 #endif
2379 if (canMMX2BeUsed)
2381 __asm__ volatile(
2382 #if defined(PIC)
2383 "mov %%"REG_b", %5 \n\t"
2384 #endif
2385 "pxor %%mm7, %%mm7 \n\t"
2386 "mov %0, %%"REG_c" \n\t"
2387 "mov %1, %%"REG_D" \n\t"
2388 "mov %2, %%"REG_d" \n\t"
2389 "mov %3, %%"REG_b" \n\t"
2390 "xor %%"REG_a", %%"REG_a" \n\t" // i
2391 PREFETCH" (%%"REG_c") \n\t"
2392 PREFETCH" 32(%%"REG_c") \n\t"
2393 PREFETCH" 64(%%"REG_c") \n\t"
2395 #if ARCH_X86_64
2397 #define FUNNY_Y_CODE \
2398 "movl (%%"REG_b"), %%esi \n\t"\
2399 "call *%4 \n\t"\
2400 "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
2401 "add %%"REG_S", %%"REG_c" \n\t"\
2402 "add %%"REG_a", %%"REG_D" \n\t"\
2403 "xor %%"REG_a", %%"REG_a" \n\t"\
2405 #else
2407 #define FUNNY_Y_CODE \
2408 "movl (%%"REG_b"), %%esi \n\t"\
2409 "call *%4 \n\t"\
2410 "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
2411 "add %%"REG_a", %%"REG_D" \n\t"\
2412 "xor %%"REG_a", %%"REG_a" \n\t"\
2414 #endif /* ARCH_X86_64 */
2416 FUNNY_Y_CODE
2417 FUNNY_Y_CODE
2418 FUNNY_Y_CODE
2419 FUNNY_Y_CODE
2420 FUNNY_Y_CODE
2421 FUNNY_Y_CODE
2422 FUNNY_Y_CODE
2423 FUNNY_Y_CODE
2425 #if defined(PIC)
2426 "mov %5, %%"REG_b" \n\t"
2427 #endif
2428 :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
2429 "m" (funnyYCode)
2430 #if defined(PIC)
2431 ,"m" (ebxsave)
2432 #endif
2433 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2434 #if !defined(PIC)
2435 ,"%"REG_b
2436 #endif
2437 );
2438 for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
2439 }
2440 else
2441 {
2442 #endif /* HAVE_MMX2 */
2443 x86_reg xInc_shr16 = xInc >> 16;
2444 uint16_t xInc_mask = xInc & 0xffff;
2445 //NO MMX just normal asm ...
2446 __asm__ volatile(
2447 "xor %%"REG_a", %%"REG_a" \n\t" // i
2448 "xor %%"REG_d", %%"REG_d" \n\t" // xx
2449 "xorl %%ecx, %%ecx \n\t" // 2*xalpha
2450 ASMALIGN(4)
2451 "1: \n\t"
2452 "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
2453 "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
2454 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
2455 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
2456 "shll $16, %%edi \n\t"
2457 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2458 "mov %1, %%"REG_D" \n\t"
2459 "shrl $9, %%esi \n\t"
2460 "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
2461 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
2462 "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
2464 "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
2465 "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
2466 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
2467 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
2468 "shll $16, %%edi \n\t"
2469 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2470 "mov %1, %%"REG_D" \n\t"
2471 "shrl $9, %%esi \n\t"
2472 "movw %%si, 2(%%"REG_D", %%"REG_a", 2) \n\t"
2473 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
2474 "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
2477 "add $2, %%"REG_a" \n\t"
2478 "cmp %2, %%"REG_a" \n\t"
2479 " jb 1b \n\t"
2482 :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
2483 : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
2485 #if HAVE_MMX2
2486 } //if MMX2 can't be used
2487 #endif
2488 #else
2489 RENAME(hyscale_fast)(c, dst, dstWidth, src, srcW, xInc);
2490 #endif /* ARCH_X86 */
2493 if(!isAlpha && c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){
2494 int i;
2495 //FIXME all pal and rgb srcFormats could do this conversion as well
2496 //FIXME all scalers more complex than bilinear could do half of this transform
2497 if(c->srcRange){
2498 for (i=0; i<dstWidth; i++)
2499 dst[i]= (dst[i]*14071 + 33561947)>>14;
2500 }else{
2501 for (i=0; i<dstWidth; i++)
2502 dst[i]= (FFMIN(dst[i],30189)*19077 - 39057361)>>14;
2503 }
2504 }
2505 }
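/* A reading aid for the constants above (my interpretation): the samples are
 * <<7 scaled, and 14071/16384 ~= 219/255, so the first branch compresses
 * full-range luma to limited range (the addend carries the +16 offset at this
 * scaling); the else branch expands with 19077/16384 ~= 255/219 and clips via
 * FFMIN to avoid overflow. */
#if 0
static void example_range_compress(void)
{
    int white = 255<<7;                        /* full-range white, <<7 scaled */
    int y     = (white*14071 + 33561947)>>14;  /* == 30080 == 235<<7           */
}
#endif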
2507 static inline void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst,
2508 int dstWidth, uint8_t *src1,
2509 uint8_t *src2, int srcW, int xInc)
2510 {
2511 int i;
2512 unsigned int xpos=0;
2513 for (i=0;i<dstWidth;i++)
2515 register unsigned int xx=xpos>>16;
2516 register unsigned int xalpha=(xpos&0xFFFF)>>9;
2517 dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
2518 dst[i+VOFW]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
2519 /* slower
2520 dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
2521 dst[i+VOFW]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
2522 */
2523 xpos+=xInc;
2524 }
2525 }
2527 inline static void RENAME(hcscale)(SwsContext *c, uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
2528 int srcW, int xInc, int flags, int16_t *hChrFilter,
2529 int16_t *hChrFilterPos, int hChrFilterSize,
2530 int srcFormat, uint8_t *formatConvBuffer,
2531 uint32_t *pal)
2532 {
2533 int32_t *mmx2FilterPos = c->chrMmx2FilterPos;
2534 int16_t *mmx2Filter = c->chrMmx2Filter;
2535 int canMMX2BeUsed = c->canMMX2BeUsed;
2536 void *funnyUVCode = c->funnyUVCode;
2538 if (srcFormat==PIX_FMT_YUYV422)
2539 {
2540 RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2541 src1= formatConvBuffer;
2542 src2= formatConvBuffer+VOFW;
2543 }
2544 else if (srcFormat==PIX_FMT_UYVY422)
2545 {
2546 RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2547 src1= formatConvBuffer;
2548 src2= formatConvBuffer+VOFW;
2549 }
2550 else if (srcFormat==PIX_FMT_RGB32)
2551 {
2552 if(c->chrSrcHSubSample)
2553 RENAME(bgr32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2554 else
2555 RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2556 src1= formatConvBuffer;
2557 src2= formatConvBuffer+VOFW;
2558 }
2559 else if (srcFormat==PIX_FMT_RGB32_1)
2560 {
2561 if(c->chrSrcHSubSample)
2562 RENAME(bgr32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
2563 else
2564 RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
2565 src1= formatConvBuffer;
2566 src2= formatConvBuffer+VOFW;
2567 }
2568 else if (srcFormat==PIX_FMT_BGR24)
2569 {
2570 if(c->chrSrcHSubSample)
2571 RENAME(bgr24ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2572 else
2573 RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2574 src1= formatConvBuffer;
2575 src2= formatConvBuffer+VOFW;
2576 }
2577 else if (srcFormat==PIX_FMT_BGR565)
2578 {
2579 if(c->chrSrcHSubSample)
2580 RENAME(bgr16ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2581 else
2582 RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2583 src1= formatConvBuffer;
2584 src2= formatConvBuffer+VOFW;
2585 }
2586 else if (srcFormat==PIX_FMT_BGR555)
2587 {
2588 if(c->chrSrcHSubSample)
2589 RENAME(bgr15ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2590 else
2591 RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2592 src1= formatConvBuffer;
2593 src2= formatConvBuffer+VOFW;
2594 }
2595 else if (srcFormat==PIX_FMT_BGR32)
2596 {
2597 if(c->chrSrcHSubSample)
2598 RENAME(rgb32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2599 else
2600 RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2601 src1= formatConvBuffer;
2602 src2= formatConvBuffer+VOFW;
2603 }
2604 else if (srcFormat==PIX_FMT_BGR32_1)
2605 {
2606 if(c->chrSrcHSubSample)
2607 RENAME(rgb32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
2608 else
2609 RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
2610 src1= formatConvBuffer;
2611 src2= formatConvBuffer+VOFW;
2612 }
2613 else if (srcFormat==PIX_FMT_RGB24)
2614 {
2615 if(c->chrSrcHSubSample)
2616 RENAME(rgb24ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2617 else
2618 RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2619 src1= formatConvBuffer;
2620 src2= formatConvBuffer+VOFW;
2621 }
2622 else if (srcFormat==PIX_FMT_RGB565)
2623 {
2624 if(c->chrSrcHSubSample)
2625 RENAME(rgb16ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2626 else
2627 RENAME(rgb16ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2628 src1= formatConvBuffer;
2629 src2= formatConvBuffer+VOFW;
2630 }
2631 else if (srcFormat==PIX_FMT_RGB555)
2632 {
2633 if(c->chrSrcHSubSample)
2634 RENAME(rgb15ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2635 else
2636 RENAME(rgb15ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2637 src1= formatConvBuffer;
2638 src2= formatConvBuffer+VOFW;
2639 }
2640 else if (isGray(srcFormat) || srcFormat==PIX_FMT_MONOBLACK || srcFormat==PIX_FMT_MONOWHITE)
2641 {
2642 return;
2643 }
2644 else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
2645 {
2646 RENAME(palToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
2647 src1= formatConvBuffer;
2648 src2= formatConvBuffer+VOFW;
2649 }
2651 #if HAVE_MMX
2652 // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
2653 if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
2654 #else
2655 if (!(flags&SWS_FAST_BILINEAR))
2656 #endif
2658 RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
2659 RENAME(hScale)(dst+VOFW, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
2661 else // fast bilinear upscale / crap downscale
2663 #if ARCH_X86 && CONFIG_GPL
2664 #if HAVE_MMX2
2665 int i;
2666 #if defined(PIC)
2667 uint64_t ebxsave __attribute__((aligned(8)));
2668 #endif
2669 if (canMMX2BeUsed)
2671 __asm__ volatile(
2672 #if defined(PIC)
2673 "mov %%"REG_b", %6 \n\t"
2674 #endif
2675 "pxor %%mm7, %%mm7 \n\t"
2676 "mov %0, %%"REG_c" \n\t"
2677 "mov %1, %%"REG_D" \n\t"
2678 "mov %2, %%"REG_d" \n\t"
2679 "mov %3, %%"REG_b" \n\t"
2680 "xor %%"REG_a", %%"REG_a" \n\t" // i
2681 PREFETCH" (%%"REG_c") \n\t"
2682 PREFETCH" 32(%%"REG_c") \n\t"
2683 PREFETCH" 64(%%"REG_c") \n\t"
2685 #if ARCH_X86_64
2687 #define FUNNY_UV_CODE \
2688 "movl (%%"REG_b"), %%esi \n\t"\
2689 "call *%4 \n\t"\
2690 "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
2691 "add %%"REG_S", %%"REG_c" \n\t"\
2692 "add %%"REG_a", %%"REG_D" \n\t"\
2693 "xor %%"REG_a", %%"REG_a" \n\t"\
2695 #else
2697 #define FUNNY_UV_CODE \
2698 "movl (%%"REG_b"), %%esi \n\t"\
2699 "call *%4 \n\t"\
2700 "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
2701 "add %%"REG_a", %%"REG_D" \n\t"\
2702 "xor %%"REG_a", %%"REG_a" \n\t"\
2704 #endif /* ARCH_X86_64 */
2706 FUNNY_UV_CODE
2707 FUNNY_UV_CODE
2708 FUNNY_UV_CODE
2709 FUNNY_UV_CODE
2710 "xor %%"REG_a", %%"REG_a" \n\t" // i
2711 "mov %5, %%"REG_c" \n\t" // src
2712 "mov %1, %%"REG_D" \n\t" // buf1
2713 "add $"AV_STRINGIFY(VOF)", %%"REG_D" \n\t"
2714 PREFETCH" (%%"REG_c") \n\t"
2715 PREFETCH" 32(%%"REG_c") \n\t"
2716 PREFETCH" 64(%%"REG_c") \n\t"
2718 FUNNY_UV_CODE
2719 FUNNY_UV_CODE
2720 FUNNY_UV_CODE
2721 FUNNY_UV_CODE
2723 #if defined(PIC)
2724 "mov %6, %%"REG_b" \n\t"
2725 #endif
2726 :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
2727 "m" (funnyUVCode), "m" (src2)
2728 #if defined(PIC)
2729 ,"m" (ebxsave)
2730 #endif
2731 : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2732 #if !defined(PIC)
2733 ,"%"REG_b
2734 #endif
2735 );
2736 for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
2737 {
2738 //printf("%d %d %d\n", dstWidth, i, srcW);
2739 dst[i] = src1[srcW-1]*128;
2740 dst[i+VOFW] = src2[srcW-1]*128;
2741 }
2742 }
2743 else
2744 {
2745 #endif /* HAVE_MMX2 */
2746 x86_reg xInc_shr16 = (x86_reg) (xInc >> 16);
2747 uint16_t xInc_mask = xInc & 0xffff;
2748 __asm__ volatile(
2749 "xor %%"REG_a", %%"REG_a" \n\t" // i
2750 "xor %%"REG_d", %%"REG_d" \n\t" // xx
2751 "xorl %%ecx, %%ecx \n\t" // 2*xalpha
2752 ASMALIGN(4)
2753 "1: \n\t"
2754 "mov %0, %%"REG_S" \n\t"
2755 "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
2756 "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
2757 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
2758 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
2759 "shll $16, %%edi \n\t"
2760 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2761 "mov %1, %%"REG_D" \n\t"
2762 "shrl $9, %%esi \n\t"
2763 "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
2765 "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
2766 "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
2767 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
2768 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
2769 "shll $16, %%edi \n\t"
2770 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
2771 "mov %1, %%"REG_D" \n\t"
2772 "shrl $9, %%esi \n\t"
2773 "movw %%si, "AV_STRINGIFY(VOF)"(%%"REG_D", %%"REG_a", 2) \n\t"
2775 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
2776 "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
2777 "add $1, %%"REG_a" \n\t"
2778 "cmp %2, %%"REG_a" \n\t"
2779 " jb 1b \n\t"
2781 /* GCC 3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
2782 which is needed to support GCC 4.0. */
2783 #if ARCH_X86_64 && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
2784 :: "m" (src1), "m" (dst), "g" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
2785 #else
2786 :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
2787 #endif
2788 "r" (src2)
2789 : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
2791 #if HAVE_MMX2
2792 } //if MMX2 can't be used
2793 #endif
2794 #else
2795 RENAME(hcscale_fast)(c, dst, dstWidth, src1, src2, srcW, xInc);
2796 #endif /* ARCH_X86 */
2798 if(c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){
2799 int i;
2800 //FIXME all pal and rgb srcFormats could do this conversion as well
2801 //FIXME all scalers more complex than bilinear could do half of this transform
2802 if(c->srcRange){
2803 for (i=0; i<dstWidth; i++){
2804 dst[i ]= (dst[i ]*1799 + 4081085)>>11; //1469
2805 dst[i+VOFW]= (dst[i+VOFW]*1799 + 4081085)>>11; //1469
2807 }else{
2808 for (i=0; i<dstWidth; i++){
2809 dst[i ]= (FFMIN(dst[i ],30775)*4663 - 9289992)>>12; //-264
2810 dst[i+VOFW]= (FFMIN(dst[i+VOFW],30775)*4663 - 9289992)>>12; //-264
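/* Same reading as the luma case above (my interpretation): 1799/2048 ~= 224/255
 * compresses full-range chroma around the <<7-scaled centre 128, and the else
 * branch expands with 4663/4096 ~= 255/224. The achromatic centre is a fixed
 * point of the transform: */
#if 0
static void example_chroma_center(void)
{
    int grey = 128<<7;                      /* achromatic centre, <<7 scaled */
    int c    = (grey*1799 + 4081085)>>11;   /* == 16384 == 128<<7 again      */
}
#endif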
2816 static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
2817 int srcSliceH, uint8_t* dst[], int dstStride[]){
2819 /* load a few things into local vars to make the code more readable and faster */
2820 const int srcW= c->srcW;
2821 const int dstW= c->dstW;
2822 const int dstH= c->dstH;
2823 const int chrDstW= c->chrDstW;
2824 const int chrSrcW= c->chrSrcW;
2825 const int lumXInc= c->lumXInc;
2826 const int chrXInc= c->chrXInc;
2827 const int dstFormat= c->dstFormat;
2828 const int srcFormat= c->srcFormat;
2829 const int flags= c->flags;
2830 int16_t *vLumFilterPos= c->vLumFilterPos;
2831 int16_t *vChrFilterPos= c->vChrFilterPos;
2832 int16_t *hLumFilterPos= c->hLumFilterPos;
2833 int16_t *hChrFilterPos= c->hChrFilterPos;
2834 int16_t *vLumFilter= c->vLumFilter;
2835 int16_t *vChrFilter= c->vChrFilter;
2836 int16_t *hLumFilter= c->hLumFilter;
2837 int16_t *hChrFilter= c->hChrFilter;
2838 int32_t *lumMmxFilter= c->lumMmxFilter;
2839 int32_t *chrMmxFilter= c->chrMmxFilter;
2840 int32_t *alpMmxFilter= c->alpMmxFilter;
2841 const int vLumFilterSize= c->vLumFilterSize;
2842 const int vChrFilterSize= c->vChrFilterSize;
2843 const int hLumFilterSize= c->hLumFilterSize;
2844 const int hChrFilterSize= c->hChrFilterSize;
2845 int16_t **lumPixBuf= c->lumPixBuf;
2846 int16_t **chrPixBuf= c->chrPixBuf;
2847 int16_t **alpPixBuf= c->alpPixBuf;
2848 const int vLumBufSize= c->vLumBufSize;
2849 const int vChrBufSize= c->vChrBufSize;
2850 uint8_t *formatConvBuffer= c->formatConvBuffer;
2851 const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
2852 const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample); // == ceil(srcSliceH / 2^chrSrcVSubSample)
2853 int lastDstY;
2854 uint32_t *pal=c->pal_yuv;
2856 /* vars which will change and which we need to store back in the context */
2857 int dstY= c->dstY;
2858 int lumBufIndex= c->lumBufIndex;
2859 int chrBufIndex= c->chrBufIndex;
2860 int lastInLumBuf= c->lastInLumBuf;
2861 int lastInChrBuf= c->lastInChrBuf;
2863 if (isPacked(c->srcFormat)){
2864 src[0]=
2865 src[1]=
2866 src[2]=
2867 src[3]= src[0];
2868 srcStride[0]=
2869 srcStride[1]=
2870 srcStride[2]=
2871 srcStride[3]= srcStride[0];
2872 }
2873 srcStride[1]<<= c->vChrDrop;
2874 srcStride[2]<<= c->vChrDrop;
2876 //printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
2877 // (int)dst[0], (int)dst[1], (int)dst[2]);
2879 #if 0 //self test FIXME move to a vfilter or something
2880 {
2881 static volatile int i=0;
2882 i++;
2883 if (srcFormat==PIX_FMT_YUV420P && i==1 && srcSliceH>= c->srcH)
2884 selfTest(src, srcStride, c->srcW, c->srcH);
2885 i--;
2886 }
2887 #endif
2889 //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
2890 //dstStride[0],dstStride[1],dstStride[2]);
2892 if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0 || dstStride[3]%8 != 0)
2893 {
2894 static int warnedAlready=0; //FIXME move this into the context perhaps
2895 if (flags & SWS_PRINT_INFO && !warnedAlready)
2896 {
2897 av_log(c, AV_LOG_WARNING, "Warning: dstStride is not aligned!\n"
2898 " ->cannot do aligned memory accesses anymore\n");
2899 warnedAlready=1;
2900 }
2901 }
2903 /* Note: the user might start scaling in the middle of the picture, so this
2904 will not get executed. This is not really intended, but it currently works,
2905 so people might rely on it. */
2906 if (srcSliceY ==0){
2907 lumBufIndex=0;
2908 chrBufIndex=0;
2909 dstY=0;
2910 lastInLumBuf= -1;
2911 lastInChrBuf= -1;
2912 }
2914 lastDstY= dstY;
2916 for (;dstY < dstH; dstY++){
2917 unsigned char *dest =dst[0]+dstStride[0]*dstY;
2918 const int chrDstY= dstY>>c->chrDstVSubSample;
2919 unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
2920 unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
2921 unsigned char *aDest=(CONFIG_SWSCALE_ALPHA && alpPixBuf) ? dst[3]+dstStride[3]*dstY : NULL;
2923 const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
2924 const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
2925 const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
2926 const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
2928 //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
2929 // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
2930 //handle holes (FAST_BILINEAR & weird filters)
2931 if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
2932 if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
2933 //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
2934 assert(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1);
2935 assert(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1);
2937 // Do we have enough lines in this slice to output the dstY line?
2938 if (lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
2940 //Do horizontal scaling
2941 while(lastInLumBuf < lastLumSrcY)
2943 uint8_t *src1= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
2944 uint8_t *src2= src[3]+(lastInLumBuf + 1 - srcSliceY)*srcStride[3];
2945 lumBufIndex++;
2946 //printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
2947 assert(lumBufIndex < 2*vLumBufSize);
2948 assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
2949 assert(lastInLumBuf + 1 - srcSliceY >= 0);
2950 //printf("%d %d\n", lumBufIndex, vLumBufSize);
2951 RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, src1, srcW, lumXInc,
2952 flags, hLumFilter, hLumFilterPos, hLumFilterSize,
2953 c->srcFormat, formatConvBuffer,
2954 pal, 0);
2955 if (CONFIG_SWSCALE_ALPHA && alpPixBuf)
2956 RENAME(hyscale)(c, alpPixBuf[ lumBufIndex ], dstW, src2, srcW, lumXInc,
2957 flags, hLumFilter, hLumFilterPos, hLumFilterSize,
2958 c->srcFormat, formatConvBuffer,
2959 pal, 1);
2960 lastInLumBuf++;
2961 }
2962 while(lastInChrBuf < lastChrSrcY)
2963 {
2964 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
2965 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
2966 chrBufIndex++;
2967 assert(chrBufIndex < 2*vChrBufSize);
2968 assert(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH));
2969 assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
2970 //FIXME replace parameters through context struct (some at least)
2972 if (!(isGray(srcFormat) || isGray(dstFormat)))
2973 RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
2974 flags, hChrFilter, hChrFilterPos, hChrFilterSize,
2975 c->srcFormat, formatConvBuffer,
2976 pal);
2977 lastInChrBuf++;
2978 }
2979 //wrap buf index around to stay inside the ring buffer
2980 if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
2981 if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
2982 }
2983 else // not enough lines left in this slice -> load the rest in the buffer
2984 {
2985 /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
2986 firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
2987 lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
2988 vChrBufSize, vLumBufSize);*/
2990 //Do horizontal scaling
2991 while(lastInLumBuf+1 < srcSliceY + srcSliceH)
2992 {
2993 uint8_t *src1= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
2994 uint8_t *src2= src[3]+(lastInLumBuf + 1 - srcSliceY)*srcStride[3];
2995 lumBufIndex++;
2996 assert(lumBufIndex < 2*vLumBufSize);
2997 assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
2998 assert(lastInLumBuf + 1 - srcSliceY >= 0);
2999 RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, src1, srcW, lumXInc,
3000 flags, hLumFilter, hLumFilterPos, hLumFilterSize,
3001 c->srcFormat, formatConvBuffer,
3002 pal, 0);
3003 if (CONFIG_SWSCALE_ALPHA && alpPixBuf)
3004 RENAME(hyscale)(c, alpPixBuf[ lumBufIndex ], dstW, src2, srcW, lumXInc,
3005 flags, hLumFilter, hLumFilterPos, hLumFilterSize,
3006 c->srcFormat, formatConvBuffer,
3007 pal, 1);
3008 lastInLumBuf++;
3009 }
3010 while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
3011 {
3012 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
3013 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
3014 chrBufIndex++;
3015 assert(chrBufIndex < 2*vChrBufSize);
3016 assert(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH);
3017 assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
3019 if (!(isGray(srcFormat) || isGray(dstFormat)))
3020 RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
3021 flags, hChrFilter, hChrFilterPos, hChrFilterSize,
3022 c->srcFormat, formatConvBuffer,
3023 pal);
3024 lastInChrBuf++;
3025 }
3026 //wrap buf index around to stay inside the ring buffer
3027 if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
3028 if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
3029 break; //we can't output a dstY line so let's try with the next slice
3030 }
3032 #if HAVE_MMX
3033 c->blueDither= ff_dither8[dstY&1];
3034 if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555)
3035 c->greenDither= ff_dither8[dstY&1];
3036 else
3037 c->greenDither= ff_dither4[dstY&1];
3038 c->redDither= ff_dither8[(dstY+1)&1];
3039 #endif
3040 if (dstY < dstH-2)
3042 int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
3043 int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
3044 int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
3045 #if HAVE_MMX
3046 int i;
3047 if (flags & SWS_ACCURATE_RND){
3048 int s= APCK_SIZE / 8;
3049 for (i=0; i<vLumFilterSize; i+=2){
3050 *(void**)&lumMmxFilter[s*i ]= lumSrcPtr[i ];
3051 *(void**)&lumMmxFilter[s*i+APCK_PTR2/4 ]= lumSrcPtr[i+(vLumFilterSize>1)];
3052 lumMmxFilter[s*i+APCK_COEF/4 ]=
3053 lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i ]
3054 + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
3055 if (CONFIG_SWSCALE_ALPHA && alpPixBuf){
3056 *(void**)&alpMmxFilter[s*i ]= alpSrcPtr[i ];
3057 *(void**)&alpMmxFilter[s*i+APCK_PTR2/4 ]= alpSrcPtr[i+(vLumFilterSize>1)];
3058 alpMmxFilter[s*i+APCK_COEF/4 ]=
3059 alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4 ];
3060 }
3061 }
3062 for (i=0; i<vChrFilterSize; i+=2){
3063 *(void**)&chrMmxFilter[s*i ]= chrSrcPtr[i ];
3064 *(void**)&chrMmxFilter[s*i+APCK_PTR2/4 ]= chrSrcPtr[i+(vChrFilterSize>1)];
3065 chrMmxFilter[s*i+APCK_COEF/4 ]=
3066 chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i ]
3067 + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
3069 }else{
3070 for (i=0; i<vLumFilterSize; i++)
3072 lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
3073 lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
3074 lumMmxFilter[4*i+2]=
3075 lumMmxFilter[4*i+3]=
3076 ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
3077 if (CONFIG_SWSCALE_ALPHA && alpPixBuf){
3078 alpMmxFilter[4*i+0]= (int32_t)alpSrcPtr[i];
3079 alpMmxFilter[4*i+1]= (uint64_t)alpSrcPtr[i] >> 32;
3080 alpMmxFilter[4*i+2]=
3081 alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2];
3082 }
3083 }
3084 for (i=0; i<vChrFilterSize; i++)
3086 chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
3087 chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
3088 chrMmxFilter[4*i+2]=
3089 chrMmxFilter[4*i+3]=
3090 ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
3091 }
3092 }
3093 #endif
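/* A reading aid (my interpretation of the packing above): in the
 * non-ACCURATE_RND branch each vertical tap occupies four 32-bit slots:
 *   [4*i+0] low 32 bits of the source-line pointer
 *   [4*i+1] high 32 bits of the pointer (0 on 32-bit builds)
 *   [4*i+2] and [4*i+3] the 16-bit coefficient duplicated, since
 *           ((uint16_t)coeff)*0x10001 == (coeff<<16)|coeff,
 *           e.g. 0x2000*0x10001 == 0x20002000, ready for pmulhw-style use. */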
3094 if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
3095 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
3096 if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
3097 RENAME(yuv2nv12X)(c,
3098 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
3099 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
3100 dest, uDest, dstW, chrDstW, dstFormat);
3101 }
3102 else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12 like
3103 {
3104 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
3105 if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
3106 if (vLumFilterSize == 1 && vChrFilterSize == 1) // unscaled YV12
3108 int16_t *lumBuf = lumPixBuf[0];
3109 int16_t *chrBuf= chrPixBuf[0];
3110 int16_t *alpBuf= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? alpPixBuf[0] : NULL;
3111 RENAME(yuv2yuv1)(c, lumBuf, chrBuf, alpBuf, dest, uDest, vDest, aDest, dstW, chrDstW);
3112 }
3113 else //General YV12
3114 {
3115 RENAME(yuv2yuvX)(c,
3116 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
3117 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
3118 alpSrcPtr, dest, uDest, vDest, aDest, dstW, chrDstW);
3119 }
3120 }
3121 else
3122 {
3123 assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
3124 assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
3125 if (vLumFilterSize == 1 && vChrFilterSize == 2) //unscaled RGB
3127 int chrAlpha= vChrFilter[2*dstY+1];
3128 if(flags & SWS_FULL_CHR_H_INT){
3129 yuv2rgbXinC_full(c, //FIXME write a packed1_full function
3130 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
3131 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
3132 alpSrcPtr, dest, dstW, dstY);
3133 }else{
3134 RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
3135 alpPixBuf ? *alpSrcPtr : NULL,
3136 dest, dstW, chrAlpha, dstFormat, flags, dstY);
3137 }
3138 }
3139 else if (vLumFilterSize == 2 && vChrFilterSize == 2) //bilinear upscale RGB
3140 {
3141 int lumAlpha= vLumFilter[2*dstY+1];
3142 int chrAlpha= vChrFilter[2*dstY+1];
3143 lumMmxFilter[2]=
3144 lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001;
3145 chrMmxFilter[2]=
3146 chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
3147 if(flags & SWS_FULL_CHR_H_INT){
3148 yuv2rgbXinC_full(c, //FIXME write a packed2_full function
3149 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
3150 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
3151 alpSrcPtr, dest, dstW, dstY);
3152 }else{
3153 RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
3154 alpPixBuf ? *alpSrcPtr : NULL, alpPixBuf ? *(alpSrcPtr+1) : NULL,
3155 dest, dstW, lumAlpha, chrAlpha, dstY);
3156 }
3157 }
3158 else //general RGB
3159 {
3160 if(flags & SWS_FULL_CHR_H_INT){
3161 yuv2rgbXinC_full(c,
3162 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
3163 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
3164 alpSrcPtr, dest, dstW, dstY);
3165 }else{
3166 RENAME(yuv2packedX)(c,
3167 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
3168 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
3169 alpSrcPtr, dest, dstW, dstY);
3170 }
3171 }
3172 }
3174 else // hmm looks like we can't use MMX here without overwriting this array's tail
3175 {
3176 int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
3177 int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
3178 int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
3179 if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
3180 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
3181 if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
3182 yuv2nv12XinC(
3183 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
3184 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
3185 dest, uDest, dstW, chrDstW, dstFormat);
3186 }
3187 else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12
3188 {
3189 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
3190 if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
3191 yuv2yuvXinC(
3192 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
3193 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
3194 alpSrcPtr, dest, uDest, vDest, aDest, dstW, chrDstW);
3195 }
3196 else
3197 {
3198 assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
3199 assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
3200 if(flags & SWS_FULL_CHR_H_INT){
3201 yuv2rgbXinC_full(c,
3202 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
3203 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
3204 alpSrcPtr, dest, dstW, dstY);
3205 }else{
3206 yuv2packedXinC(c,
3207 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
3208 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
3209 alpSrcPtr, dest, dstW, dstY);
3210 }
3211 }
3212 }
3213 }
3215 if ((dstFormat == PIX_FMT_YUVA420P) && !alpPixBuf)
3216 fillPlane(dst[3], dstStride[3], dstW, dstY-lastDstY, lastDstY, 255);
3218 #if HAVE_MMX
3219 if (flags & SWS_CPU_CAPS_MMX2 ) __asm__ volatile("sfence":::"memory");
3220 /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
3221 if (flags & SWS_CPU_CAPS_3DNOW) __asm__ volatile("femms" :::"memory");
3222 else __asm__ volatile("emms" :::"memory");
3223 #endif
3224 /* store changed local vars back in the context */
3225 c->dstY= dstY;
3226 c->lumBufIndex= lumBufIndex;
3227 c->chrBufIndex= chrBufIndex;
3228 c->lastInLumBuf= lastInLumBuf;
3229 c->lastInChrBuf= lastInChrBuf;
3231 return dstY - lastDstY;
3232 }