libvo/osd_template.c
/*
 * generic alpha renderers for all YUV modes and RGB depths
 * Optimized by Nick and Michael.
 *
 * This file is part of MPlayer.
 *
 * MPlayer is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * MPlayer is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with MPlayer; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
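
/*
 * Usage sketch (an assumption, not stated in this file): the functions below
 * are written as a template.  The including source is expected to define
 * RENAME() before including this file, once per CPU variant, so that each
 * variant gets its own copy of the renderers, e.g.:
 *
 *     #define RENAME(a) a ## _MMX
 *     #include "osd_template.c"
 *
 * The HAVE_MMX / HAVE_MMX2 / HAVE_AMD3DNOW macros then select the matching
 * code paths inside each function.
 */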
#undef PREFETCH
#undef EMMS
#undef PREFETCHW
#undef PAVGB

#if HAVE_AMD3DNOW
#define PREFETCH  "prefetch"
#define PREFETCHW "prefetchw"
#define PAVGB     "pavgusb"
#elif HAVE_MMX2
#define PREFETCH  "prefetchnta"
#define PREFETCHW "prefetcht0"
#define PAVGB     "pavgb"
#else
#define PREFETCH  " # nop"
#define PREFETCHW " # nop"
#endif

#if HAVE_AMD3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS     "femms"
#else
#define EMMS     "emms"
#endif
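
/*
 * All renderers below implement the same per-pixel blend, spelled out in the
 * plain C fallback paths:
 *
 *     if (srca[x])
 *         dst = ((dst * srca[x]) >> 8) + src[x];
 *
 * i.e. srca == 0 leaves the destination pixel untouched; any other value
 * attenuates the destination by srca/256 and adds the OSD value src on top.
 * The MMX/3DNow! paths are vectorized versions of this formula, with an
 * early skip when a whole group of alpha bytes is zero.
 */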
static inline void RENAME(vo_draw_alpha_yv12)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
    int y;
#if HAVE_MMX
    __asm__ volatile(
        "pcmpeqb %%mm5, %%mm5\n\t" // F..F
        "movq %%mm5, %%mm4\n\t"
        "movq %%mm5, %%mm7\n\t"
        "psllw $8, %%mm5\n\t" //FF00FF00FF00
        "psrlw $8, %%mm4\n\t" //00FF00FF00FF
        ::);
#endif
    for(y=0;y<h;y++){
        register int x;
#if HAVE_MMX
        __asm__ volatile(
            PREFETCHW" %0\n\t"
            PREFETCH" %1\n\t"
            PREFETCH" %2\n\t"
            ::"m"(*dstbase),"m"(*srca),"m"(*src):"memory");
        for(x=0;x<w;x+=8){ // 8 luma pixels per iteration
            __asm__ volatile(
                "movl %1, %%eax\n\t"
                "orl 4%1, %%eax\n\t"
                " jz 1f\n\t"                 // skip the block if all 8 alpha bytes are zero
                PREFETCHW" 32%0\n\t"
                PREFETCH" 32%1\n\t"
                PREFETCH" 32%2\n\t"
                "movq %0, %%mm0\n\t" // dstbase
                "movq %%mm0, %%mm1\n\t"
                "pand %%mm4, %%mm0\n\t"      //0Y0Y0Y0Y
                "psrlw $8, %%mm1\n\t"        //0Y0Y0Y0Y
                "movq %1, %%mm2\n\t"         //srca HGFEDCBA
                "paddb %%mm7, %%mm2\n\t"
                "movq %%mm2, %%mm3\n\t"
                "pand %%mm4, %%mm2\n\t"      //0G0E0C0A
                "psrlw $8, %%mm3\n\t"        //0H0F0D0B
                "pmullw %%mm2, %%mm0\n\t"
                "pmullw %%mm3, %%mm1\n\t"
                "psrlw $8, %%mm0\n\t"
                "pand %%mm5, %%mm1\n\t"
                "por %%mm1, %%mm0\n\t"
                "paddb %2, %%mm0\n\t"
                "movq %%mm0, %0\n\t"
                "1:\n\t"
                :: "m" (dstbase[x]), "m" (srca[x]), "m" (src[x])
                : "%eax");
        }
#else
        for(x=0;x<w;x++){
            if(srca[x]) dstbase[x]=((dstbase[x]*srca[x])>>8)+src[x];
        }
#endif
        src+=srcstride;
        srca+=srcstride;
        dstbase+=dststride;
    }
#if HAVE_MMX
    __asm__ volatile(EMMS:::"memory");
#endif
    return;
}
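
/*
 * YUY2 layout note: pixels are packed Y U Y V ..., so for OSD pixel x the
 * luma byte is dstbase[2*x] and the neighbouring chroma byte is
 * dstbase[2*x+1].  The C fallback pulls the chroma towards the neutral value
 * 128, while the MMX path masks the chroma bytes through unchanged; only the
 * luma ever gets the OSD value added.
 */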
static inline void RENAME(vo_draw_alpha_yuy2)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
    int y;
#if HAVE_MMX
    __asm__ volatile(
        "pxor %%mm7, %%mm7\n\t"
        "pcmpeqb %%mm5, %%mm5\n\t" // F..F
        "movq %%mm5, %%mm6\n\t"
        "movq %%mm5, %%mm4\n\t"
        "psllw $8, %%mm5\n\t" //FF00FF00FF00
        "psrlw $8, %%mm4\n\t" //00FF00FF00FF
        ::);
#endif
    for(y=0;y<h;y++){
        register int x;
#if HAVE_MMX
        __asm__ volatile(
            PREFETCHW" %0\n\t"
            PREFETCH" %1\n\t"
            PREFETCH" %2\n\t"
            ::"m"(*dstbase),"m"(*srca),"m"(*src));
        for(x=0;x<w;x+=4){ // 4 OSD pixels (8 destination bytes) per iteration
            __asm__ volatile(
                "movl %1, %%eax\n\t"
                "orl %%eax, %%eax\n\t"
                " jz 1f\n\t"                 // skip the block if all 4 alpha bytes are zero
                PREFETCHW" 32%0\n\t"
                PREFETCH" 32%1\n\t"
                PREFETCH" 32%2\n\t"
                "movq %0, %%mm0\n\t" // dstbase
                "movq %%mm0, %%mm1\n\t"
                "pand %%mm4, %%mm0\n\t"      //0Y0Y0Y0Y
                "movd %%eax, %%mm2\n\t"      //srca 0000DCBA
                "paddb %%mm6, %%mm2\n\t"
                "punpcklbw %%mm7, %%mm2\n\t" //srca 0D0C0B0A
                "pmullw %%mm2, %%mm0\n\t"
                "psrlw $8, %%mm0\n\t"
                "pand %%mm5, %%mm1\n\t"      //U0V0U0V0
                "movd %2, %%mm2\n\t"         //src 0000DCBA
                "punpcklbw %%mm7, %%mm2\n\t" //src 0D0C0B0A
                "por %%mm1, %%mm0\n\t"
                "paddb %%mm2, %%mm0\n\t"
                "movq %%mm0, %0\n\t"
                "1:\n\t"
                :: "m" (dstbase[x*2]), "m" (srca[x]), "m" (src[x])
                : "%eax");
        }
#else
        for(x=0;x<w;x++){
            if(srca[x]) {
                dstbase[2*x]=((dstbase[2*x]*srca[x])>>8)+src[x];
                dstbase[2*x+1]=((((signed)dstbase[2*x+1]-128)*srca[x])>>8)+128;
            }
        }
#endif
        src+=srcstride;
        srca+=srcstride;
        dstbase+=dststride;
    }
#if HAVE_MMX
    __asm__ volatile(EMMS:::"memory");
#endif
    return;
}
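
/*
 * RGB24/BGR24 note: every OSD pixel covers three destination bytes that all
 * receive the same (grey) OSD value.  The MMX path blends two pixels, i.e.
 * six bytes, per iteration but loads and stores a full 8-byte quadword;
 * mask24hl/mask24lh (constants defined outside this template) merge the
 * result back so the two trailing bytes of the quadword stay untouched.
 */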
static inline void RENAME(vo_draw_alpha_rgb24)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
    int y;
#if HAVE_MMX
    __asm__ volatile(
        "pxor %%mm7, %%mm7\n\t"
        "pcmpeqb %%mm6, %%mm6\n\t" // F..F
        ::);
#endif
    for(y=0;y<h;y++){
        register unsigned char *dst = dstbase;
        register int x;
#if ARCH_X86 && (!ARCH_X86_64 || HAVE_MMX)
#if HAVE_MMX
        __asm__ volatile(
            PREFETCHW" %0\n\t"
            PREFETCH" %1\n\t"
            PREFETCH" %2\n\t"
            ::"m"(*dst),"m"(*srca),"m"(*src):"memory");
        for(x=0;x<w;x+=2){ // 2 pixels (6 bytes) per iteration
            if(srca[x] || srca[x+1])
                __asm__ volatile(
                    PREFETCHW" 32%0\n\t"
                    PREFETCH" 32%1\n\t"
                    PREFETCH" 32%2\n\t"
                    "movq %0, %%mm0\n\t" // dstbase
                    "movq %%mm0, %%mm1\n\t"
                    "movq %%mm0, %%mm5\n\t"
                    "punpcklbw %%mm7, %%mm0\n\t"
                    "punpckhbw %%mm7, %%mm1\n\t"
                    "movd %1, %%mm2\n\t" // srca ABCD0000
                    "paddb %%mm6, %%mm2\n\t"
                    "punpcklbw %%mm2, %%mm2\n\t" // srca AABBCCDD
                    "punpcklbw %%mm2, %%mm2\n\t" // srca AAAABBBB
                    "psrlq $8, %%mm2\n\t" // srca AAABBBB0
                    "movq %%mm2, %%mm3\n\t"
                    "punpcklbw %%mm7, %%mm2\n\t" // srca 0A0A0A0B
                    "punpckhbw %%mm7, %%mm3\n\t" // srca 0B0B0B00
                    "pmullw %%mm2, %%mm0\n\t"
                    "pmullw %%mm3, %%mm1\n\t"
                    "psrlw $8, %%mm0\n\t"
                    "psrlw $8, %%mm1\n\t"
                    "packuswb %%mm1, %%mm0\n\t"
                    "movd %2, %%mm2\n\t" // src ABCD0000
                    "punpcklbw %%mm2, %%mm2\n\t" // src AABBCCDD
                    "punpcklbw %%mm2, %%mm2\n\t" // src AAAABBBB
                    "psrlq $8, %%mm2\n\t" // src AAABBBB0
                    "paddb %%mm2, %%mm0\n\t"
                    "pand %4, %%mm5\n\t"
                    "pand %3, %%mm0\n\t"
                    "por %%mm0, %%mm5\n\t"
                    "movq %%mm5, %0\n\t"
                    :: "m" (dst[0]), "m" (srca[x]), "m" (src[x]), "m"(mask24hl), "m"(mask24lh));
            dst += 6;
        }
#else /* HAVE_MMX */
        for(x=0;x<w;x++){
            if(srca[x]){
                __asm__ volatile(
                    "movzbl (%0), %%ecx\n\t"
                    "movzbl 1(%0), %%eax\n\t"

                    "imull %1, %%ecx\n\t"
                    "imull %1, %%eax\n\t"

                    "addl %2, %%ecx\n\t"
                    "addl %2, %%eax\n\t"

                    "movb %%ch, (%0)\n\t"
                    "movb %%ah, 1(%0)\n\t"

                    "movzbl 2(%0), %%eax\n\t"
                    "imull %1, %%eax\n\t"
                    "addl %2, %%eax\n\t"
                    "movb %%ah, 2(%0)\n\t"
                    :
                    :"D" (dst),
                     "r" ((unsigned)srca[x]),
                     "r" (((unsigned)src[x])<<8)
                    :"%eax", "%ecx"
                    );
            }
            dst += 3;
        }
#endif /* !HAVE_MMX */
#else /* non-x86 arch or x86_64 with MMX disabled */
        for(x=0;x<w;x++){
            if(srca[x]){
                dst[0]=((dst[0]*srca[x])>>8)+src[x];
                dst[1]=((dst[1]*srca[x])>>8)+src[x];
                dst[2]=((dst[2]*srca[x])>>8)+src[x];
            }
            dst+=3; // 24bpp
        }
#endif /* arch_x86 */
        src+=srcstride;
        srca+=srcstride;
        dstbase+=dststride;
    }
#if HAVE_MMX
    __asm__ volatile(EMMS:::"memory");
#endif
    return;
}
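
/*
 * RGB32/BGR32 note: four bytes per pixel, of which only the three colour
 * bytes are blended (the C fallback touches dstbase[4*x+0..2]).  On
 * big-endian targets dstbase is advanced by one byte up front so that it is
 * the padding/alpha byte that gets skipped rather than a colour byte.
 */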
static inline void RENAME(vo_draw_alpha_rgb32)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
    int y;
#if HAVE_BIGENDIAN
    dstbase++;
#endif
#if HAVE_MMX
#if HAVE_AMD3DNOW
    __asm__ volatile(
        "pxor %%mm7, %%mm7\n\t"
        "pcmpeqb %%mm6, %%mm6\n\t" // F..F
        ::);
#else /* HAVE_AMD3DNOW */
    __asm__ volatile(
        "pxor %%mm7, %%mm7\n\t"
        "pcmpeqb %%mm5, %%mm5\n\t" // F..F
        "movq %%mm5, %%mm4\n\t"
        "psllw $8, %%mm5\n\t" //FF00FF00FF00
        "psrlw $8, %%mm4\n\t" //00FF00FF00FF
        ::);
#endif /* HAVE_AMD3DNOW */
#endif /* HAVE_MMX */
    for(y=0;y<h;y++){
        register int x;
#if ARCH_X86 && (!ARCH_X86_64 || HAVE_MMX)
#if HAVE_MMX
#if HAVE_AMD3DNOW
        __asm__ volatile(
            PREFETCHW" %0\n\t"
            PREFETCH" %1\n\t"
            PREFETCH" %2\n\t"
            ::"m"(*dstbase),"m"(*srca),"m"(*src):"memory");
        for(x=0;x<w;x+=2){ // 2 pixels (8 bytes) per iteration
            if(srca[x] || srca[x+1])
                __asm__ volatile(
                    PREFETCHW" 32%0\n\t"
                    PREFETCH" 32%1\n\t"
                    PREFETCH" 32%2\n\t"
                    "movq %0, %%mm0\n\t" // dstbase
                    "movq %%mm0, %%mm1\n\t"
                    "punpcklbw %%mm7, %%mm0\n\t"
                    "punpckhbw %%mm7, %%mm1\n\t"
                    "movd %1, %%mm2\n\t" // srca ABCD0000
                    "paddb %%mm6, %%mm2\n\t"
                    "punpcklbw %%mm2, %%mm2\n\t" // srca AABBCCDD
                    "punpcklbw %%mm2, %%mm2\n\t" // srca AAAABBBB
                    "movq %%mm2, %%mm3\n\t"
                    "punpcklbw %%mm7, %%mm2\n\t" // srca 0A0A0A0A
                    "punpckhbw %%mm7, %%mm3\n\t" // srca 0B0B0B0B
                    "pmullw %%mm2, %%mm0\n\t"
                    "pmullw %%mm3, %%mm1\n\t"
                    "psrlw $8, %%mm0\n\t"
                    "psrlw $8, %%mm1\n\t"
                    "packuswb %%mm1, %%mm0\n\t"
                    "movd %2, %%mm2\n\t" // src ABCD0000
                    "punpcklbw %%mm2, %%mm2\n\t" // src AABBCCDD
                    "punpcklbw %%mm2, %%mm2\n\t" // src AAAABBBB
                    "paddb %%mm2, %%mm0\n\t"
                    "movq %%mm0, %0\n\t"
                    :: "m" (dstbase[4*x]), "m" (srca[x]), "m" (src[x]));
        }
#else // this variant is faster on Intel CPUs
        __asm__ volatile(
            PREFETCHW" %0\n\t"
            PREFETCH" %1\n\t"
            PREFETCH" %2\n\t"
            ::"m"(*dstbase),"m"(*srca),"m"(*src):"memory");
        for(x=0;x<w;x+=4){ // 4 pixels (16 bytes) per iteration
            __asm__ volatile(
                "movl %1, %%eax\n\t"
                "orl %%eax, %%eax\n\t"
                " jz 1f\n\t"                 // skip the block if all 4 alpha bytes are zero
                PREFETCHW" 32%0\n\t"
                PREFETCH" 32%1\n\t"
                PREFETCH" 32%2\n\t"
                "movq %0, %%mm0\n\t" // dstbase
                "movq %%mm0, %%mm1\n\t"
                "pand %%mm4, %%mm0\n\t"      //0R0B0R0B
                "psrlw $8, %%mm1\n\t"        //0?0G0?0G
                "movd %%eax, %%mm2\n\t"      //srca 0000DCBA
                "paddb %3, %%mm2\n\t"
                "punpcklbw %%mm2, %%mm2\n\t" //srca DDCCBBAA
                "movq %%mm2, %%mm3\n\t"
                "punpcklbw %%mm7, %%mm2\n\t" //srca 0B0B0A0A
                "pmullw %%mm2, %%mm0\n\t"
                "pmullw %%mm2, %%mm1\n\t"
                "psrlw $8, %%mm0\n\t"
                "pand %%mm5, %%mm1\n\t"
                "por %%mm1, %%mm0\n\t"
                "movd %2, %%mm2\n\t"         //src 0000DCBA
                "punpcklbw %%mm2, %%mm2\n\t" //src DDCCBBAA
                "movq %%mm2, %%mm6\n\t"
                "punpcklbw %%mm2, %%mm2\n\t" //src BBBBAAAA
                "paddb %%mm2, %%mm0\n\t"
                "movq %%mm0, %0\n\t"

                "movq 8%0, %%mm0\n\t" // dstbase
                "movq %%mm0, %%mm1\n\t"
                "pand %%mm4, %%mm0\n\t"      //0R0B0R0B
                "psrlw $8, %%mm1\n\t"        //0?0G0?0G
                "punpckhbw %%mm7, %%mm3\n\t" //srca 0D0D0C0C
                "pmullw %%mm3, %%mm0\n\t"
                "pmullw %%mm3, %%mm1\n\t"
                "psrlw $8, %%mm0\n\t"
                "pand %%mm5, %%mm1\n\t"
                "por %%mm1, %%mm0\n\t"
                "punpckhbw %%mm6, %%mm6\n\t" //src DDDDCCCC
                "paddb %%mm6, %%mm0\n\t"
                "movq %%mm0, 8%0\n\t"
                "1:\n\t"
                :: "m" (dstbase[4*x]), "m" (srca[x]), "m" (src[x]), "m" (bFF)
                : "%eax");
        }
#endif
#else /* HAVE_MMX */
        for(x=0;x<w;x++){
            if(srca[x]){
                __asm__ volatile(
                    "movzbl (%0), %%ecx\n\t"
                    "movzbl 1(%0), %%eax\n\t"
                    "movzbl 2(%0), %%edx\n\t"

                    "imull %1, %%ecx\n\t"
                    "imull %1, %%eax\n\t"
                    "imull %1, %%edx\n\t"

                    "addl %2, %%ecx\n\t"
                    "addl %2, %%eax\n\t"
                    "addl %2, %%edx\n\t"

                    "movb %%ch, (%0)\n\t"
                    "movb %%ah, 1(%0)\n\t"
                    "movb %%dh, 2(%0)\n\t"

                    :
                    :"r" (&dstbase[4*x]),
                     "r" ((unsigned)srca[x]),
                     "r" (((unsigned)src[x])<<8)
                    :"%eax", "%ecx", "%edx"
                    );
            }
        }
#endif /* HAVE_MMX */
#else /* non-x86 arch or x86_64 with MMX disabled */
        for(x=0;x<w;x++){
            if(srca[x]){
                dstbase[4*x+0]=((dstbase[4*x+0]*srca[x])>>8)+src[x];
                dstbase[4*x+1]=((dstbase[4*x+1]*srca[x])>>8)+src[x];
                dstbase[4*x+2]=((dstbase[4*x+2]*srca[x])>>8)+src[x];
            }
        }
#endif /* arch_x86 */
        src+=srcstride;
        srca+=srcstride;
        dstbase+=dststride;
    }
#if HAVE_MMX
    __asm__ volatile(EMMS:::"memory");
#endif
    return;
}