/*
 * libvo/osd_template.c — from MPlayer
 * (gitweb provenance: commit "VO, AO: remove obsolete/problematic VO/AO drivers",
 *  blob ae88af7d4a40d4f11cf7b2cc79dc6b6f169e403e)
 */
/*
 * generic alpha renderers for all YUV modes and RGB depths
 * Optimized by Nick and Michael.
 *
 * This file is part of MPlayer.
 *
 * MPlayer is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * MPlayer is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with MPlayer; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#undef PREFETCH
#undef EMMS
#undef PREFETCHW
#undef PAVGB

/* Select the prefetch/average opcodes matching the SIMD flavor this
 * template instance is compiled for (3DNow!, MMX2, or neither). */
#if HAVE_AMD3DNOW
#define PREFETCH "prefetch"
#define PREFETCHW "prefetchw"
#define PAVGB "pavgusb"
#elif HAVE_MMX2
#define PREFETCH "prefetchnta"
#define PREFETCHW "prefetcht0"
#define PAVGB "pavgb"
#else
#define PREFETCH " # nop"
#define PREFETCHW " # nop"
#endif

#if HAVE_AMD3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS "femms"
#else
#define EMMS "emms"
#endif
47 static inline void RENAME(vo_draw_alpha_yv12)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
48 int y;
49 #if defined(FAST_OSD) && !HAVE_MMX
50 w=w>>1;
51 #endif
52 #if HAVE_MMX
53 __asm__ volatile(
54 "pcmpeqb %%mm5, %%mm5\n\t" // F..F
55 "movq %%mm5, %%mm4\n\t"
56 "movq %%mm5, %%mm7\n\t"
57 "psllw $8, %%mm5\n\t" //FF00FF00FF00
58 "psrlw $8, %%mm4\n\t" //00FF00FF00FF
59 ::);
60 #endif
61 for(y=0;y<h;y++){
62 register int x;
63 #if HAVE_MMX
64 __asm__ volatile(
65 PREFETCHW" %0\n\t"
66 PREFETCH" %1\n\t"
67 PREFETCH" %2\n\t"
68 ::"m"(*dstbase),"m"(*srca),"m"(*src):"memory");
69 for(x=0;x<w;x+=8){
70 __asm__ volatile(
71 "movl %1, %%eax\n\t"
72 "orl 4%1, %%eax\n\t"
73 " jz 1f\n\t"
74 PREFETCHW" 32%0\n\t"
75 PREFETCH" 32%1\n\t"
76 PREFETCH" 32%2\n\t"
77 "movq %0, %%mm0\n\t" // dstbase
78 "movq %%mm0, %%mm1\n\t"
79 "pand %%mm4, %%mm0\n\t" //0Y0Y0Y0Y
80 "psrlw $8, %%mm1\n\t" //0Y0Y0Y0Y
81 "movq %1, %%mm2\n\t" //srca HGFEDCBA
82 "paddb %%mm7, %%mm2\n\t"
83 "movq %%mm2, %%mm3\n\t"
84 "pand %%mm4, %%mm2\n\t" //0G0E0C0A
85 "psrlw $8, %%mm3\n\t" //0H0F0D0B
86 "pmullw %%mm2, %%mm0\n\t"
87 "pmullw %%mm3, %%mm1\n\t"
88 "psrlw $8, %%mm0\n\t"
89 "pand %%mm5, %%mm1\n\t"
90 "por %%mm1, %%mm0\n\t"
91 "paddb %2, %%mm0\n\t"
92 "movq %%mm0, %0\n\t"
93 "1:\n\t"
94 :: "m" (dstbase[x]), "m" (srca[x]), "m" (src[x])
95 : "%eax");
97 #else
98 for(x=0;x<w;x++){
99 #ifdef FAST_OSD
100 if(srca[2*x+0]) dstbase[2*x+0]=src[2*x+0];
101 if(srca[2*x+1]) dstbase[2*x+1]=src[2*x+1];
102 #else
103 if(srca[x]) dstbase[x]=((dstbase[x]*srca[x])>>8)+src[x];
104 #endif
106 #endif
107 src+=srcstride;
108 srca+=srcstride;
109 dstbase+=dststride;
111 #if HAVE_MMX
112 __asm__ volatile(EMMS:::"memory");
113 #endif
114 return;
117 static inline void RENAME(vo_draw_alpha_yuy2)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
118 int y;
119 #if defined(FAST_OSD) && !HAVE_MMX
120 w=w>>1;
121 #endif
122 #if HAVE_MMX
123 __asm__ volatile(
124 "pxor %%mm7, %%mm7\n\t"
125 "pcmpeqb %%mm5, %%mm5\n\t" // F..F
126 "movq %%mm5, %%mm6\n\t"
127 "movq %%mm5, %%mm4\n\t"
128 "psllw $8, %%mm5\n\t" //FF00FF00FF00
129 "psrlw $8, %%mm4\n\t" //00FF00FF00FF
130 ::);
131 #endif
132 for(y=0;y<h;y++){
133 register int x;
134 #if HAVE_MMX
135 __asm__ volatile(
136 PREFETCHW" %0\n\t"
137 PREFETCH" %1\n\t"
138 PREFETCH" %2\n\t"
139 ::"m"(*dstbase),"m"(*srca),"m"(*src));
140 for(x=0;x<w;x+=4){
141 __asm__ volatile(
142 "movl %1, %%eax\n\t"
143 "orl %%eax, %%eax\n\t"
144 " jz 1f\n\t"
145 PREFETCHW" 32%0\n\t"
146 PREFETCH" 32%1\n\t"
147 PREFETCH" 32%2\n\t"
148 "movq %0, %%mm0\n\t" // dstbase
149 "movq %%mm0, %%mm1\n\t"
150 "pand %%mm4, %%mm0\n\t" //0Y0Y0Y0Y
151 "movd %%eax, %%mm2\n\t" //srca 0000DCBA
152 "paddb %%mm6, %%mm2\n\t"
153 "punpcklbw %%mm7, %%mm2\n\t" //srca 0D0C0B0A
154 "pmullw %%mm2, %%mm0\n\t"
155 "psrlw $8, %%mm0\n\t"
156 "pand %%mm5, %%mm1\n\t" //U0V0U0V0
157 "movd %2, %%mm2\n\t" //src 0000DCBA
158 "punpcklbw %%mm7, %%mm2\n\t" //srca 0D0C0B0A
159 "por %%mm1, %%mm0\n\t"
160 "paddb %%mm2, %%mm0\n\t"
161 "movq %%mm0, %0\n\t"
162 "1:\n\t"
163 :: "m" (dstbase[x*2]), "m" (srca[x]), "m" (src[x])
164 : "%eax");
166 #else
167 for(x=0;x<w;x++){
168 #ifdef FAST_OSD
169 if(srca[2*x+0]) dstbase[4*x+0]=src[2*x+0];
170 if(srca[2*x+1]) dstbase[4*x+2]=src[2*x+1];
171 #else
172 if(srca[x]) {
173 dstbase[2*x]=((dstbase[2*x]*srca[x])>>8)+src[x];
174 dstbase[2*x+1]=((((signed)dstbase[2*x+1]-128)*srca[x])>>8)+128;
176 #endif
178 #endif
179 src+=srcstride;
180 srca+=srcstride;
181 dstbase+=dststride;
183 #if HAVE_MMX
184 __asm__ volatile(EMMS:::"memory");
185 #endif
186 return;
189 static inline void RENAME(vo_draw_alpha_rgb24)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
190 int y;
191 #if HAVE_MMX
192 __asm__ volatile(
193 "pxor %%mm7, %%mm7\n\t"
194 "pcmpeqb %%mm6, %%mm6\n\t" // F..F
195 ::);
196 #endif
197 for(y=0;y<h;y++){
198 register unsigned char *dst = dstbase;
199 register int x;
200 #if ARCH_X86 && (!ARCH_X86_64 || HAVE_MMX)
201 #if HAVE_MMX
202 __asm__ volatile(
203 PREFETCHW" %0\n\t"
204 PREFETCH" %1\n\t"
205 PREFETCH" %2\n\t"
206 ::"m"(*dst),"m"(*srca),"m"(*src):"memory");
207 for(x=0;x<w;x+=2){
208 if(srca[x] || srca[x+1])
209 __asm__ volatile(
210 PREFETCHW" 32%0\n\t"
211 PREFETCH" 32%1\n\t"
212 PREFETCH" 32%2\n\t"
213 "movq %0, %%mm0\n\t" // dstbase
214 "movq %%mm0, %%mm1\n\t"
215 "movq %%mm0, %%mm5\n\t"
216 "punpcklbw %%mm7, %%mm0\n\t"
217 "punpckhbw %%mm7, %%mm1\n\t"
218 "movd %1, %%mm2\n\t" // srca ABCD0000
219 "paddb %%mm6, %%mm2\n\t"
220 "punpcklbw %%mm2, %%mm2\n\t" // srca AABBCCDD
221 "punpcklbw %%mm2, %%mm2\n\t" // srca AAAABBBB
222 "psrlq $8, %%mm2\n\t" // srca AAABBBB0
223 "movq %%mm2, %%mm3\n\t"
224 "punpcklbw %%mm7, %%mm2\n\t" // srca 0A0A0A0B
225 "punpckhbw %%mm7, %%mm3\n\t" // srca 0B0B0B00
226 "pmullw %%mm2, %%mm0\n\t"
227 "pmullw %%mm3, %%mm1\n\t"
228 "psrlw $8, %%mm0\n\t"
229 "psrlw $8, %%mm1\n\t"
230 "packuswb %%mm1, %%mm0\n\t"
231 "movd %2, %%mm2 \n\t" // src ABCD0000
232 "punpcklbw %%mm2, %%mm2\n\t" // src AABBCCDD
233 "punpcklbw %%mm2, %%mm2\n\t" // src AAAABBBB
234 "psrlq $8, %%mm2\n\t" // src AAABBBB0
235 "paddb %%mm2, %%mm0\n\t"
236 "pand %4, %%mm5\n\t"
237 "pand %3, %%mm0\n\t"
238 "por %%mm0, %%mm5\n\t"
239 "movq %%mm5, %0\n\t"
240 :: "m" (dst[0]), "m" (srca[x]), "m" (src[x]), "m"(mask24hl), "m"(mask24lh));
241 dst += 6;
243 #else /* HAVE_MMX */
244 for(x=0;x<w;x++){
245 if(srca[x]){
246 __asm__ volatile(
247 "movzbl (%0), %%ecx\n\t"
248 "movzbl 1(%0), %%eax\n\t"
250 "imull %1, %%ecx\n\t"
251 "imull %1, %%eax\n\t"
253 "addl %2, %%ecx\n\t"
254 "addl %2, %%eax\n\t"
256 "movb %%ch, (%0)\n\t"
257 "movb %%ah, 1(%0)\n\t"
259 "movzbl 2(%0), %%eax\n\t"
260 "imull %1, %%eax\n\t"
261 "addl %2, %%eax\n\t"
262 "movb %%ah, 2(%0)\n\t"
264 :"D" (dst),
265 "r" ((unsigned)srca[x]),
266 "r" (((unsigned)src[x])<<8)
267 :"%eax", "%ecx"
270 dst += 3;
272 #endif /* !HAVE_MMX */
273 #else /*non x86 arch or x86_64 with MMX disabled */
274 for(x=0;x<w;x++){
275 if(srca[x]){
276 #ifdef FAST_OSD
277 dst[0]=dst[1]=dst[2]=src[x];
278 #else
279 dst[0]=((dst[0]*srca[x])>>8)+src[x];
280 dst[1]=((dst[1]*srca[x])>>8)+src[x];
281 dst[2]=((dst[2]*srca[x])>>8)+src[x];
282 #endif
284 dst+=3; // 24bpp
286 #endif /* arch_x86 */
287 src+=srcstride;
288 srca+=srcstride;
289 dstbase+=dststride;
291 #if HAVE_MMX
292 __asm__ volatile(EMMS:::"memory");
293 #endif
294 return;
297 static inline void RENAME(vo_draw_alpha_rgb32)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
298 int y;
299 #if HAVE_BIGENDIAN
300 dstbase++;
301 #endif
302 #if HAVE_MMX
303 #if HAVE_AMD3DNOW
304 __asm__ volatile(
305 "pxor %%mm7, %%mm7\n\t"
306 "pcmpeqb %%mm6, %%mm6\n\t" // F..F
307 ::);
308 #else /* HAVE_AMD3DNOW */
309 __asm__ volatile(
310 "pxor %%mm7, %%mm7\n\t"
311 "pcmpeqb %%mm5, %%mm5\n\t" // F..F
312 "movq %%mm5, %%mm4\n\t"
313 "psllw $8, %%mm5\n\t" //FF00FF00FF00
314 "psrlw $8, %%mm4\n\t" //00FF00FF00FF
315 ::);
316 #endif /* HAVE_AMD3DNOW */
317 #endif /* HAVE_MMX */
318 for(y=0;y<h;y++){
319 register int x;
320 #if ARCH_X86 && (!ARCH_X86_64 || HAVE_MMX)
321 #if HAVE_MMX
322 #if HAVE_AMD3DNOW
323 __asm__ volatile(
324 PREFETCHW" %0\n\t"
325 PREFETCH" %1\n\t"
326 PREFETCH" %2\n\t"
327 ::"m"(*dstbase),"m"(*srca),"m"(*src):"memory");
328 for(x=0;x<w;x+=2){
329 if(srca[x] || srca[x+1])
330 __asm__ volatile(
331 PREFETCHW" 32%0\n\t"
332 PREFETCH" 32%1\n\t"
333 PREFETCH" 32%2\n\t"
334 "movq %0, %%mm0\n\t" // dstbase
335 "movq %%mm0, %%mm1\n\t"
336 "punpcklbw %%mm7, %%mm0\n\t"
337 "punpckhbw %%mm7, %%mm1\n\t"
338 "movd %1, %%mm2\n\t" // srca ABCD0000
339 "paddb %%mm6, %%mm2\n\t"
340 "punpcklbw %%mm2, %%mm2\n\t" // srca AABBCCDD
341 "punpcklbw %%mm2, %%mm2\n\t" // srca AAAABBBB
342 "movq %%mm2, %%mm3\n\t"
343 "punpcklbw %%mm7, %%mm2\n\t" // srca 0A0A0A0A
344 "punpckhbw %%mm7, %%mm3\n\t" // srca 0B0B0B0B
345 "pmullw %%mm2, %%mm0\n\t"
346 "pmullw %%mm3, %%mm1\n\t"
347 "psrlw $8, %%mm0\n\t"
348 "psrlw $8, %%mm1\n\t"
349 "packuswb %%mm1, %%mm0\n\t"
350 "movd %2, %%mm2 \n\t" // src ABCD0000
351 "punpcklbw %%mm2, %%mm2\n\t" // src AABBCCDD
352 "punpcklbw %%mm2, %%mm2\n\t" // src AAAABBBB
353 "paddb %%mm2, %%mm0\n\t"
354 "movq %%mm0, %0\n\t"
355 :: "m" (dstbase[4*x]), "m" (srca[x]), "m" (src[x]));
357 #else //this is faster for intels crap
358 __asm__ volatile(
359 PREFETCHW" %0\n\t"
360 PREFETCH" %1\n\t"
361 PREFETCH" %2\n\t"
362 ::"m"(*dstbase),"m"(*srca),"m"(*src):"memory");
363 for(x=0;x<w;x+=4){
364 __asm__ volatile(
365 "movl %1, %%eax\n\t"
366 "orl %%eax, %%eax\n\t"
367 " jz 1f\n\t"
368 PREFETCHW" 32%0\n\t"
369 PREFETCH" 32%1\n\t"
370 PREFETCH" 32%2\n\t"
371 "movq %0, %%mm0\n\t" // dstbase
372 "movq %%mm0, %%mm1\n\t"
373 "pand %%mm4, %%mm0\n\t" //0R0B0R0B
374 "psrlw $8, %%mm1\n\t" //0?0G0?0G
375 "movd %%eax, %%mm2\n\t" //srca 0000DCBA
376 "paddb %3, %%mm2\n\t"
377 "punpcklbw %%mm2, %%mm2\n\t" //srca DDCCBBAA
378 "movq %%mm2, %%mm3\n\t"
379 "punpcklbw %%mm7, %%mm2\n\t" //srca 0B0B0A0A
380 "pmullw %%mm2, %%mm0\n\t"
381 "pmullw %%mm2, %%mm1\n\t"
382 "psrlw $8, %%mm0\n\t"
383 "pand %%mm5, %%mm1\n\t"
384 "por %%mm1, %%mm0\n\t"
385 "movd %2, %%mm2 \n\t" //src 0000DCBA
386 "punpcklbw %%mm2, %%mm2\n\t" //src DDCCBBAA
387 "movq %%mm2, %%mm6\n\t"
388 "punpcklbw %%mm2, %%mm2\n\t" //src BBBBAAAA
389 "paddb %%mm2, %%mm0\n\t"
390 "movq %%mm0, %0\n\t"
392 "movq 8%0, %%mm0\n\t" // dstbase
393 "movq %%mm0, %%mm1\n\t"
394 "pand %%mm4, %%mm0\n\t" //0R0B0R0B
395 "psrlw $8, %%mm1\n\t" //0?0G0?0G
396 "punpckhbw %%mm7, %%mm3\n\t" //srca 0D0D0C0C
397 "pmullw %%mm3, %%mm0\n\t"
398 "pmullw %%mm3, %%mm1\n\t"
399 "psrlw $8, %%mm0\n\t"
400 "pand %%mm5, %%mm1\n\t"
401 "por %%mm1, %%mm0\n\t"
402 "punpckhbw %%mm6, %%mm6\n\t" //src DDDDCCCC
403 "paddb %%mm6, %%mm0\n\t"
404 "movq %%mm0, 8%0\n\t"
405 "1:\n\t"
406 :: "m" (dstbase[4*x]), "m" (srca[x]), "m" (src[x]), "m" (bFF)
407 : "%eax");
409 #endif
410 #else /* HAVE_MMX */
411 for(x=0;x<w;x++){
412 if(srca[x]){
413 __asm__ volatile(
414 "movzbl (%0), %%ecx\n\t"
415 "movzbl 1(%0), %%eax\n\t"
416 "movzbl 2(%0), %%edx\n\t"
418 "imull %1, %%ecx\n\t"
419 "imull %1, %%eax\n\t"
420 "imull %1, %%edx\n\t"
422 "addl %2, %%ecx\n\t"
423 "addl %2, %%eax\n\t"
424 "addl %2, %%edx\n\t"
426 "movb %%ch, (%0)\n\t"
427 "movb %%ah, 1(%0)\n\t"
428 "movb %%dh, 2(%0)\n\t"
431 :"r" (&dstbase[4*x]),
432 "r" ((unsigned)srca[x]),
433 "r" (((unsigned)src[x])<<8)
434 :"%eax", "%ecx", "%edx"
438 #endif /* HAVE_MMX */
439 #else /*non x86 arch or x86_64 with MMX disabled */
440 for(x=0;x<w;x++){
441 if(srca[x]){
442 #ifdef FAST_OSD
443 dstbase[4*x+0]=dstbase[4*x+1]=dstbase[4*x+2]=src[x];
444 #else
445 dstbase[4*x+0]=((dstbase[4*x+0]*srca[x])>>8)+src[x];
446 dstbase[4*x+1]=((dstbase[4*x+1]*srca[x])>>8)+src[x];
447 dstbase[4*x+2]=((dstbase[4*x+2]*srca[x])>>8)+src[x];
448 #endif
451 #endif /* arch_x86 */
452 src+=srcstride;
453 srca+=srcstride;
454 dstbase+=dststride;
456 #if HAVE_MMX
457 __asm__ volatile(EMMS:::"memory");
458 #endif
459 return;