/*
 * generic alpha renderers for all YUV modes and RGB depths
 * Optimized by Nick and Michael.
 *
 * This file is part of MPlayer.
 *
 * MPlayer is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * MPlayer is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with MPlayer; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#undef PREFETCH
#undef EMMS
#undef PREFETCHW
#undef PAVGB

#if HAVE_AMD3DNOW
#define PREFETCH  "prefetch"
#define PREFETCHW "prefetchw"
#define PAVGB     "pavgusb"
#elif HAVE_MMX2
#define PREFETCH  "prefetchnta"
#define PREFETCHW "prefetcht0"
#define PAVGB     "pavgb"
#else
#define PREFETCH  " # nop"
#define PREFETCHW " # nop"
#endif

#if HAVE_AMD3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS     "femms"
#else
#define EMMS     "emms"
#endif
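
/*
 * Overview: each renderer below walks the OSD bitmap row by row and, for
 * every pixel whose alpha value srca[x] is non-zero, blends
 *
 *     dst = ((dst * srca[x]) >> 8) + src[x]
 *
 * i.e. the destination sample is attenuated by srca[x]/256 and the OSD
 * value src[x] is added on top (see the plain C fallback paths).  The MMX
 * paths compute the same formula for several pixels per iteration.
 * RENAME() is presumably defined by the file that includes this template,
 * so each instruction-set variant gets its own function name.
 */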
static inline void RENAME(vo_draw_alpha_yv12)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
    int y;
#if defined(FAST_OSD) && !HAVE_MMX
    w=w>>1;
#endif
#if HAVE_MMX
    __asm__ volatile(
        "pcmpeqb %%mm5, %%mm5\n\t" // F..F
        "movq %%mm5, %%mm4\n\t"
        "movq %%mm5, %%mm7\n\t"
        "psllw $8, %%mm5\n\t" //FF00FF00FF00
        "psrlw $8, %%mm4\n\t" //00FF00FF00FF
        ::);
#endif
    for(y=0;y<h;y++){
        register int x;
#if HAVE_MMX
        __asm__ volatile(
            PREFETCHW" %0\n\t"
            PREFETCH" %1\n\t"
            PREFETCH" %2\n\t"
            ::"m"(*dstbase),"m"(*srca),"m"(*src):"memory");
        for(x=0;x<w;x+=8){
            __asm__ volatile(
                "movl %1, %%eax\n\t"
                "orl 4%1, %%eax\n\t"
                " jz 1f\n\t"
                PREFETCHW" 32%0\n\t"
                PREFETCH" 32%1\n\t"
                PREFETCH" 32%2\n\t"
                "movq %0, %%mm0\n\t" // dstbase
                "movq %%mm0, %%mm1\n\t"
                "pand %%mm4, %%mm0\n\t" //0Y0Y0Y0Y
                "psrlw $8, %%mm1\n\t"   //0Y0Y0Y0Y
                "movq %1, %%mm2\n\t"    //srca HGFEDCBA
                "paddb %%mm7, %%mm2\n\t"
                "movq %%mm2, %%mm3\n\t"
                "pand %%mm4, %%mm2\n\t" //0G0E0C0A
                "psrlw $8, %%mm3\n\t"   //0H0F0D0B
                "pmullw %%mm2, %%mm0\n\t"
                "pmullw %%mm3, %%mm1\n\t"
                "psrlw $8, %%mm0\n\t"
                "pand %%mm5, %%mm1\n\t"
                "por %%mm1, %%mm0\n\t"
                "paddb %2, %%mm0\n\t"
                "movq %%mm0, %0\n\t"
                "1:\n\t"
                :: "m" (dstbase[x]), "m" (srca[x]), "m" (src[x])
                : "%eax");
        }
#else
        for(x=0;x<w;x++){
#ifdef FAST_OSD
            if(srca[2*x+0]) dstbase[2*x+0]=src[2*x+0];
            if(srca[2*x+1]) dstbase[2*x+1]=src[2*x+1];
#else
            if(srca[x]) dstbase[x]=((dstbase[x]*srca[x])>>8)+src[x];
#endif
        }
#endif
        src+=srcstride;
        srca+=srcstride;
        dstbase+=dststride;
    }
#if HAVE_MMX
    __asm__ volatile(EMMS:::"memory");
#endif
    return;
}
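
/*
 * YUY2 layout: packed Y U Y V, so luma lives in the even bytes of the
 * destination.  The MMX path blends only the luma bytes and leaves the
 * chroma bytes untouched; the plain C path additionally pulls the chroma
 * byte towards the neutral value 128 where the OSD is drawn.
 */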
static inline void RENAME(vo_draw_alpha_yuy2)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
    int y;
#if defined(FAST_OSD) && !HAVE_MMX
    w=w>>1;
#endif
#if HAVE_MMX
    __asm__ volatile(
        "pxor %%mm7, %%mm7\n\t"
        "pcmpeqb %%mm5, %%mm5\n\t" // F..F
        "movq %%mm5, %%mm6\n\t"
        "movq %%mm5, %%mm4\n\t"
        "psllw $8, %%mm5\n\t" //FF00FF00FF00
        "psrlw $8, %%mm4\n\t" //00FF00FF00FF
        ::);
#endif
    for(y=0;y<h;y++){
        register int x;
#if HAVE_MMX
        __asm__ volatile(
            PREFETCHW" %0\n\t"
            PREFETCH" %1\n\t"
            PREFETCH" %2\n\t"
            ::"m"(*dstbase),"m"(*srca),"m"(*src));
        for(x=0;x<w;x+=4){
            __asm__ volatile(
                "movl %1, %%eax\n\t"
                "orl %%eax, %%eax\n\t"
                " jz 1f\n\t"
                PREFETCHW" 32%0\n\t"
                PREFETCH" 32%1\n\t"
                PREFETCH" 32%2\n\t"
                "movq %0, %%mm0\n\t" // dstbase
                "movq %%mm0, %%mm1\n\t"
                "pand %%mm4, %%mm0\n\t" //0Y0Y0Y0Y
                "movd %%eax, %%mm2\n\t" //srca 0000DCBA
                "paddb %%mm6, %%mm2\n\t"
                "punpcklbw %%mm7, %%mm2\n\t" //srca 0D0C0B0A
                "pmullw %%mm2, %%mm0\n\t"
                "psrlw $8, %%mm0\n\t"
                "pand %%mm5, %%mm1\n\t" //U0V0U0V0
                "movd %2, %%mm2\n\t" //src 0000DCBA
                "punpcklbw %%mm7, %%mm2\n\t" //src 0D0C0B0A
                "por %%mm1, %%mm0\n\t"
                "paddb %%mm2, %%mm0\n\t"
                "movq %%mm0, %0\n\t"
                "1:\n\t"
                :: "m" (dstbase[x*2]), "m" (srca[x]), "m" (src[x])
                : "%eax");
        }
#else
        for(x=0;x<w;x++){
#ifdef FAST_OSD
            if(srca[2*x+0]) dstbase[4*x+0]=src[2*x+0];
            if(srca[2*x+1]) dstbase[4*x+2]=src[2*x+1];
#else
            if(srca[x]) {
                dstbase[2*x]=((dstbase[2*x]*srca[x])>>8)+src[x];
                dstbase[2*x+1]=((((signed)dstbase[2*x+1]-128)*srca[x])>>8)+128;
            }
#endif
        }
#endif
        src+=srcstride;
        srca+=srcstride;
        dstbase+=dststride;
    }
#if HAVE_MMX
    __asm__ volatile(EMMS:::"memory");
#endif
    return;
}
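
/*
 * UYVY is handled like YUY2 but with the byte order swapped (chroma in the
 * even bytes, luma in the odd ones); note there is no MMX variant for this
 * format.
 */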
static inline void RENAME(vo_draw_alpha_uyvy)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
    int y;
#if defined(FAST_OSD)
    w=w>>1;
#endif
    for(y=0;y<h;y++){
        register int x;
        for(x=0;x<w;x++){
#ifdef FAST_OSD
            if(srca[2*x+0]) dstbase[4*x+2]=src[2*x+0];
            if(srca[2*x+1]) dstbase[4*x+0]=src[2*x+1];
#else
            if(srca[x]) {
                dstbase[2*x+1]=((dstbase[2*x+1]*srca[x])>>8)+src[x];
                dstbase[2*x]=((((signed)dstbase[2*x]-128)*srca[x])>>8)+128;
            }
#endif
        }
        src+=srcstride;
        srca+=srcstride;
        dstbase+=dststride;
    }
}
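
/*
 * RGB24/BGR24: three bytes per pixel, and the OSD is greyscale, so the same
 * blended value goes into all three channels.  The MMX path works on two
 * pixels (six bytes) per iteration and relies on the mask24hl / mask24lh
 * constants (defined outside this template) to leave the remaining two
 * bytes of each 8-byte store unchanged.
 */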
static inline void RENAME(vo_draw_alpha_rgb24)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
    int y;
#if HAVE_MMX
    __asm__ volatile(
        "pxor %%mm7, %%mm7\n\t"
        "pcmpeqb %%mm6, %%mm6\n\t" // F..F
        ::);
#endif
    for(y=0;y<h;y++){
        register unsigned char *dst = dstbase;
        register int x;
#if ARCH_X86 && (!ARCH_X86_64 || HAVE_MMX)
#if HAVE_MMX
        __asm__ volatile(
            PREFETCHW" %0\n\t"
            PREFETCH" %1\n\t"
            PREFETCH" %2\n\t"
            ::"m"(*dst),"m"(*srca),"m"(*src):"memory");
        for(x=0;x<w;x+=2){
            if(srca[x] || srca[x+1])
                __asm__ volatile(
                    PREFETCHW" 32%0\n\t"
                    PREFETCH" 32%1\n\t"
                    PREFETCH" 32%2\n\t"
                    "movq %0, %%mm0\n\t" // dstbase
                    "movq %%mm0, %%mm1\n\t"
                    "movq %%mm0, %%mm5\n\t"
                    "punpcklbw %%mm7, %%mm0\n\t"
                    "punpckhbw %%mm7, %%mm1\n\t"
                    "movd %1, %%mm2\n\t" // srca ABCD0000
                    "paddb %%mm6, %%mm2\n\t"
                    "punpcklbw %%mm2, %%mm2\n\t" // srca AABBCCDD
                    "punpcklbw %%mm2, %%mm2\n\t" // srca AAAABBBB
                    "psrlq $8, %%mm2\n\t" // srca AAABBBB0
                    "movq %%mm2, %%mm3\n\t"
                    "punpcklbw %%mm7, %%mm2\n\t" // srca 0A0A0A0B
                    "punpckhbw %%mm7, %%mm3\n\t" // srca 0B0B0B00
                    "pmullw %%mm2, %%mm0\n\t"
                    "pmullw %%mm3, %%mm1\n\t"
                    "psrlw $8, %%mm0\n\t"
                    "psrlw $8, %%mm1\n\t"
                    "packuswb %%mm1, %%mm0\n\t"
                    "movd %2, %%mm2\n\t" // src ABCD0000
                    "punpcklbw %%mm2, %%mm2\n\t" // src AABBCCDD
                    "punpcklbw %%mm2, %%mm2\n\t" // src AAAABBBB
                    "psrlq $8, %%mm2\n\t" // src AAABBBB0
                    "paddb %%mm2, %%mm0\n\t"
                    "pand %4, %%mm5\n\t"
                    "pand %3, %%mm0\n\t"
                    "por %%mm0, %%mm5\n\t"
                    "movq %%mm5, %0\n\t"
                    :: "m" (dst[0]), "m" (srca[x]), "m" (src[x]), "m"(mask24hl), "m"(mask24lh));
            dst += 6;
        }
#else /* HAVE_MMX */
        for(x=0;x<w;x++){
            if(srca[x]){
                __asm__ volatile(
                    "movzbl (%0), %%ecx\n\t"
                    "movzbl 1(%0), %%eax\n\t"

                    "imull %1, %%ecx\n\t"
                    "imull %1, %%eax\n\t"

                    "addl %2, %%ecx\n\t"
                    "addl %2, %%eax\n\t"

                    "movb %%ch, (%0)\n\t"
                    "movb %%ah, 1(%0)\n\t"

                    "movzbl 2(%0), %%eax\n\t"
                    "imull %1, %%eax\n\t"
                    "addl %2, %%eax\n\t"
                    "movb %%ah, 2(%0)\n\t"
                    :
                    :"D" (dst),
                     "r" ((unsigned)srca[x]),
                     "r" (((unsigned)src[x])<<8)
                    :"%eax", "%ecx"
                    );
            }
            dst += 3;
        }
#endif /* !HAVE_MMX */
#else /*non x86 arch or x86_64 with MMX disabled */
        for(x=0;x<w;x++){
            if(srca[x]){
#ifdef FAST_OSD
                dst[0]=dst[1]=dst[2]=src[x];
#else
                dst[0]=((dst[0]*srca[x])>>8)+src[x];
                dst[1]=((dst[1]*srca[x])>>8)+src[x];
                dst[2]=((dst[2]*srca[x])>>8)+src[x];
#endif
            }
            dst+=3; // 24bpp
        }
#endif /* arch_x86 */
        src+=srcstride;
        srca+=srcstride;
        dstbase+=dststride;
    }
#if HAVE_MMX
    __asm__ volatile(EMMS:::"memory");
#endif
    return;
}
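
/*
 * RGB32/BGR32: four bytes per pixel.  On big-endian targets dstbase is
 * advanced by one byte so the blend skips the filler/alpha byte.  Two MMX
 * variants follow: the 3DNow! one processes two pixels per iteration, the
 * generic MMX one processes four pixels (16 bytes) per iteration and uses
 * the bFF constant (presumably all-0xFF bytes, defined outside this
 * template) in place of an mm register.
 */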
static inline void RENAME(vo_draw_alpha_rgb32)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
    int y;
#if HAVE_BIGENDIAN
    dstbase++;
#endif
#if HAVE_MMX
#if HAVE_AMD3DNOW
    __asm__ volatile(
        "pxor %%mm7, %%mm7\n\t"
        "pcmpeqb %%mm6, %%mm6\n\t" // F..F
        ::);
#else /* HAVE_AMD3DNOW */
    __asm__ volatile(
        "pxor %%mm7, %%mm7\n\t"
        "pcmpeqb %%mm5, %%mm5\n\t" // F..F
        "movq %%mm5, %%mm4\n\t"
        "psllw $8, %%mm5\n\t" //FF00FF00FF00
        "psrlw $8, %%mm4\n\t" //00FF00FF00FF
        ::);
#endif /* HAVE_AMD3DNOW */
#endif /* HAVE_MMX */
    for(y=0;y<h;y++){
        register int x;
#if ARCH_X86 && (!ARCH_X86_64 || HAVE_MMX)
#if HAVE_MMX
#if HAVE_AMD3DNOW
        __asm__ volatile(
            PREFETCHW" %0\n\t"
            PREFETCH" %1\n\t"
            PREFETCH" %2\n\t"
            ::"m"(*dstbase),"m"(*srca),"m"(*src):"memory");
        for(x=0;x<w;x+=2){
            if(srca[x] || srca[x+1])
                __asm__ volatile(
                    PREFETCHW" 32%0\n\t"
                    PREFETCH" 32%1\n\t"
                    PREFETCH" 32%2\n\t"
                    "movq %0, %%mm0\n\t" // dstbase
                    "movq %%mm0, %%mm1\n\t"
                    "punpcklbw %%mm7, %%mm0\n\t"
                    "punpckhbw %%mm7, %%mm1\n\t"
                    "movd %1, %%mm2\n\t" // srca ABCD0000
                    "paddb %%mm6, %%mm2\n\t"
                    "punpcklbw %%mm2, %%mm2\n\t" // srca AABBCCDD
                    "punpcklbw %%mm2, %%mm2\n\t" // srca AAAABBBB
                    "movq %%mm2, %%mm3\n\t"
                    "punpcklbw %%mm7, %%mm2\n\t" // srca 0A0A0A0A
                    "punpckhbw %%mm7, %%mm3\n\t" // srca 0B0B0B0B
                    "pmullw %%mm2, %%mm0\n\t"
                    "pmullw %%mm3, %%mm1\n\t"
                    "psrlw $8, %%mm0\n\t"
                    "psrlw $8, %%mm1\n\t"
                    "packuswb %%mm1, %%mm0\n\t"
                    "movd %2, %%mm2\n\t" // src ABCD0000
                    "punpcklbw %%mm2, %%mm2\n\t" // src AABBCCDD
                    "punpcklbw %%mm2, %%mm2\n\t" // src AAAABBBB
                    "paddb %%mm2, %%mm0\n\t"
                    "movq %%mm0, %0\n\t"
                    :: "m" (dstbase[4*x]), "m" (srca[x]), "m" (src[x]));
        }
#else //this is faster for Intel's crap
        __asm__ volatile(
            PREFETCHW" %0\n\t"
            PREFETCH" %1\n\t"
            PREFETCH" %2\n\t"
            ::"m"(*dstbase),"m"(*srca),"m"(*src):"memory");
        for(x=0;x<w;x+=4){
            __asm__ volatile(
                "movl %1, %%eax\n\t"
                "orl %%eax, %%eax\n\t"
                " jz 1f\n\t"
                PREFETCHW" 32%0\n\t"
                PREFETCH" 32%1\n\t"
                PREFETCH" 32%2\n\t"
                "movq %0, %%mm0\n\t" // dstbase
                "movq %%mm0, %%mm1\n\t"
                "pand %%mm4, %%mm0\n\t" //0R0B0R0B
                "psrlw $8, %%mm1\n\t"   //0?0G0?0G
                "movd %%eax, %%mm2\n\t" //srca 0000DCBA
                "paddb %3, %%mm2\n\t"
                "punpcklbw %%mm2, %%mm2\n\t" //srca DDCCBBAA
                "movq %%mm2, %%mm3\n\t"
                "punpcklbw %%mm7, %%mm2\n\t" //srca 0B0B0A0A
                "pmullw %%mm2, %%mm0\n\t"
                "pmullw %%mm2, %%mm1\n\t"
                "psrlw $8, %%mm0\n\t"
                "pand %%mm5, %%mm1\n\t"
                "por %%mm1, %%mm0\n\t"
                "movd %2, %%mm2\n\t" //src 0000DCBA
                "punpcklbw %%mm2, %%mm2\n\t" //src DDCCBBAA
                "movq %%mm2, %%mm6\n\t"
                "punpcklbw %%mm2, %%mm2\n\t" //src BBBBAAAA
                "paddb %%mm2, %%mm0\n\t"
                "movq %%mm0, %0\n\t"

                "movq 8%0, %%mm0\n\t" // dstbase
                "movq %%mm0, %%mm1\n\t"
                "pand %%mm4, %%mm0\n\t" //0R0B0R0B
                "psrlw $8, %%mm1\n\t"   //0?0G0?0G
                "punpckhbw %%mm7, %%mm3\n\t" //srca 0D0D0C0C
                "pmullw %%mm3, %%mm0\n\t"
                "pmullw %%mm3, %%mm1\n\t"
                "psrlw $8, %%mm0\n\t"
                "pand %%mm5, %%mm1\n\t"
                "por %%mm1, %%mm0\n\t"
                "punpckhbw %%mm6, %%mm6\n\t" //src DDDDCCCC
                "paddb %%mm6, %%mm0\n\t"
                "movq %%mm0, 8%0\n\t"
                "1:\n\t"
                :: "m" (dstbase[4*x]), "m" (srca[x]), "m" (src[x]), "m" (bFF)
                : "%eax");
        }
#endif
#else /* HAVE_MMX */
        for(x=0;x<w;x++){
            if(srca[x]){
                __asm__ volatile(
                    "movzbl (%0), %%ecx\n\t"
                    "movzbl 1(%0), %%eax\n\t"
                    "movzbl 2(%0), %%edx\n\t"

                    "imull %1, %%ecx\n\t"
                    "imull %1, %%eax\n\t"
                    "imull %1, %%edx\n\t"

                    "addl %2, %%ecx\n\t"
                    "addl %2, %%eax\n\t"
                    "addl %2, %%edx\n\t"

                    "movb %%ch, (%0)\n\t"
                    "movb %%ah, 1(%0)\n\t"
                    "movb %%dh, 2(%0)\n\t"
                    :
                    :"r" (&dstbase[4*x]),
                     "r" ((unsigned)srca[x]),
                     "r" (((unsigned)src[x])<<8)
                    :"%eax", "%ecx", "%edx"
                    );
            }
        }
#endif /* HAVE_MMX */
#else /*non x86 arch or x86_64 with MMX disabled */
        for(x=0;x<w;x++){
            if(srca[x]){
#ifdef FAST_OSD
                dstbase[4*x+0]=dstbase[4*x+1]=dstbase[4*x+2]=src[x];
#else
                dstbase[4*x+0]=((dstbase[4*x+0]*srca[x])>>8)+src[x];
                dstbase[4*x+1]=((dstbase[4*x+1]*srca[x])>>8)+src[x];
                dstbase[4*x+2]=((dstbase[4*x+2]*srca[x])>>8)+src[x];
#endif
            }
        }
#endif /* arch_x86 */
        src+=srcstride;
        srca+=srcstride;
        dstbase+=dststride;
    }
#if HAVE_MMX
    __asm__ volatile(EMMS:::"memory");
#endif
    return;
}