// libvo/osd_template.c
// Generic alpha renderers for all YUV modes and RGB depths.
// Optimized by Nick and Michael
// Code from Michael Niedermayer (michaelni@gmx.at) is under GPL

#undef PREFETCH
#undef EMMS
#undef PREFETCHW
#undef PAVGB

#if HAVE_AMD3DNOW
#define PREFETCH  "prefetch"
#define PREFETCHW "prefetchw"
#define PAVGB     "pavgusb"
#elif HAVE_MMX2
#define PREFETCH  "prefetchnta"
#define PREFETCHW "prefetcht0"
#define PAVGB     "pavgb"
#else
#define PREFETCH  " # nop"
#define PREFETCHW " # nop"
#endif

#if HAVE_AMD3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS     "femms"
#else
#define EMMS     "emms"
#endif
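
/*
 * Every renderer below applies the same per-pixel blend, which the plain C
 * fallbacks spell out explicitly: an alpha byte of 0 means "leave the
 * destination pixel alone", otherwise
 *
 *     dst = ((dst * srca) >> 8) + src;
 *
 * i.e. srca scales the existing pixel down and src (the pre-scaled OSD
 * value) is added on top.  A minimal scalar sketch of one row, assuming the
 * same argument layout as the functions below:
 *
 *     for (x = 0; x < w; x++)
 *         if (srca[x])
 *             dstbase[x] = ((dstbase[x] * srca[x]) >> 8) + src[x];
 *
 * The MMX paths perform the same arithmetic on 4 or 8 pixels per iteration.
 */
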
static inline void RENAME(vo_draw_alpha_yv12)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
    int y;
#if defined(FAST_OSD) && !HAVE_MMX
    w=w>>1;
#endif
#if HAVE_MMX
    __asm__ volatile(
        "pcmpeqb %%mm5, %%mm5\n\t" // F..F
        "movq %%mm5, %%mm4\n\t"
        "movq %%mm5, %%mm7\n\t"
        "psllw $8, %%mm5\n\t" //FF00FF00FF00
        "psrlw $8, %%mm4\n\t" //00FF00FF00FF
        ::);
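    /* mm4 now holds the 0x00FF byte mask, mm5 the 0xFF00 mask and mm7 is all
       ones; they stay unchanged and are reused by every 8-pixel block in the
       loop below. */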
#endif
    for(y=0;y<h;y++){
        register int x;
#if HAVE_MMX
        __asm__ volatile(
            PREFETCHW" %0\n\t"
            PREFETCH" %1\n\t"
            PREFETCH" %2\n\t"
            ::"m"(*dstbase),"m"(*srca),"m"(*src):"memory");
        for(x=0;x<w;x+=8){
            __asm__ volatile(
                "movl %1, %%eax\n\t"
                "orl 4%1, %%eax\n\t"
                " jz 1f\n\t"
                PREFETCHW" 32%0\n\t"
                PREFETCH" 32%1\n\t"
                PREFETCH" 32%2\n\t"
                "movq %0, %%mm0\n\t" // dstbase
                "movq %%mm0, %%mm1\n\t"
                "pand %%mm4, %%mm0\n\t" //0Y0Y0Y0Y
                "psrlw $8, %%mm1\n\t"   //0Y0Y0Y0Y
                "movq %1, %%mm2\n\t"    //srca HGFEDCBA
                "paddb %%mm7, %%mm2\n\t"
                "movq %%mm2, %%mm3\n\t"
                "pand %%mm4, %%mm2\n\t" //0G0E0C0A
                "psrlw $8, %%mm3\n\t"   //0H0F0D0B
                "pmullw %%mm2, %%mm0\n\t"
                "pmullw %%mm3, %%mm1\n\t"
                "psrlw $8, %%mm0\n\t"
                "pand %%mm5, %%mm1\n\t"
                "por %%mm1, %%mm0\n\t"
                "paddb %2, %%mm0\n\t"
                "movq %%mm0, %0\n\t"
                "1:\n\t"
                :: "m" (dstbase[x]), "m" (srca[x]), "m" (src[x])
                : "%eax");
        }
#else
        for(x=0;x<w;x++){
#ifdef FAST_OSD
            if(srca[2*x+0]) dstbase[2*x+0]=src[2*x+0];
            if(srca[2*x+1]) dstbase[2*x+1]=src[2*x+1];
#else
            if(srca[x]) dstbase[x]=((dstbase[x]*srca[x])>>8)+src[x];
#endif
        }
#endif
        src+=srcstride;
        srca+=srcstride;
        dstbase+=dststride;
    }
#if HAVE_MMX
    __asm__ volatile(EMMS:::"memory");
#endif
    return;
}
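
/*
 * YUY2: packed 4:2:2, two bytes per pixel laid out as Y U Y V, so luma sits
 * at even byte offsets.  Luma is blended with the formula above; the C
 * fallback additionally pulls the interleaved chroma bytes towards the
 * neutral value 128 by the same alpha, while the MMX path copies chroma
 * through unchanged.
 */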
static inline void RENAME(vo_draw_alpha_yuy2)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
    int y;
#if defined(FAST_OSD) && !HAVE_MMX
    w=w>>1;
#endif
#if HAVE_MMX
    __asm__ volatile(
        "pxor %%mm7, %%mm7\n\t"
        "pcmpeqb %%mm5, %%mm5\n\t" // F..F
        "movq %%mm5, %%mm6\n\t"
        "movq %%mm5, %%mm4\n\t"
        "psllw $8, %%mm5\n\t" //FF00FF00FF00
        "psrlw $8, %%mm4\n\t" //00FF00FF00FF
        ::);
#endif
    for(y=0;y<h;y++){
        register int x;
#if HAVE_MMX
        __asm__ volatile(
            PREFETCHW" %0\n\t"
            PREFETCH" %1\n\t"
            PREFETCH" %2\n\t"
            ::"m"(*dstbase),"m"(*srca),"m"(*src));
        for(x=0;x<w;x+=4){
            __asm__ volatile(
                "movl %1, %%eax\n\t"
                "orl %%eax, %%eax\n\t"
                " jz 1f\n\t"
                PREFETCHW" 32%0\n\t"
                PREFETCH" 32%1\n\t"
                PREFETCH" 32%2\n\t"
                "movq %0, %%mm0\n\t" // dstbase
                "movq %%mm0, %%mm1\n\t"
                "pand %%mm4, %%mm0\n\t"      //0Y0Y0Y0Y
                "movd %%eax, %%mm2\n\t"      //srca 0000DCBA
                "paddb %%mm6, %%mm2\n\t"
                "punpcklbw %%mm7, %%mm2\n\t" //srca 0D0C0B0A
                "pmullw %%mm2, %%mm0\n\t"
                "psrlw $8, %%mm0\n\t"
                "pand %%mm5, %%mm1\n\t"      //U0V0U0V0
                "movd %2, %%mm2\n\t"         //src 0000DCBA
                "punpcklbw %%mm7, %%mm2\n\t" //src 0D0C0B0A
                "por %%mm1, %%mm0\n\t"
                "paddb %%mm2, %%mm0\n\t"
                "movq %%mm0, %0\n\t"
                "1:\n\t"
                :: "m" (dstbase[x*2]), "m" (srca[x]), "m" (src[x])
                : "%eax");
        }
#else
        for(x=0;x<w;x++){
#ifdef FAST_OSD
            if(srca[2*x+0]) dstbase[4*x+0]=src[2*x+0];
            if(srca[2*x+1]) dstbase[4*x+2]=src[2*x+1];
#else
            if(srca[x]) {
                dstbase[2*x]=((dstbase[2*x]*srca[x])>>8)+src[x];
                dstbase[2*x+1]=((((signed)dstbase[2*x+1]-128)*srca[x])>>8)+128;
            }
#endif
        }
#endif
        src+=srcstride;
        srca+=srcstride;
        dstbase+=dststride;
    }
#if HAVE_MMX
    __asm__ volatile(EMMS:::"memory");
#endif
    return;
}
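
/*
 * UYVY: the same packed 4:2:2 blend as the YUY2 case but with the byte order
 * swapped (chroma first, luma second), so the luma/chroma offsets are
 * exchanged.  This variant only has the plain C implementation.
 */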
static inline void RENAME(vo_draw_alpha_uyvy)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
    int y;
#if defined(FAST_OSD)
    w=w>>1;
#endif
    for(y=0;y<h;y++){
        register int x;
        for(x=0;x<w;x++){
#ifdef FAST_OSD
            if(srca[2*x+0]) dstbase[4*x+2]=src[2*x+0];
            if(srca[2*x+1]) dstbase[4*x+0]=src[2*x+1];
#else
            if(srca[x]) {
                dstbase[2*x+1]=((dstbase[2*x+1]*srca[x])>>8)+src[x];
                dstbase[2*x]=((((signed)dstbase[2*x]-128)*srca[x])>>8)+128;
            }
#endif
        }
        src+=srcstride;
        srca+=srcstride;
        dstbase+=dststride;
    }
}
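
/*
 * RGB24/BGR24: three bytes per pixel; each colour channel is blended with
 * the same formula.  The MMX path handles two pixels (6 bytes) per
 * iteration and masks the 8-byte store with mask24hl/mask24lh (defined
 * elsewhere) so the two trailing bytes, which belong to the following
 * pixels, keep their original destination contents.
 */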
static inline void RENAME(vo_draw_alpha_rgb24)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
    int y;
#if HAVE_MMX
    __asm__ volatile(
        "pxor %%mm7, %%mm7\n\t"
        "pcmpeqb %%mm6, %%mm6\n\t" // F..F
        ::);
#endif
    for(y=0;y<h;y++){
        register unsigned char *dst = dstbase;
        register int x;
#if ARCH_X86 && (!ARCH_X86_64 || HAVE_MMX)
#if HAVE_MMX
        __asm__ volatile(
            PREFETCHW" %0\n\t"
            PREFETCH" %1\n\t"
            PREFETCH" %2\n\t"
            ::"m"(*dst),"m"(*srca),"m"(*src):"memory");
        for(x=0;x<w;x+=2){
            if(srca[x] || srca[x+1])
                __asm__ volatile(
                    PREFETCHW" 32%0\n\t"
                    PREFETCH" 32%1\n\t"
                    PREFETCH" 32%2\n\t"
                    "movq %0, %%mm0\n\t" // dstbase
                    "movq %%mm0, %%mm1\n\t"
                    "movq %%mm0, %%mm5\n\t"
                    "punpcklbw %%mm7, %%mm0\n\t"
                    "punpckhbw %%mm7, %%mm1\n\t"
                    "movd %1, %%mm2\n\t"         // srca ABCD0000
                    "paddb %%mm6, %%mm2\n\t"
                    "punpcklbw %%mm2, %%mm2\n\t" // srca AABBCCDD
                    "punpcklbw %%mm2, %%mm2\n\t" // srca AAAABBBB
                    "psrlq $8, %%mm2\n\t"        // srca AAABBBB0
                    "movq %%mm2, %%mm3\n\t"
                    "punpcklbw %%mm7, %%mm2\n\t" // srca 0A0A0A0B
                    "punpckhbw %%mm7, %%mm3\n\t" // srca 0B0B0B00
                    "pmullw %%mm2, %%mm0\n\t"
                    "pmullw %%mm3, %%mm1\n\t"
                    "psrlw $8, %%mm0\n\t"
                    "psrlw $8, %%mm1\n\t"
                    "packuswb %%mm1, %%mm0\n\t"
                    "movd %2, %%mm2\n\t"         // src ABCD0000
                    "punpcklbw %%mm2, %%mm2\n\t" // src AABBCCDD
                    "punpcklbw %%mm2, %%mm2\n\t" // src AAAABBBB
                    "psrlq $8, %%mm2\n\t"        // src AAABBBB0
                    "paddb %%mm2, %%mm0\n\t"
                    "pand %4, %%mm5\n\t"
                    "pand %3, %%mm0\n\t"
                    "por %%mm0, %%mm5\n\t"
                    "movq %%mm5, %0\n\t"
                    :: "m" (dst[0]), "m" (srca[x]), "m" (src[x]), "m"(mask24hl), "m"(mask24lh));
            dst += 6;
        }
#else /* HAVE_MMX */
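        /* Plain x86 fallback: for each colour channel the asm computes
           dst*srca + (src<<8) in a 32-bit register and stores the second
           byte of the result (%ah/%ch), which equals ((dst*srca)>>8)+src. */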
        for(x=0;x<w;x++){
            if(srca[x]){
                __asm__ volatile(
                    "movzbl (%0), %%ecx\n\t"
                    "movzbl 1(%0), %%eax\n\t"

                    "imull %1, %%ecx\n\t"
                    "imull %1, %%eax\n\t"

                    "addl %2, %%ecx\n\t"
                    "addl %2, %%eax\n\t"

                    "movb %%ch, (%0)\n\t"
                    "movb %%ah, 1(%0)\n\t"

                    "movzbl 2(%0), %%eax\n\t"
                    "imull %1, %%eax\n\t"
                    "addl %2, %%eax\n\t"
                    "movb %%ah, 2(%0)\n\t"
                    :
                    :"D" (dst),
                     "r" ((unsigned)srca[x]),
                     "r" (((unsigned)src[x])<<8)
                    :"%eax", "%ecx"
                    );
            }
            dst += 3;
        }
#endif /* !HAVE_MMX */
#else /* non-x86 arch or x86_64 with MMX disabled */
        for(x=0;x<w;x++){
            if(srca[x]){
#ifdef FAST_OSD
                dst[0]=dst[1]=dst[2]=src[x];
#else
                dst[0]=((dst[0]*srca[x])>>8)+src[x];
                dst[1]=((dst[1]*srca[x])>>8)+src[x];
                dst[2]=((dst[2]*srca[x])>>8)+src[x];
#endif
            }
            dst+=3; // 24bpp
        }
#endif /* arch_x86 */
        src+=srcstride;
        srca+=srcstride;
        dstbase+=dststride;
    }
#if HAVE_MMX
    __asm__ volatile(EMMS:::"memory");
#endif
    return;
}
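
/*
 * RGB32/BGR32: four bytes per pixel.  The colour channels are blended as in
 * the RGB24 case; on big-endian layouts dstbase is advanced by one byte so
 * the blend starts at the first colour component.  Note that the MMX paths
 * scale all four bytes of a pixel (including the unused/alpha byte) by the
 * same alpha, while the C and plain-x86 fallbacks only touch bytes 0-2.
 */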
static inline void RENAME(vo_draw_alpha_rgb32)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
    int y;
#ifdef WORDS_BIGENDIAN
    dstbase++;
#endif
#if HAVE_MMX
#if HAVE_AMD3DNOW
    __asm__ volatile(
        "pxor %%mm7, %%mm7\n\t"
        "pcmpeqb %%mm6, %%mm6\n\t" // F..F
        ::);
#else /* HAVE_AMD3DNOW */
    __asm__ volatile(
        "pxor %%mm7, %%mm7\n\t"
        "pcmpeqb %%mm5, %%mm5\n\t" // F..F
        "movq %%mm5, %%mm4\n\t"
        "psllw $8, %%mm5\n\t" //FF00FF00FF00
        "psrlw $8, %%mm4\n\t" //00FF00FF00FF
        ::);
#endif /* HAVE_AMD3DNOW */
#endif /* HAVE_MMX */
    for(y=0;y<h;y++){
        register int x;
#if ARCH_X86 && (!ARCH_X86_64 || HAVE_MMX)
#if HAVE_MMX
#if HAVE_AMD3DNOW
        __asm__ volatile(
            PREFETCHW" %0\n\t"
            PREFETCH" %1\n\t"
            PREFETCH" %2\n\t"
            ::"m"(*dstbase),"m"(*srca),"m"(*src):"memory");
        for(x=0;x<w;x+=2){
            if(srca[x] || srca[x+1])
                __asm__ volatile(
                    PREFETCHW" 32%0\n\t"
                    PREFETCH" 32%1\n\t"
                    PREFETCH" 32%2\n\t"
                    "movq %0, %%mm0\n\t" // dstbase
                    "movq %%mm0, %%mm1\n\t"
                    "punpcklbw %%mm7, %%mm0\n\t"
                    "punpckhbw %%mm7, %%mm1\n\t"
                    "movd %1, %%mm2\n\t"         // srca ABCD0000
                    "paddb %%mm6, %%mm2\n\t"
                    "punpcklbw %%mm2, %%mm2\n\t" // srca AABBCCDD
                    "punpcklbw %%mm2, %%mm2\n\t" // srca AAAABBBB
                    "movq %%mm2, %%mm3\n\t"
                    "punpcklbw %%mm7, %%mm2\n\t" // srca 0A0A0A0A
                    "punpckhbw %%mm7, %%mm3\n\t" // srca 0B0B0B0B
                    "pmullw %%mm2, %%mm0\n\t"
                    "pmullw %%mm3, %%mm1\n\t"
                    "psrlw $8, %%mm0\n\t"
                    "psrlw $8, %%mm1\n\t"
                    "packuswb %%mm1, %%mm0\n\t"
                    "movd %2, %%mm2\n\t"         // src ABCD0000
                    "punpcklbw %%mm2, %%mm2\n\t" // src AABBCCDD
                    "punpcklbw %%mm2, %%mm2\n\t" // src AAAABBBB
                    "paddb %%mm2, %%mm0\n\t"
                    "movq %%mm0, %0\n\t"
                    :: "m" (dstbase[4*x]), "m" (srca[x]), "m" (src[x]));
        }
#else // MMX2 path; faster on Intel CPUs
        __asm__ volatile(
            PREFETCHW" %0\n\t"
            PREFETCH" %1\n\t"
            PREFETCH" %2\n\t"
            ::"m"(*dstbase),"m"(*srca),"m"(*src):"memory");
        for(x=0;x<w;x+=4){
            __asm__ volatile(
                "movl %1, %%eax\n\t"
                "orl %%eax, %%eax\n\t"
                " jz 1f\n\t"
                PREFETCHW" 32%0\n\t"
                PREFETCH" 32%1\n\t"
                PREFETCH" 32%2\n\t"
                "movq %0, %%mm0\n\t" // dstbase
                "movq %%mm0, %%mm1\n\t"
                "pand %%mm4, %%mm0\n\t"      //0R0B0R0B
                "psrlw $8, %%mm1\n\t"        //0?0G0?0G
                "movd %%eax, %%mm2\n\t"      //srca 0000DCBA
                "paddb %3, %%mm2\n\t"
                "punpcklbw %%mm2, %%mm2\n\t" //srca DDCCBBAA
                "movq %%mm2, %%mm3\n\t"
                "punpcklbw %%mm7, %%mm2\n\t" //srca 0B0B0A0A
                "pmullw %%mm2, %%mm0\n\t"
                "pmullw %%mm2, %%mm1\n\t"
                "psrlw $8, %%mm0\n\t"
                "pand %%mm5, %%mm1\n\t"
                "por %%mm1, %%mm0\n\t"
                "movd %2, %%mm2\n\t"         //src 0000DCBA
                "punpcklbw %%mm2, %%mm2\n\t" //src DDCCBBAA
                "movq %%mm2, %%mm6\n\t"
                "punpcklbw %%mm2, %%mm2\n\t" //src BBBBAAAA
                "paddb %%mm2, %%mm0\n\t"
                "movq %%mm0, %0\n\t"

                "movq 8%0, %%mm0\n\t" // dstbase
                "movq %%mm0, %%mm1\n\t"
                "pand %%mm4, %%mm0\n\t"      //0R0B0R0B
                "psrlw $8, %%mm1\n\t"        //0?0G0?0G
                "punpckhbw %%mm7, %%mm3\n\t" //srca 0D0D0C0C
                "pmullw %%mm3, %%mm0\n\t"
                "pmullw %%mm3, %%mm1\n\t"
                "psrlw $8, %%mm0\n\t"
                "pand %%mm5, %%mm1\n\t"
                "por %%mm1, %%mm0\n\t"
                "punpckhbw %%mm6, %%mm6\n\t" //src DDDDCCCC
                "paddb %%mm6, %%mm0\n\t"
                "movq %%mm0, 8%0\n\t"
                "1:\n\t"
                :: "m" (dstbase[4*x]), "m" (srca[x]), "m" (src[x]), "m" (bFF)
                : "%eax");
        }
#endif
#else /* HAVE_MMX */
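        /* Same high-byte trick as in the RGB24 fallback above, extended to
           three registers so that all three colour channels of a pixel are
           blended per iteration. */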
        for(x=0;x<w;x++){
            if(srca[x]){
                __asm__ volatile(
                    "movzbl (%0), %%ecx\n\t"
                    "movzbl 1(%0), %%eax\n\t"
                    "movzbl 2(%0), %%edx\n\t"

                    "imull %1, %%ecx\n\t"
                    "imull %1, %%eax\n\t"
                    "imull %1, %%edx\n\t"

                    "addl %2, %%ecx\n\t"
                    "addl %2, %%eax\n\t"
                    "addl %2, %%edx\n\t"

                    "movb %%ch, (%0)\n\t"
                    "movb %%ah, 1(%0)\n\t"
                    "movb %%dh, 2(%0)\n\t"

                    :
                    :"r" (&dstbase[4*x]),
                     "r" ((unsigned)srca[x]),
                     "r" (((unsigned)src[x])<<8)
                    :"%eax", "%ecx", "%edx"
                    );
            }
        }
#endif /* HAVE_MMX */
#else /* non-x86 arch or x86_64 with MMX disabled */
        for(x=0;x<w;x++){
            if(srca[x]){
#ifdef FAST_OSD
                dstbase[4*x+0]=dstbase[4*x+1]=dstbase[4*x+2]=src[x];
#else
                dstbase[4*x+0]=((dstbase[4*x+0]*srca[x])>>8)+src[x];
                dstbase[4*x+1]=((dstbase[4*x+1]*srca[x])>>8)+src[x];
                dstbase[4*x+2]=((dstbase[4*x+2]*srca[x])>>8)+src[x];
#endif
            }
        }
#endif /* arch_x86 */
        src+=srcstride;
        srca+=srcstride;
        dstbase+=dststride;
    }
#if HAVE_MMX
    __asm__ volatile(EMMS:::"memory");
#endif
    return;
}