libmpcodecs/vf_spp.c
/*
 * Copyright (C) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of MPlayer.
 *
 * MPlayer is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * MPlayer is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with MPlayer; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/*
 * This implementation is based on an algorithm described in
 * "Aria Nosratinia Embedded Post-Processing for
 * Enhancement of Compressed Images (1999)"
 * (http://citeseer.nj.nec.com/nosratinia99embedded.html)
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <math.h>

#include "config.h"

#include "mp_msg.h"
#include "cpudetect.h"

#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"

#undef fprintf
#undef free
#undef malloc

#if HAVE_MALLOC_H
#include <malloc.h>
#endif

#include "img_format.h"
#include "mp_image.h"
#include "vf.h"
#include "libvo/fastmemcpy.h"
#define XMIN(a,b) ((a) < (b) ? (a) : (b))

//===========================================================================//
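// 8x8 ordered-dither matrix (values 0..63); store_slice() adds these before
// the final right shift so the 16-bit accumulation buffer is dithered rather
// than simply truncated when converted back to 8-bit pixels.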
static const uint8_t __attribute__((aligned(8))) dither[8][8]={
{  0, 48, 12, 60,  3, 51, 15, 63, },
{ 32, 16, 44, 28, 35, 19, 47, 31, },
{  8, 56,  4, 52, 11, 59,  7, 55, },
{ 40, 24, 36, 20, 43, 27, 39, 23, },
{  2, 50, 14, 62,  1, 49, 13, 61, },
{ 34, 18, 46, 30, 33, 17, 45, 29, },
{ 10, 58,  6, 54,  9, 57,  5, 53, },
{ 42, 26, 38, 22, 41, 25, 37, 21, },
};

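// (x,y) shift offsets for the overlapping filtering passes: with
// count = 1<<log2_count, filter() uses entries offset[count-1] through
// offset[2*count-2], so each quality level gets its own set of offsets.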
static const uint8_t offset[127][2]= {
{0,0},
{0,0}, {4,4},
{0,0}, {2,2}, {6,4}, {4,6},
{0,0}, {5,1}, {2,2}, {7,3}, {4,4}, {1,5}, {6,6}, {3,7},

{0,0}, {4,0}, {1,1}, {5,1}, {3,2}, {7,2}, {2,3}, {6,3},
{0,4}, {4,4}, {1,5}, {5,5}, {3,6}, {7,6}, {2,7}, {6,7},

{0,0}, {0,2}, {0,4}, {0,6}, {1,1}, {1,3}, {1,5}, {1,7},
{2,0}, {2,2}, {2,4}, {2,6}, {3,1}, {3,3}, {3,5}, {3,7},
{4,0}, {4,2}, {4,4}, {4,6}, {5,1}, {5,3}, {5,5}, {5,7},
{6,0}, {6,2}, {6,4}, {6,6}, {7,1}, {7,3}, {7,5}, {7,7},

{0,0}, {4,4}, {0,4}, {4,0}, {2,2}, {6,6}, {2,6}, {6,2},
{0,2}, {4,6}, {0,6}, {4,2}, {2,0}, {6,4}, {2,4}, {6,0},
{1,1}, {5,5}, {1,5}, {5,1}, {3,3}, {7,7}, {3,7}, {7,3},
{1,3}, {5,7}, {1,7}, {5,3}, {3,1}, {7,5}, {3,5}, {7,1},
{0,1}, {4,5}, {0,5}, {4,1}, {2,3}, {6,7}, {2,7}, {6,3},
{0,3}, {4,7}, {0,7}, {4,3}, {2,1}, {6,5}, {2,5}, {6,1},
{1,0}, {5,4}, {1,4}, {5,0}, {3,2}, {7,6}, {3,6}, {7,2},
{1,2}, {5,6}, {1,6}, {5,2}, {3,0}, {7,4}, {3,4}, {7,0},
};
struct vf_priv_s {
    int log2_count;
    int qp;
    int mode;
    int mpeg2;
    int temp_stride;
    uint8_t *src;
    int16_t *temp;
    AVCodecContext *avctx;
    DSPContext dsp;
    char *non_b_qp;
};
#define SHIFT 22

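// Hard thresholding of the DCT coefficients: AC coefficients whose magnitude
// does not exceed the qp-derived threshold are zeroed, the rest are kept
// unchanged (rounded and scaled down by 8, like the DC term).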
static void hardthresh_c(DCTELEM dst[64], DCTELEM src[64], int qp, uint8_t *permutation){
    int i;
    int bias= 0; //FIXME
    unsigned int threshold1, threshold2;

    threshold1= qp*((1<<4) - bias) - 1;
    threshold2= (threshold1<<1);

    memset(dst, 0, 64*sizeof(DCTELEM));
    dst[0]= (src[0] + 4)>>3;

    for(i=1; i<64; i++){
        int level= src[i];
        if(((unsigned)(level+threshold1))>threshold2){
            const int j= permutation[i];
            dst[j]= (level + 4)>>3;
        }
    }
}

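// Soft thresholding: same test as above, but surviving coefficients are
// shrunk towards zero by the threshold before the rounding shift.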
static void softthresh_c(DCTELEM dst[64], DCTELEM src[64], int qp, uint8_t *permutation){
    int i;
    int bias= 0; //FIXME
    unsigned int threshold1, threshold2;

    threshold1= qp*((1<<4) - bias) - 1;
    threshold2= (threshold1<<1);

    memset(dst, 0, 64*sizeof(DCTELEM));
    dst[0]= (src[0] + 4)>>3;

    for(i=1; i<64; i++){
        int level= src[i];
        if(((unsigned)(level+threshold1))>threshold2){
            const int j= permutation[i];
            if(level>0)
                dst[j]= (level - threshold1 + 4)>>3;
            else
                dst[j]= (level + threshold1 + 4)>>3;
        }
    }
}

#if HAVE_MMX
static void hardthresh_mmx(DCTELEM dst[64], DCTELEM src[64], int qp, uint8_t *permutation){
    int bias= 0; //FIXME
    unsigned int threshold1;

    threshold1= qp*((1<<4) - bias) - 1;

    __asm__ volatile(
#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
        "movq " #src0 ", %%mm0      \n\t"\
        "movq " #src1 ", %%mm1      \n\t"\
        "movq " #src2 ", %%mm2      \n\t"\
        "movq " #src3 ", %%mm3      \n\t"\
        "psubw %%mm4, %%mm0         \n\t"\
        "psubw %%mm4, %%mm1         \n\t"\
        "psubw %%mm4, %%mm2         \n\t"\
        "psubw %%mm4, %%mm3         \n\t"\
        "paddusw %%mm5, %%mm0       \n\t"\
        "paddusw %%mm5, %%mm1       \n\t"\
        "paddusw %%mm5, %%mm2       \n\t"\
        "paddusw %%mm5, %%mm3       \n\t"\
        "paddw %%mm6, %%mm0         \n\t"\
        "paddw %%mm6, %%mm1         \n\t"\
        "paddw %%mm6, %%mm2         \n\t"\
        "paddw %%mm6, %%mm3         \n\t"\
        "psubusw %%mm6, %%mm0       \n\t"\
        "psubusw %%mm6, %%mm1       \n\t"\
        "psubusw %%mm6, %%mm2       \n\t"\
        "psubusw %%mm6, %%mm3       \n\t"\
        "psraw $3, %%mm0            \n\t"\
        "psraw $3, %%mm1            \n\t"\
        "psraw $3, %%mm2            \n\t"\
        "psraw $3, %%mm3            \n\t"\
\
        "movq %%mm0, %%mm7          \n\t"\
        "punpcklwd %%mm2, %%mm0     \n\t" /*A*/\
        "punpckhwd %%mm2, %%mm7     \n\t" /*C*/\
        "movq %%mm1, %%mm2          \n\t"\
        "punpcklwd %%mm3, %%mm1     \n\t" /*B*/\
        "punpckhwd %%mm3, %%mm2     \n\t" /*D*/\
        "movq %%mm0, %%mm3          \n\t"\
        "punpcklwd %%mm1, %%mm0     \n\t" /*A*/\
        "punpckhwd %%mm7, %%mm3     \n\t" /*C*/\
        "punpcklwd %%mm2, %%mm7     \n\t" /*B*/\
        "punpckhwd %%mm2, %%mm1     \n\t" /*D*/\
\
        "movq %%mm0, " #dst0 "      \n\t"\
        "movq %%mm7, " #dst1 "      \n\t"\
        "movq %%mm3, " #dst2 "      \n\t"\
        "movq %%mm1, " #dst3 "      \n\t"

        "movd %2, %%mm4             \n\t"
        "movd %3, %%mm5             \n\t"
        "movd %4, %%mm6             \n\t"
        "packssdw %%mm4, %%mm4      \n\t"
        "packssdw %%mm5, %%mm5      \n\t"
        "packssdw %%mm6, %%mm6      \n\t"
        "packssdw %%mm4, %%mm4      \n\t"
        "packssdw %%mm5, %%mm5      \n\t"
        "packssdw %%mm6, %%mm6      \n\t"
        REQUANT_CORE(  (%1),  8(%1), 16(%1), 24(%1),  (%0), 8(%0), 64(%0), 72(%0))
        REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
        REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
        REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
        : : "r" (src), "r" (dst), "g" (threshold1+1), "g" (threshold1+5), "g" (threshold1-4) //FIXME maybe more accurate than needed?
    );

    dst[0]= (src[0] + 4)>>3;
}

static void softthresh_mmx(DCTELEM dst[64], DCTELEM src[64], int qp, uint8_t *permutation){
    int bias= 0; //FIXME
    unsigned int threshold1;

    threshold1= qp*((1<<4) - bias) - 1;

    __asm__ volatile(
#undef REQUANT_CORE
#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
        "movq " #src0 ", %%mm0      \n\t"\
        "movq " #src1 ", %%mm1      \n\t"\
        "pxor %%mm6, %%mm6          \n\t"\
        "pxor %%mm7, %%mm7          \n\t"\
        "pcmpgtw %%mm0, %%mm6       \n\t"\
        "pcmpgtw %%mm1, %%mm7       \n\t"\
        "pxor %%mm6, %%mm0          \n\t"\
        "pxor %%mm7, %%mm1          \n\t"\
        "psubusw %%mm4, %%mm0       \n\t"\
        "psubusw %%mm4, %%mm1       \n\t"\
        "pxor %%mm6, %%mm0          \n\t"\
        "pxor %%mm7, %%mm1          \n\t"\
        "movq " #src2 ", %%mm2      \n\t"\
        "movq " #src3 ", %%mm3      \n\t"\
        "pxor %%mm6, %%mm6          \n\t"\
        "pxor %%mm7, %%mm7          \n\t"\
        "pcmpgtw %%mm2, %%mm6       \n\t"\
        "pcmpgtw %%mm3, %%mm7       \n\t"\
        "pxor %%mm6, %%mm2          \n\t"\
        "pxor %%mm7, %%mm3          \n\t"\
        "psubusw %%mm4, %%mm2       \n\t"\
        "psubusw %%mm4, %%mm3       \n\t"\
        "pxor %%mm6, %%mm2          \n\t"\
        "pxor %%mm7, %%mm3          \n\t"\
\
        "paddsw %%mm5, %%mm0        \n\t"\
        "paddsw %%mm5, %%mm1        \n\t"\
        "paddsw %%mm5, %%mm2        \n\t"\
        "paddsw %%mm5, %%mm3        \n\t"\
        "psraw $3, %%mm0            \n\t"\
        "psraw $3, %%mm1            \n\t"\
        "psraw $3, %%mm2            \n\t"\
        "psraw $3, %%mm3            \n\t"\
\
        "movq %%mm0, %%mm7          \n\t"\
        "punpcklwd %%mm2, %%mm0     \n\t" /*A*/\
        "punpckhwd %%mm2, %%mm7     \n\t" /*C*/\
        "movq %%mm1, %%mm2          \n\t"\
        "punpcklwd %%mm3, %%mm1     \n\t" /*B*/\
        "punpckhwd %%mm3, %%mm2     \n\t" /*D*/\
        "movq %%mm0, %%mm3          \n\t"\
        "punpcklwd %%mm1, %%mm0     \n\t" /*A*/\
        "punpckhwd %%mm7, %%mm3     \n\t" /*C*/\
        "punpcklwd %%mm2, %%mm7     \n\t" /*B*/\
        "punpckhwd %%mm2, %%mm1     \n\t" /*D*/\
\
        "movq %%mm0, " #dst0 "      \n\t"\
        "movq %%mm7, " #dst1 "      \n\t"\
        "movq %%mm3, " #dst2 "      \n\t"\
        "movq %%mm1, " #dst3 "      \n\t"

        "movd %2, %%mm4             \n\t"
        "movd %3, %%mm5             \n\t"
        "packssdw %%mm4, %%mm4      \n\t"
        "packssdw %%mm5, %%mm5      \n\t"
        "packssdw %%mm4, %%mm4      \n\t"
        "packssdw %%mm5, %%mm5      \n\t"
        REQUANT_CORE(  (%1),  8(%1), 16(%1), 24(%1),  (%0), 8(%0), 64(%0), 72(%0))
        REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
        REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
        REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
        : : "r" (src), "r" (dst), "g" (threshold1), "rm" (4) //FIXME maybe more accurate than needed?
    );

    dst[0]= (src[0] + 4)>>3;
}
#endif

static inline void add_block(int16_t *dst, int stride, DCTELEM block[64]){
    int y;

    for(y=0; y<8; y++){
        *(uint32_t*)&dst[0 + y*stride]+= *(uint32_t*)&block[0 + y*8];
        *(uint32_t*)&dst[2 + y*stride]+= *(uint32_t*)&block[2 + y*8];
        *(uint32_t*)&dst[4 + y*stride]+= *(uint32_t*)&block[4 + y*8];
        *(uint32_t*)&dst[6 + y*stride]+= *(uint32_t*)&block[6 + y*8];
    }
}

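// Write one slice of the 16-bit accumulation buffer back to 8-bit pixels:
// scale to compensate for the number of averaged passes (the caller passes
// log2_scale = 6 - log2_count), add the ordered dither and clip to 0..255.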
static void store_slice_c(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale){
    int y, x;

#define STORE(pos) \
    temp= ((src[x + y*src_stride + pos]<<log2_scale) + d[pos])>>6;\
    if(temp & 0x100) temp= ~(temp>>31);\
    dst[x + y*dst_stride + pos]= temp;

    for(y=0; y<height; y++){
        const uint8_t *d= dither[y];
        for(x=0; x<width; x+=8){
            int temp;
            STORE(0);
            STORE(1);
            STORE(2);
            STORE(3);
            STORE(4);
            STORE(5);
            STORE(6);
            STORE(7);
        }
    }
}

#if HAVE_MMX
static void store_slice_mmx(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale){
    int y;

    for(y=0; y<height; y++){
        uint8_t *dst1= dst;
        int16_t *src1= src;
        __asm__ volatile(
            "movq (%3), %%mm3           \n\t"
            "movq (%3), %%mm4           \n\t"
            "movd %4, %%mm2             \n\t"
            "pxor %%mm0, %%mm0          \n\t"
            "punpcklbw %%mm0, %%mm3     \n\t"
            "punpckhbw %%mm0, %%mm4     \n\t"
            "psraw %%mm2, %%mm3         \n\t"
            "psraw %%mm2, %%mm4         \n\t"
            "movd %5, %%mm2             \n\t"
            "1:                         \n\t"
            "movq (%0), %%mm0           \n\t"
            "movq 8(%0), %%mm1          \n\t"
            "paddw %%mm3, %%mm0         \n\t"
            "paddw %%mm4, %%mm1         \n\t"
            "psraw %%mm2, %%mm0         \n\t"
            "psraw %%mm2, %%mm1         \n\t"
            "packuswb %%mm1, %%mm0      \n\t"
            "movq %%mm0, (%1)           \n\t"
            "add $16, %0                \n\t"
            "add $8, %1                 \n\t"
            "cmp %2, %1                 \n\t"
            " jb 1b                     \n\t"
            : "+r" (src1), "+r"(dst1)
            : "r"(dst + width), "r"(dither[y]), "g"(log2_scale), "g"(6-log2_scale)
        );
        src += src_stride;
        dst += dst_stride;
    }
//    if(width != mmxw)
//        store_slice_c(dst + mmxw, src + mmxw, dst_stride, src_stride, width - mmxw, log2_scale);
}
#endif

static void (*store_slice)(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale)= store_slice_c;

static void (*requantize)(DCTELEM dst[64], DCTELEM src[64], int qp, uint8_t *permutation)= hardthresh_c;

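// Core of the filter: pad the plane with 8 mirrored pixels on each side, then
// for every 8x8 position run 'count' shifted DCT -> requantize -> IDCT passes,
// accumulate them into p->temp, and let store_slice() write out the averaged,
// dithered result one 8-line slice at a time.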
static void filter(struct vf_priv_s *p, uint8_t *dst, uint8_t *src, int dst_stride, int src_stride, int width, int height, uint8_t *qp_store, int qp_stride, int is_luma){
    int x, y, i;
    const int count= 1<<p->log2_count;
    const int stride= is_luma ? p->temp_stride : ((width+16+15)&(~15));
    uint64_t __attribute__((aligned(16))) block_align[32];
    DCTELEM *block = (DCTELEM *)block_align;
    DCTELEM *block2= (DCTELEM *)(block_align+16);

    if (!src || !dst) return; // HACK avoid crash for Y8 colourspace
    for(y=0; y<height; y++){
        int index= 8 + 8*stride + y*stride;
        fast_memcpy(p->src + index, src + y*src_stride, width);
        for(x=0; x<8; x++){
            p->src[index         - x - 1]= p->src[index +         x    ];
            p->src[index + width + x    ]= p->src[index + width - x - 1];
        }
    }
    for(y=0; y<8; y++){
        fast_memcpy(p->src + (       7-y)*stride, p->src + (       y+8)*stride, stride);
        fast_memcpy(p->src + (height+8+y)*stride, p->src + (height-y+7)*stride, stride);
    }
    //FIXME (try edge emu)

    for(y=0; y<height+8; y+=8){
        memset(p->temp + (8+y)*stride, 0, 8*stride*sizeof(int16_t));
        for(x=0; x<width+8; x+=8){
            const int qps= 3 + is_luma;
            int qp;

            if(p->qp)
                qp= p->qp;
            else{
                qp= qp_store[ (XMIN(x, width-1)>>qps) + (XMIN(y, height-1)>>qps) * qp_stride];
                if(p->mpeg2) qp = FFMAX(1, qp>>1);
            }
            for(i=0; i<count; i++){
                const int x1= x + offset[i+count-1][0];
                const int y1= y + offset[i+count-1][1];
                const int index= x1 + y1*stride;
                p->dsp.get_pixels(block, p->src + index, stride);
                p->dsp.fdct(block);
                requantize(block2, block, qp, p->dsp.idct_permutation);
                p->dsp.idct(block2);
                add_block(p->temp + index, stride, block2);
            }
        }
        if(y)
            store_slice(dst + (y-8)*dst_stride, p->temp + 8 + y*stride, dst_stride, stride, width, XMIN(8, height+8-y), 6-p->log2_count);
    }
#if 0
    for(y=0; y<height; y++){
        for(x=0; x<width; x++){
            if((((x>>6) ^ (y>>6)) & 1) == 0)
                dst[x + y*dst_stride]= p->src[8 + 8*stride + x + y*stride];
            if((x&63) == 0 || (y&63)==0)
                dst[x + y*dst_stride] += 128;
        }
    }
#endif
    //FIXME reorder for better caching
}

static int config(struct vf_instance_s* vf,
                  int width, int height, int d_width, int d_height,
                  unsigned int flags, unsigned int outfmt){
    int h= (height+16+15)&(~15);

    vf->priv->temp_stride= (width+16+15)&(~15);
    vf->priv->temp= malloc(vf->priv->temp_stride*h*sizeof(int16_t));
    vf->priv->src = malloc(vf->priv->temp_stride*h*sizeof(uint8_t));

    return vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
}

static void get_image(struct vf_instance_s* vf, mp_image_t *mpi){
    if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
    // ok, we can do pp in-place (or pp disabled):
    vf->dmpi=vf_get_image(vf->next,mpi->imgfmt,
        mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
    mpi->planes[0]=vf->dmpi->planes[0];
    mpi->stride[0]=vf->dmpi->stride[0];
    mpi->width=vf->dmpi->width;
    if(mpi->flags&MP_IMGFLAG_PLANAR){
        mpi->planes[1]=vf->dmpi->planes[1];
        mpi->planes[2]=vf->dmpi->planes[2];
        mpi->stride[1]=vf->dmpi->stride[1];
        mpi->stride[2]=vf->dmpi->stride[2];
    }
    mpi->flags|=MP_IMGFLAG_DIRECT;
}

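// Per-frame entry point: remember the quantizer table of non-B frames (reused
// for B frames unless mode bit 2 is set), then run the filter on the luma and
// both chroma planes, or just copy the picture when no QP information exists.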
static int put_image(struct vf_instance_s* vf, mp_image_t *mpi, double pts){
    mp_image_t *dmpi;

    if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
        // no DR, so get a new image! hope we'll get DR buffer:
        dmpi=vf_get_image(vf->next,mpi->imgfmt,
            MP_IMGTYPE_TEMP,
            MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
            mpi->width,mpi->height);
        vf_clone_mpi_attributes(dmpi, mpi);
    }else{
        dmpi=vf->dmpi;
    }

    vf->priv->mpeg2= mpi->qscale_type;
    if(mpi->pict_type != 3 && mpi->qscale && !vf->priv->qp){
        if(!vf->priv->non_b_qp)
            vf->priv->non_b_qp= malloc(mpi->qstride * ((mpi->h + 15) >> 4));
        fast_memcpy(vf->priv->non_b_qp, mpi->qscale, mpi->qstride * ((mpi->h + 15) >> 4));
    }
    if(vf->priv->log2_count || !(mpi->flags&MP_IMGFLAG_DIRECT)){
        char *qp_tab= vf->priv->non_b_qp;
        if((vf->priv->mode&4) || !qp_tab)
            qp_tab= mpi->qscale;

        if(qp_tab || vf->priv->qp){
            filter(vf->priv, dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0], mpi->w, mpi->h, qp_tab, mpi->qstride, 1);
            filter(vf->priv, dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0);
            filter(vf->priv, dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0);
        }else{
            memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]);
            memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]);
            memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]);
        }
    }

#if HAVE_MMX
    if(gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t");
#endif
#if HAVE_MMX2
    if(gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t");
#endif

    return vf_next_put_image(vf,dmpi, pts);
}

static void uninit(struct vf_instance_s* vf){
    if(!vf->priv) return;

    if(vf->priv->temp) free(vf->priv->temp);
    vf->priv->temp= NULL;
    if(vf->priv->src) free(vf->priv->src);
    vf->priv->src= NULL;
    if(vf->priv->avctx) free(vf->priv->avctx);
    vf->priv->avctx= NULL;
    if(vf->priv->non_b_qp) free(vf->priv->non_b_qp);
    vf->priv->non_b_qp= NULL;

    free(vf->priv);
    vf->priv=NULL;
}

//===========================================================================//
static int query_format(struct vf_instance_s* vf, unsigned int fmt){
    switch(fmt){
    case IMGFMT_YVU9:
    case IMGFMT_IF09:
    case IMGFMT_YV12:
    case IMGFMT_I420:
    case IMGFMT_IYUV:
    case IMGFMT_CLPL:
    case IMGFMT_Y800:
    case IMGFMT_Y8:
    case IMGFMT_444P:
    case IMGFMT_422P:
    case IMGFMT_411P:
        return vf_next_query_format(vf,fmt);
    }
    return 0;
}

static int control(struct vf_instance_s* vf, int request, void* data){
    switch(request){
    case VFCTRL_QUERY_MAX_PP_LEVEL:
        return 6;
    case VFCTRL_SET_PP_LEVEL:
        vf->priv->log2_count= *((unsigned int*)data);
        return CONTROL_TRUE;
    }
    return vf_next_control(vf,request,data);
}

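// Filter instantiation: arguments are parsed as "log2_count:qp:mode"; mode
// selects hard (0) or soft (1) thresholding, and MMX implementations are
// substituted at runtime when the CPU supports them.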
static int open(vf_instance_t *vf, char* args){
    int log2c=-1;

    vf->config=config;
    vf->put_image=put_image;
    vf->get_image=get_image;
    vf->query_format=query_format;
    vf->uninit=uninit;
    vf->control= control;
    vf->priv=malloc(sizeof(struct vf_priv_s));
    memset(vf->priv, 0, sizeof(struct vf_priv_s));

    avcodec_init();

    vf->priv->avctx= avcodec_alloc_context();
    dsputil_init(&vf->priv->dsp, vf->priv->avctx);

    vf->priv->log2_count= 3;

    if (args) sscanf(args, "%d:%d:%d", &log2c, &vf->priv->qp, &vf->priv->mode);

    if( log2c >=0 && log2c <=6 )
        vf->priv->log2_count = log2c;

    if(vf->priv->qp < 0)
        vf->priv->qp = 0;

    switch(vf->priv->mode&3){
        default:
        case 0: requantize= hardthresh_c; break;
        case 1: requantize= softthresh_c; break;
    }

#if HAVE_MMX
    if(gCpuCaps.hasMMX){
        store_slice= store_slice_mmx;
        switch(vf->priv->mode&3){
            case 0: requantize= hardthresh_mmx; break;
            case 1: requantize= softthresh_mmx; break;
        }
    }
#endif

    return 1;
}

const vf_info_t vf_info_spp = {
    "simple postprocess",
    "spp",
    "Michael Niedermayer",
    "",
    open,
    NULL
};