/*
 * Copyright (C) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of MPlayer.
 *
 * MPlayer is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * MPlayer is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with MPlayer; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
/*
 * This implementation is based on an algorithm described in
 * "Aria Nosratinia Embedded Post-Processing for
 * Enhancement of Compressed Images (1999)"
 * (http://citeseer.nj.nec.com/nosratinia99embedded.html)
 */
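/*
 * The idea, roughly: for each 8x8 block position the filter looks at up to
 * 2^log2_count shifted copies of the surrounding pixels, DCTs each copy,
 * requantizes the coefficients with a hard or soft threshold derived from the
 * block's quantizer, inverse transforms and accumulates the results, and
 * store_slice() finally averages, dithers and clips them back to 8-bit pixels.
 */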
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"
#include "cpudetect.h"

#include "libavutil/intreadwrite.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"

#include "img_format.h"
#include "mp_image.h"
#include "vf.h"
#include "libvo/fastmemcpy.h"
#define XMIN(a,b) ((a) < (b) ? (a) : (b))

//===========================================================================//
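/*
 * 8x8 ordered-dither thresholds (0..63) added by store_slice_*() before the
 * final downshift, so rounding error is spread spatially instead of banding.
 */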
static const uint8_t __attribute__((aligned(8))) dither[8][8]={
{  0, 48, 12, 60,  3, 51, 15, 63, },
{ 32, 16, 44, 28, 35, 19, 47, 31, },
{  8, 56,  4, 52, 11, 59,  7, 55, },
{ 40, 24, 36, 20, 43, 27, 39, 23, },
{  2, 50, 14, 62,  1, 49, 13, 61, },
{ 34, 18, 46, 30, 33, 17, 45, 29, },
{ 10, 58,  6, 54,  9, 57,  5, 53, },
{ 42, 26, 38, 22, 41, 25, 37, 21, },
};
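/*
 * Block offsets for the shifted transforms: the 2^n entries used when
 * log2_count == n start at index (1<<n)-1, hence the offset[i+count-1]
 * indexing in filter().
 */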
static const uint8_t offset[127][2]= {
{0,0},
{0,0}, {4,4},
{0,0}, {2,2}, {6,4}, {4,6},
{0,0}, {5,1}, {2,2}, {7,3}, {4,4}, {1,5}, {6,6}, {3,7},

{0,0}, {4,0}, {1,1}, {5,1}, {3,2}, {7,2}, {2,3}, {6,3},
{0,4}, {4,4}, {1,5}, {5,5}, {3,6}, {7,6}, {2,7}, {6,7},

{0,0}, {0,2}, {0,4}, {0,6}, {1,1}, {1,3}, {1,5}, {1,7},
{2,0}, {2,2}, {2,4}, {2,6}, {3,1}, {3,3}, {3,5}, {3,7},
{4,0}, {4,2}, {4,4}, {4,6}, {5,1}, {5,3}, {5,5}, {5,7},
{6,0}, {6,2}, {6,4}, {6,6}, {7,1}, {7,3}, {7,5}, {7,7},

{0,0}, {4,4}, {0,4}, {4,0}, {2,2}, {6,6}, {2,6}, {6,2},
{0,2}, {4,6}, {0,6}, {4,2}, {2,0}, {6,4}, {2,4}, {6,0},
{1,1}, {5,5}, {1,5}, {5,1}, {3,3}, {7,7}, {3,7}, {7,3},
{1,3}, {5,7}, {1,7}, {5,3}, {3,1}, {7,5}, {3,5}, {7,1},
{0,1}, {4,5}, {0,5}, {4,1}, {2,3}, {6,7}, {2,7}, {6,3},
{0,3}, {4,7}, {0,7}, {4,3}, {2,1}, {6,5}, {2,5}, {6,1},
{1,0}, {5,4}, {1,4}, {5,0}, {3,2}, {7,6}, {3,6}, {7,2},
{1,2}, {5,6}, {1,6}, {5,2}, {3,0}, {7,4}, {3,4}, {7,0},
};
struct vf_priv_s {
    int log2_count;
    int qp;
    int mode;
    int mpeg2;
    int temp_stride;
    uint8_t *src;
    int16_t *temp;
    AVCodecContext *avctx;
    DSPContext dsp;
    char *non_b_qp;
};
static void hardthresh_c(DCTELEM dst[64], DCTELEM src[64], int qp, uint8_t *permutation){
    int i;
    int bias= 0; //FIXME
    unsigned int threshold1, threshold2;

    threshold1= qp*((1<<4) - bias) - 1;
    threshold2= (threshold1<<1);

    memset(dst, 0, 64*sizeof(DCTELEM));
    dst[0]= (src[0] + 4)>>3; // the DC coefficient is always kept (rounded)

    // hard threshold: AC coefficients within +-threshold1 are zeroed,
    // the others are kept unchanged
    for(i=1; i<64; i++){
        int level= src[i];
        if(((unsigned)(level+threshold1))>threshold2){
            const int j= permutation[i];
            dst[j]= (level + 4)>>3;
        }
    }
}
static void softthresh_c(DCTELEM dst[64], DCTELEM src[64], int qp, uint8_t *permutation){
    int i;
    int bias= 0; //FIXME
    unsigned int threshold1, threshold2;

    threshold1= qp*((1<<4) - bias) - 1;
    threshold2= (threshold1<<1);

    memset(dst, 0, 64*sizeof(DCTELEM));
    dst[0]= (src[0] + 4)>>3;

    // soft threshold: surviving coefficients are additionally shrunk
    // towards zero by threshold1
    for(i=1; i<64; i++){
        int level= src[i];
        if(((unsigned)(level+threshold1))>threshold2){
            const int j= permutation[i];
            if(level>0) dst[j]= (level - threshold1 + 4)>>3;
            else        dst[j]= (level + threshold1 + 4)>>3;
        }
    }
}
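/*
 * MMX versions of the requantizers: the same thresholding as above, four
 * 16-bit coefficients per register.  The hard-threshold variant emulates the
 * comparison with saturating adds/subtracts, which is why the constants
 * threshold1+1, threshold1+5 and threshold1-4 are loaded below.
 */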
#if HAVE_MMX
static void hardthresh_mmx(DCTELEM dst[64], DCTELEM src[64], int qp, uint8_t *permutation){
    int bias= 0; //FIXME
    unsigned int threshold1;

    threshold1= qp*((1<<4) - bias) - 1;

    __asm__ volatile(
#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
        "movq " #src0 ", %%mm0      \n\t"\
        "movq " #src1 ", %%mm1      \n\t"\
        "movq " #src2 ", %%mm2      \n\t"\
        "movq " #src3 ", %%mm3      \n\t"\
        "psubw %%mm4, %%mm0         \n\t"\
        "psubw %%mm4, %%mm1         \n\t"\
        "psubw %%mm4, %%mm2         \n\t"\
        "psubw %%mm4, %%mm3         \n\t"\
        "paddusw %%mm5, %%mm0       \n\t"\
        "paddusw %%mm5, %%mm1       \n\t"\
        "paddusw %%mm5, %%mm2       \n\t"\
        "paddusw %%mm5, %%mm3       \n\t"\
        "paddw %%mm6, %%mm0         \n\t"\
        "paddw %%mm6, %%mm1         \n\t"\
        "paddw %%mm6, %%mm2         \n\t"\
        "paddw %%mm6, %%mm3         \n\t"\
        "psubusw %%mm6, %%mm0       \n\t"\
        "psubusw %%mm6, %%mm1       \n\t"\
        "psubusw %%mm6, %%mm2       \n\t"\
        "psubusw %%mm6, %%mm3       \n\t"\
        "psraw $3, %%mm0            \n\t"\
        "psraw $3, %%mm1            \n\t"\
        "psraw $3, %%mm2            \n\t"\
        "psraw $3, %%mm3            \n\t"\
\
        "movq %%mm0, %%mm7          \n\t"\
        "punpcklwd %%mm2, %%mm0     \n\t" /*A*/\
        "punpckhwd %%mm2, %%mm7     \n\t" /*C*/\
        "movq %%mm1, %%mm2          \n\t"\
        "punpcklwd %%mm3, %%mm1     \n\t" /*B*/\
        "punpckhwd %%mm3, %%mm2     \n\t" /*D*/\
        "movq %%mm0, %%mm3          \n\t"\
        "punpcklwd %%mm1, %%mm0     \n\t" /*A*/\
        "punpckhwd %%mm7, %%mm3     \n\t" /*C*/\
        "punpcklwd %%mm2, %%mm7     \n\t" /*B*/\
        "punpckhwd %%mm2, %%mm1     \n\t" /*D*/\
\
        "movq %%mm0, " #dst0 "      \n\t"\
        "movq %%mm7, " #dst1 "      \n\t"\
        "movq %%mm3, " #dst2 "      \n\t"\
        "movq %%mm1, " #dst3 "      \n\t"

        "movd %2, %%mm4             \n\t"
        "movd %3, %%mm5             \n\t"
        "movd %4, %%mm6             \n\t"
        "packssdw %%mm4, %%mm4      \n\t"
        "packssdw %%mm5, %%mm5      \n\t"
        "packssdw %%mm6, %%mm6      \n\t"
        "packssdw %%mm4, %%mm4      \n\t"
        "packssdw %%mm5, %%mm5      \n\t"
        "packssdw %%mm6, %%mm6      \n\t"
        REQUANT_CORE(  (%1),  8(%1), 16(%1), 24(%1),  (%0), 8(%0), 64(%0), 72(%0))
        REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
        REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
        REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
        : : "r" (src), "r" (dst), "g" (threshold1+1), "g" (threshold1+5), "g" (threshold1-4) //FIXME maybe more accurate than needed?
        );
    dst[0]= (src[0] + 4)>>3;
}
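/*
 * The soft-threshold version computes an approximate |x| with pcmpgtw/pxor
 * (one's-complement negation), shrinks it by the threshold with unsigned
 * saturation, restores the sign the same way, then rounds and shifts like
 * the C version.
 */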
static void softthresh_mmx(DCTELEM dst[64], DCTELEM src[64], int qp, uint8_t *permutation){
    int bias= 0; //FIXME
    unsigned int threshold1;

    threshold1= qp*((1<<4) - bias) - 1;

    __asm__ volatile(
#undef REQUANT_CORE
#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
        "movq " #src0 ", %%mm0      \n\t"\
        "movq " #src1 ", %%mm1      \n\t"\
        "pxor %%mm6, %%mm6          \n\t"\
        "pxor %%mm7, %%mm7          \n\t"\
        "pcmpgtw %%mm0, %%mm6       \n\t"\
        "pcmpgtw %%mm1, %%mm7       \n\t"\
        "pxor %%mm6, %%mm0          \n\t"\
        "pxor %%mm7, %%mm1          \n\t"\
        "psubusw %%mm4, %%mm0       \n\t"\
        "psubusw %%mm4, %%mm1       \n\t"\
        "pxor %%mm6, %%mm0          \n\t"\
        "pxor %%mm7, %%mm1          \n\t"\
        "movq " #src2 ", %%mm2      \n\t"\
        "movq " #src3 ", %%mm3      \n\t"\
        "pxor %%mm6, %%mm6          \n\t"\
        "pxor %%mm7, %%mm7          \n\t"\
        "pcmpgtw %%mm2, %%mm6       \n\t"\
        "pcmpgtw %%mm3, %%mm7       \n\t"\
        "pxor %%mm6, %%mm2          \n\t"\
        "pxor %%mm7, %%mm3          \n\t"\
        "psubusw %%mm4, %%mm2       \n\t"\
        "psubusw %%mm4, %%mm3       \n\t"\
        "pxor %%mm6, %%mm2          \n\t"\
        "pxor %%mm7, %%mm3          \n\t"\
\
        "paddsw %%mm5, %%mm0        \n\t"\
        "paddsw %%mm5, %%mm1        \n\t"\
        "paddsw %%mm5, %%mm2        \n\t"\
        "paddsw %%mm5, %%mm3        \n\t"\
        "psraw $3, %%mm0            \n\t"\
        "psraw $3, %%mm1            \n\t"\
        "psraw $3, %%mm2            \n\t"\
        "psraw $3, %%mm3            \n\t"\
\
        "movq %%mm0, %%mm7          \n\t"\
        "punpcklwd %%mm2, %%mm0     \n\t" /*A*/\
        "punpckhwd %%mm2, %%mm7     \n\t" /*C*/\
        "movq %%mm1, %%mm2          \n\t"\
        "punpcklwd %%mm3, %%mm1     \n\t" /*B*/\
        "punpckhwd %%mm3, %%mm2     \n\t" /*D*/\
        "movq %%mm0, %%mm3          \n\t"\
        "punpcklwd %%mm1, %%mm0     \n\t" /*A*/\
        "punpckhwd %%mm7, %%mm3     \n\t" /*C*/\
        "punpcklwd %%mm2, %%mm7     \n\t" /*B*/\
        "punpckhwd %%mm2, %%mm1     \n\t" /*D*/\
\
        "movq %%mm0, " #dst0 "      \n\t"\
        "movq %%mm7, " #dst1 "      \n\t"\
        "movq %%mm3, " #dst2 "      \n\t"\
        "movq %%mm1, " #dst3 "      \n\t"

        "movd %2, %%mm4             \n\t"
        "movd %3, %%mm5             \n\t"
        "packssdw %%mm4, %%mm4      \n\t"
        "packssdw %%mm5, %%mm5      \n\t"
        "packssdw %%mm4, %%mm4      \n\t"
        "packssdw %%mm5, %%mm5      \n\t"
        REQUANT_CORE(  (%1),  8(%1), 16(%1), 24(%1),  (%0), 8(%0), 64(%0), 72(%0))
        REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
        REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
        REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
        : : "r" (src), "r" (dst), "g" (threshold1), "rm" (4) //FIXME maybe more accurate than needed?
        );
    dst[0]= (src[0] + 4)>>3;
}
#endif
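/*
 * add_block() accumulates one requantized 8x8 block into the int16_t sum
 * buffer, two adjacent values per 32-bit addition.
 */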
static inline void add_block(int16_t *dst, int stride, DCTELEM block[64]){
    int y;

    for(y=0; y<8; y++){
        *(uint32_t*)&dst[0 + y*stride]+= *(uint32_t*)&block[0 + y*8];
        *(uint32_t*)&dst[2 + y*stride]+= *(uint32_t*)&block[2 + y*8];
        *(uint32_t*)&dst[4 + y*stride]+= *(uint32_t*)&block[4 + y*8];
        *(uint32_t*)&dst[6 + y*stride]+= *(uint32_t*)&block[6 + y*8];
    }
}
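/*
 * store_slice_*() convert the accumulated sums back to 8-bit pixels: the sum
 * of 2^log2_count contributions is shifted so that the averaging and the
 * final >>6 collapse into one shift, an ordered-dither value is added, and
 * the result is clipped to 0..255.
 */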
static void store_slice_c(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale){
    int y, x;

#define STORE(pos) \
    temp= ((src[x + y*src_stride + pos]<<log2_scale) + d[pos])>>6;\
    if(temp & 0x100) temp= ~(temp>>31);\
    dst[x + y*dst_stride + pos]= temp;

    for(y=0; y<height; y++){
        const uint8_t *d= dither[y];
        for(x=0; x<width; x+=8){
            int temp;
            STORE(0);
            STORE(1);
            STORE(2);
            STORE(3);
            STORE(4);
            STORE(5);
            STORE(6);
            STORE(7);
        }
    }
}
#if HAVE_MMX
static void store_slice_mmx(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale){
    int y;

    for(y=0; y<height; y++){
        uint8_t *dst1= dst;
        int16_t *src1= src;
        __asm__ volatile(
                "movq (%3), %%mm3       \n\t"
                "movq (%3), %%mm4       \n\t"
                "movd %4, %%mm2         \n\t"
                "pxor %%mm0, %%mm0      \n\t"
                "punpcklbw %%mm0, %%mm3 \n\t"
                "punpckhbw %%mm0, %%mm4 \n\t"
                "psraw %%mm2, %%mm3     \n\t"
                "psraw %%mm2, %%mm4     \n\t"
                "movd %5, %%mm2         \n\t"
                "1:                     \n\t"
                "movq (%0), %%mm0       \n\t"
                "movq 8(%0), %%mm1      \n\t"
                "paddw %%mm3, %%mm0     \n\t"
                "paddw %%mm4, %%mm1     \n\t"
                "psraw %%mm2, %%mm0     \n\t"
                "psraw %%mm2, %%mm1     \n\t"
                "packuswb %%mm1, %%mm0  \n\t"
                "movq %%mm0, (%1)       \n\t"
                "add $16, %0            \n\t"
                "add $8, %1             \n\t"
                "cmp %2, %1             \n\t"
                " jb 1b                 \n\t"
                : "+r" (src1), "+r"(dst1)
                : "r"(dst + width), "r"(dither[y]), "g"(log2_scale), "g"(6-log2_scale)
        );
        src += src_stride;
        dst += dst_stride;
    }
//    if(width != mmxw)
//        store_slice_c(dst + mmxw, src + mmxw, dst_stride, src_stride, width - mmxw, log2_scale);
}
#endif
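/*
 * Runtime dispatch: these default to the C implementations and are switched
 * to the MMX ones in vf_open() when the CPU supports MMX.
 */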
static void (*store_slice)(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale)= store_slice_c;

static void (*requantize)(DCTELEM dst[64], DCTELEM src[64], int qp, uint8_t *permutation)= hardthresh_c;
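/*
 * norm_qscale(), used in filter() below, maps the codec specific quantizer
 * (selected by mpi->qscale_type, stored in p->mpeg2) onto a common MPEG-like
 * scale.  Its definition lives outside this excerpt; a minimal sketch of the
 * usual mapping, given here only as an assumption, would be:
 *
 *   static inline int norm_qscale(int qscale, int type){
 *       switch(type){
 *       case 0: return qscale;       // MPEG-1
 *       case 1: return qscale >> 1;  // MPEG-2
 *       case 2: return qscale >> 2;  // H.264
 *       }
 *       return qscale;
 *   }
 */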
static void filter(struct vf_priv_s *p, uint8_t *dst, uint8_t *src, int dst_stride, int src_stride, int width, int height, uint8_t *qp_store, int qp_stride, int is_luma){
    int x, y, i;
    const int count= 1<<p->log2_count;
    const int stride= is_luma ? p->temp_stride : ((width+16+15)&(~15));
    uint64_t __attribute__((aligned(16))) block_align[32];
    DCTELEM *block = (DCTELEM *)block_align;
    DCTELEM *block2= (DCTELEM *)(block_align+16);

    if (!src || !dst) return; // HACK avoid crash for Y8 colourspace
    // copy the plane into p->src with an 8 pixel mirrored border on every side
    for(y=0; y<height; y++){
        int index= 8 + 8*stride + y*stride;
        fast_memcpy(p->src + index, src + y*src_stride, width);
        for(x=0; x<8; x++){
            p->src[index         - x - 1]= p->src[index +         x    ];
            p->src[index + width + x    ]= p->src[index + width - x - 1];
        }
    }
    for(y=0; y<8; y++){
        fast_memcpy(p->src + (       7-y)*stride, p->src + (       y+8)*stride, stride);
        fast_memcpy(p->src + (height+8+y)*stride, p->src + (height-y+7)*stride, stride);
    }
    //FIXME (try edge emu)

    // process the image in bands of 8 rows
    for(y=0; y<height+8; y+=8){
        memset(p->temp + (8+y)*stride, 0, 8*stride*sizeof(int16_t));
        for(x=0; x<width+8; x+=8){
            const int qps= 3 + is_luma;
            int qp;

            if(p->qp)
                qp= p->qp;
            else{
                qp= qp_store[ (XMIN(x, width-1)>>qps) + (XMIN(y, height-1)>>qps) * qp_stride];
                qp = FFMAX(1, norm_qscale(qp, p->mpeg2));
            }
            for(i=0; i<count; i++){
                const int x1= x + offset[i+count-1][0];
                const int y1= y + offset[i+count-1][1];
                const int index= x1 + y1*stride;
                p->dsp.get_pixels(block, p->src + index, stride);
                p->dsp.fdct(block);
                requantize(block2, block, qp, p->dsp.idct_permutation);
                p->dsp.idct(block2);
                add_block(p->temp + index, stride, block2);
            }
        }
        if(y)
            store_slice(dst + (y-8)*dst_stride, p->temp + 8 + y*stride, dst_stride, stride, width, XMIN(8, height+8-y), 6-p->log2_count);
    }
#if 0
    // debug visualization of the 64x64 block grid
    for(y=0; y<height; y++){
        for(x=0; x<width; x++){
            if((((x>>6) ^ (y>>6)) & 1) == 0)
                dst[x + y*dst_stride]= p->src[8 + 8*stride + x + y*stride];
            if((x&63) == 0 || (y&63)==0)
                dst[x + y*dst_stride] += 128;
        }
    }
#endif
    //FIXME reorder for better caching
}
static int config(struct vf_instance *vf,
        int width, int height, int d_width, int d_height,
        unsigned int flags, unsigned int outfmt){
    int h= (height+16+15)&(~15);

    vf->priv->temp_stride= (width+16+15)&(~15);
    vf->priv->temp= malloc(vf->priv->temp_stride*h*sizeof(int16_t));
    vf->priv->src = malloc(vf->priv->temp_stride*h*sizeof(uint8_t));

    return vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
}
static void get_image(struct vf_instance *vf, mp_image_t *mpi){
    if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
    // ok, we can do pp in-place (or pp disabled):
    vf->dmpi=vf_get_image(vf->next,mpi->imgfmt,
        mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
    mpi->planes[0]=vf->dmpi->planes[0];
    mpi->stride[0]=vf->dmpi->stride[0];
    mpi->width=vf->dmpi->width;
    if(mpi->flags&MP_IMGFLAG_PLANAR){
        mpi->planes[1]=vf->dmpi->planes[1];
        mpi->planes[2]=vf->dmpi->planes[2];
        mpi->stride[1]=vf->dmpi->stride[1];
        mpi->stride[2]=vf->dmpi->stride[2];
    }
    mpi->flags|=MP_IMGFLAG_DIRECT;
}
static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
    mp_image_t *dmpi;

    if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
        // no DR, so get a new image! hope we'll get DR buffer:
        dmpi=vf_get_image(vf->next,mpi->imgfmt,
            MP_IMGTYPE_TEMP,
            MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
            mpi->width,mpi->height);
        vf_clone_mpi_attributes(dmpi, mpi);
    }else{
        dmpi=vf->dmpi;
    }

    vf->priv->mpeg2= mpi->qscale_type;
    // remember the quantizer table of the last non-B frame (pict_type 3 == B)
    if(mpi->pict_type != 3 && mpi->qscale && !vf->priv->qp){
        int w = mpi->qstride;
        int h = (mpi->h + 15) >> 4;
        if (!w) {
            w = (mpi->w + 15) >> 4;
            h = 1;
        }
        if(!vf->priv->non_b_qp)
            vf->priv->non_b_qp= malloc(w*h);
        fast_memcpy(vf->priv->non_b_qp, mpi->qscale, w*h);
    }
    if(vf->priv->log2_count || !(mpi->flags&MP_IMGFLAG_DIRECT)){
        char *qp_tab= vf->priv->non_b_qp;
        if((vf->priv->mode&4) || !qp_tab)
            qp_tab= mpi->qscale;

        if(qp_tab || vf->priv->qp){
            filter(vf->priv, dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0], mpi->w, mpi->h, qp_tab, mpi->qstride, 1);
            filter(vf->priv, dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0);
            filter(vf->priv, dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0);
        }else{
            memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]);
            memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]);
            memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]);
        }
    }

#if HAVE_MMX
    if(gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t");
#endif
#if HAVE_MMX2
    if(gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t");
#endif

    return vf_next_put_image(vf,dmpi, pts);
}
static void uninit(struct vf_instance *vf){
    if(!vf->priv) return;

    if(vf->priv->temp) free(vf->priv->temp);
    vf->priv->temp= NULL;
    if(vf->priv->src) free(vf->priv->src);
    vf->priv->src= NULL;
    if(vf->priv->avctx) free(vf->priv->avctx);
    vf->priv->avctx= NULL;
    if(vf->priv->non_b_qp) free(vf->priv->non_b_qp);
    vf->priv->non_b_qp= NULL;

    free(vf->priv);
    vf->priv= NULL;
}
//===========================================================================//
static int query_format(struct vf_instance *vf, unsigned int fmt){
    switch(fmt){
    case IMGFMT_YVU9:
    case IMGFMT_IF09:
    case IMGFMT_YV12:
    case IMGFMT_I420:
    case IMGFMT_IYUV:
    case IMGFMT_CLPL:
    case IMGFMT_Y800:
    case IMGFMT_Y8:
    case IMGFMT_444P:
    case IMGFMT_422P:
    case IMGFMT_411P:
        return vf_next_query_format(vf,fmt);
    }
    return 0;
}
static int control(struct vf_instance *vf, int request, void* data){
    switch(request){
    case VFCTRL_QUERY_MAX_PP_LEVEL:
        return 6;
    case VFCTRL_SET_PP_LEVEL:
        vf->priv->log2_count= *((unsigned int*)data);
        return CONTROL_TRUE;
    }
    return vf_next_control(vf,request,data);
}
static int vf_open(vf_instance_t *vf, char *args){

    int log2c=-1;

    vf->config=config;
    vf->put_image=put_image;
    vf->get_image=get_image;
    vf->query_format=query_format;
    vf->uninit=uninit;
    vf->control= control;
    vf->priv=malloc(sizeof(struct vf_priv_s));
    memset(vf->priv, 0, sizeof(struct vf_priv_s));

    vf->priv->avctx= avcodec_alloc_context();
    dsputil_init(&vf->priv->dsp, vf->priv->avctx);

    vf->priv->log2_count= 3;

    if (args) sscanf(args, "%d:%d:%d", &log2c, &vf->priv->qp, &vf->priv->mode);

    if( log2c >=0 && log2c <=6 )
        vf->priv->log2_count = log2c;

    if(vf->priv->qp < 0)
        vf->priv->qp = 0;

    switch(vf->priv->mode&3){
        default:
        case 0: requantize= hardthresh_c; break;
        case 1: requantize= softthresh_c; break;
    }

#if HAVE_MMX
    if(gCpuCaps.hasMMX){
        store_slice= store_slice_mmx;
        switch(vf->priv->mode&3){
            case 0: requantize= hardthresh_mmx; break;
            case 1: requantize= softthresh_mmx; break;
        }
    }
#endif

    return 1;
}
const vf_info_t vf_info_spp = {
    "simple postprocess",
    "spp",
    "Michael Niedermayer",
    "",
    vf_open,
    NULL
};