1 // Copyright 2003, 2004 David Hilvert <dhilvert@auricle.dyndns.org>,
2 // <dhilvert@ugcs.caltech.edu>
4 /* This file is part of the Anti-Lamenessing Engine.
6 The Anti-Lamenessing Engine is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 The Anti-Lamenessing Engine is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with the Anti-Lamenessing Engine; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * ipc.h: A render subclass implementing an iterative image reconstruction
23 * algorithm based on the work of Michal Irani and Shmuel Peleg. The
24 * projection and backprojection functions used are user-configurable.
25 * For more information on the theory behind the algorithm, see the last
26 * section and appendix of:
28 * http://www.wisdom.weizmann.ac.il/~irani/abstracts/superResolution.html
30 * The algorithm in the source paper looks something like this (PSF' is the
31 * backprojection kernel, and corresponds to what the authors of the paper call
34 * ===============================================================
35 * Forward Backward Points of comparison
36 * ---------------------------------------------------------------
44 * | ---------+ <--- difference
49 * ===============================================================
51 * This assumes a single colorspace representation. However, consumer cameras
52 * sometimes perform sharpening in non-linear colorspace, whereas lens and
53 * sensor blurring occurs in linear colorspace. Hence, there can be two
54 * colorspaces involved; ALE accounts for this with linear and non-linear
55 * colorspace PSFs. Hence, the algorithm we use looks something like:
57 * ===============================================================
58 * Forward Backward Points of comparison
59 * ---------------------------------------------------------------
67 * | ----------+ <--- difference,
74 * unlinearize linearize
78 * lsim_nl(n) lreal_nl(n)
84 * | ----------+ <--- difference
87 * nlsimulated(n) real_nl
97 * ===============================================================
103 #include "../image.h"
104 #include "../render.h"
105 #include "psf/rasterizer.h"
106 #include "psf/normalizer.h"
107 #include "psf/backprojector.h"
109 class ipc
: public render
{
/*
 * The current image approximation, refined once per Irani-Peleg
 * iteration (see _ip()).
 */
113 image
*approximation
;
/*
 * Number of refinement iterations to run.
 */
115 unsigned int iterations
;
/*
 * Linear-colorspace and non-linear-colorspace point-spread functions.
 * NOTE(review): nlresponse appears optional (the non-linear path is
 * skipped when nlsimulated == NULL below) -- confirm against full source.
 */
116 psf
*lresponse
, *nlresponse
;
/*
 * Nonzero to perform exposure registration between simulated and real
 * frames (see _ip_frame_correct()).
 */
117 int exposure_register
;
/*
 * Nonzero to combine per-frame corrections with a weighted median
 * rather than a weighted mean (see correction_t).
 */
118 int use_weighted_median
;
122 * Calculate the simulated input frame SIMULATED from the image
123 * approximation APPROXIMATION, iterating over image approximation
129 image
*approximation
;
133 image
*nlsim_weights
;
135 const raster
*lresponse
;
136 const raster
*nlresponse
;
141 class simulate_linear
: public thread::decompose_domain
,
143 point
*subdomain_extents
;
146 void prepare_subdomains(unsigned int N
) {
147 subdomain_extents
= new point
[N
* 2];
149 for (unsigned int n
= 0; n
< N
; n
++) {
150 point
*se
= subdomain_extents
+ 2 * n
;
152 for (int d
= 0; d
< 2; d
++) {
153 se
[0][d
] = extents
[0][d
];
154 se
[1][d
] = extents
[1][d
];
159 void subdomain_algorithm(unsigned int thread
,
160 int i_min
, int i_max
, int j_min
, int j_max
) {
162 point
*extents
= subdomain_extents
+ thread
* 2;
165 * Linear filtering, iterating over approximation pixels
168 for (int i
= i_min
; i
< i_max
; i
++)
169 for (int j
= j_min
; j
< j_max
; j
++) {
171 if (is_excluded_r(approximation
->offset(), i
, j
, frame_num
))
175 * Obtain the position Q and dimensions D of
176 * image approximation pixel (i, j) in the coordinate
177 * system of the simulated frame.
180 point p
= point(i
+ approximation
->offset()[0], j
+ approximation
->offset()[1]);
185 * XXX: This appears to calculate the wrong thing.
188 if (is_excluded_f(p
, frame_num
))
191 t
.unscaled_map_area_inverse(p
, &q
, d
);
194 * Convenient variables for expressing the boundaries
195 * of the mapped area.
198 ale_pos top
= q
[0] - d
[0];
199 ale_pos bot
= q
[0] + d
[0];
200 ale_pos lef
= q
[1] - d
[1];
201 ale_pos rig
= q
[1] + d
[1];
204 * Iterate over all simulated frame pixels influenced
205 * by the scene pixel (i, j), as determined by the
209 for (int ii
= (int) floor(top
+ lresponse
->min_i());
210 ii
<= ceil(bot
+ lresponse
->max_i()); ii
++)
211 for (int jj
= (int) floor(lef
+ lresponse
->min_j());
212 jj
<= ceil(rig
+ lresponse
->max_j()); jj
++) {
215 || ii
>= (int) lsimulated
->height()
217 || jj
>= (int) lsimulated
->width())
220 if (ii
< extents
[0][0])
222 if (jj
< extents
[0][1])
224 if (ii
> extents
[1][0])
226 if (jj
> extents
[1][1])
229 class rasterizer::psf_result r
=
230 (*lresponse
)(top
- ii
, bot
- ii
,
232 lresponse
->select(ii
, jj
),
233 lsimulated
->get_channels(ii
, jj
));
237 if (lsimulated
->get_bayer() == IMAGE_BAYER_NONE
) {
238 lsimulated
->pix(ii
, jj
) +=
239 r(approximation
->get_pixel(i
, j
));
240 lsim_weights
->pix(ii
, jj
) +=
243 int k
= ((image_bayer_ale_real
*)lsimulated
)->bayer_color(ii
, jj
);
244 lsimulated
->chan(ii
, jj
, k
) +=
245 r(approximation
->get_pixel(i
, j
))[k
];
246 lsim_weights
->chan(ii
, jj
, k
) +=
255 void finish_subdomains(unsigned int N
) {
260 for (unsigned int n
= 0; n
< N
; n
++) {
261 point
*se
= subdomain_extents
+ 2 * n
;
263 for (int d
= 0; d
< 2; d
++) {
264 if (se
[0][d
] < extents
[0][d
])
265 extents
[0][d
] = se
[0][d
];
266 if (se
[1][d
] > extents
[1][d
])
267 extents
[1][d
] = se
[1][d
];
271 delete[] subdomain_extents
;
275 simulate_linear(sim_args s
) : decompose_domain(0, s
.approximation
->height(),
276 0, s
.approximation
->width()),
282 class simulate_nonlinear
: public thread::decompose_domain
,
284 point
*subdomain_extents
;
287 void prepare_subdomains(unsigned int N
) {
288 subdomain_extents
= new point
[N
* 2];
290 for (unsigned int n
= 0; n
< N
; n
++) {
291 point
*se
= subdomain_extents
+ 2 * n
;
293 for (int d
= 0; d
< 2; d
++) {
294 se
[0][d
] = extents
[0][d
];
295 se
[1][d
] = extents
[1][d
];
300 void subdomain_algorithm(unsigned int thread
,
301 int i_min
, int i_max
, int j_min
, int j_max
) {
303 point
*extents
= subdomain_extents
+ thread
* 2;
309 for (int i
= i_min
; i
< i_max
; i
++)
310 for (int j
= j_min
; j
< j_max
; j
++) {
313 * Convenient variables for expressing the boundaries
314 * of the mapped area.
317 ale_pos top
= i
- 0.5;
318 ale_pos bot
= i
+ 0.5;
319 ale_pos lef
= j
- 0.5;
320 ale_pos rig
= j
+ 0.5;
323 * Iterate over all simulated frame pixels influenced
324 * by the scene pixel (i, j), as determined by the
328 for (int ii
= (int) floor(top
+ nlresponse
->min_i());
329 ii
<= ceil(bot
+ nlresponse
->max_i()); ii
++)
330 for (int jj
= (int) floor(lef
+ nlresponse
->min_j());
331 jj
<= ceil(rig
+ nlresponse
->max_j()); jj
++) {
334 || ii
>= (int) nlsimulated
->height()
336 || jj
>= (int) nlsimulated
->width())
339 if (ii
< extents
[0][0])
341 if (jj
< extents
[0][1])
343 if (ii
> extents
[1][0])
345 if (jj
> extents
[1][1])
348 class rasterizer::psf_result r
=
349 (*nlresponse
)(top
- ii
, bot
- ii
,
351 nlresponse
->select(ii
, jj
));
355 nlsimulated
->pix(ii
, jj
) +=
356 r(exp
->unlinearize(lsimulated
->get_pixel(i
, j
)));
357 nlsim_weights
->pix(ii
, jj
) +=
365 void finish_subdomains(unsigned int N
) {
370 for (unsigned int n
= 0; n
< N
; n
++) {
371 point
*se
= subdomain_extents
+ 2 * n
;
373 for (int d
= 0; d
< 2; d
++) {
374 if (se
[0][d
] < extents
[0][d
])
375 extents
[0][d
] = se
[0][d
];
376 if (se
[1][d
] > extents
[1][d
])
377 extents
[1][d
] = se
[1][d
];
381 delete[] subdomain_extents
;
385 simulate_nonlinear(sim_args s
) : decompose_domain(0, s
.lsimulated
->height(),
386 0, s
.lsimulated
->width()),
391 void _ip_frame_simulate(int frame_num
, image
*approximation
,
392 image
*lsimulated
, image
*nlsimulated
,
393 transformation t
, const raster
*lresponse
,
394 const raster
*nlresponse
, const exposure
&exp
,
400 * Initializations for linear filtering
403 image
*lsim_weights
= NULL
;
405 if (lsimulated
->get_bayer() == IMAGE_BAYER_NONE
) {
406 lsim_weights
= new image_ale_real(
407 lsimulated
->height(),
409 lsimulated
->depth());
411 lsim_weights
= new image_bayer_ale_real(
412 lsimulated
->height(),
415 lsimulated
->get_bayer());
417 assert (lsim_weights
);
419 args
.frame_num
= frame_num
;
420 args
.approximation
= approximation
;
421 args
.lsimulated
= lsimulated
;
422 args
.nlsimulated
= nlsimulated
;
423 args
.lsim_weights
= lsim_weights
;
425 args
.lresponse
= lresponse
;
426 args
.nlresponse
= nlresponse
;
428 args
.extents
= extents
;
434 simulate_linear
sl(args
);
441 for (unsigned int ii
= 0; ii
< lsimulated
->height(); ii
++)
442 for (unsigned int jj
= 0; jj
< lsimulated
->width(); jj
++) {
443 const ale_real weight_floor
= 1e-10;
446 for (int k
= 0; k
< 3; k
++) {
447 if ((lsimulated
->get_channels(ii
, jj
) & (1 << k
)) == 0)
450 ale_real weight
= lsim_weights
->chan(ii
, jj
, k
);
452 if (!(weight
> weight_floor
))
453 lsimulated
->chan(ii
, jj
, k
)
454 /= zero
; /* Generate a non-finite value */
456 lsimulated
->chan(ii
, jj
, k
) /= weight
;
467 * Return if there is no non-linear step.
470 if (nlsimulated
== NULL
) {
475 * Initialize non-linear
478 image_ale_real
*nlsim_weights
= new image_ale_real(
479 nlsimulated
->height(),
480 nlsimulated
->width(),
481 nlsimulated
->depth());
483 args
.nlsim_weights
= nlsim_weights
;
486 * Simulate non-linear
489 simulate_nonlinear
snl(args
);
493 * Normalize non-linear
496 for (unsigned int ii
= 0; ii
< nlsimulated
->height(); ii
++)
497 for (unsigned int jj
= 0; jj
< nlsimulated
->width(); jj
++) {
498 pixel weight
= nlsim_weights
->get_pixel(ii
, jj
);
499 ale_real weight_floor
= 1e-10;
502 for (int k
= 0; k
< 3; k
++)
503 if (!(weight
[k
] > weight_floor
))
504 nlsimulated
->pix(ii
, jj
)[k
]
505 /= zero
; /* Generate a non-finite value */
507 nlsimulated
->pix(ii
, jj
)
512 * Finalize non-linear
515 delete nlsim_weights
;
518 struct correction_t
{
522 int use_weighted_median
;
527 image_weighted_median
*iwm
;
541 * Create correction structures.
543 correction_t(int use_weighted_median
, double weight_limit
, unsigned int h
, unsigned int w
, unsigned int d
) {
544 this->use_weighted_median
= use_weighted_median
;
545 if (use_weighted_median
)
546 iwm
= new image_weighted_median(h
, w
, d
);
547 this->weight_limit
= weight_limit
;
548 c
= new image_ale_real(h
, w
, d
);
549 cc
= new image_ale_real(h
, w
, d
);
553 * Destroy correction structures
556 if (use_weighted_median
)
565 pixel
get_count(int i
, int j
) {
566 if (use_weighted_median
)
567 return iwm
->get_weights()->get_pixel(i
, j
);
569 return cc
->get_pixel(i
, j
);
573 * Check for weight limit
/*
 * NOTE(review): the opening of the condition (original line 576) and
 * the return statements (original lines 580+) are elided from this
 * excerpt.  Presumably the method returns nonzero when a weight limit
 * is set and all three channel counts have reached it -- confirm
 * against the full source before relying on this.
 */
575 int weight_limited(int i
, int j
) {
577 && get_count(i
, j
)[0] >= weight_limit
578 && get_count(i
, j
)[1] >= weight_limit
579 && get_count(i
, j
)[2] >= weight_limit
)
588 pixel
get_correction(int i
, int j
) {
589 if (use_weighted_median
)
590 return iwm
->get_colors()->get_pixel(i
, j
);
592 return c
->get_pixel(i
, j
)
593 / cc
->get_pixel(i
, j
);
599 void frame_end(int frame_num
) {
600 if (use_weighted_median
) {
602 for (unsigned int i
= 0; i
< c
->height(); i
++)
603 for (unsigned int j
= 0; j
< c
->width(); j
++) {
606 * Update the median calculator
608 pixel cval
= c
->get_pixel(i
, j
);
609 pixel ccval
= cc
->get_pixel(i
, j
);
610 iwm
->accumulate(i
, j
, frame_num
, cval
/ ccval
, ccval
);
615 c
->pix(i
, j
) = pixel::zero();
616 cc
->pix(i
, j
) = pixel::zero();
622 * Update correction structures, using either a weighted mean update or
623 * a weighted median update.
625 void update(int i
, int j
, pixel value_times_weight
, pixel weight
) {
626 c
->pix(i
, j
) += value_times_weight
;
627 cc
->pix(i
, j
) += weight
;
632 * For each pixel in APPROXIMATION, calculate the differences
633 * between SIMULATED and REAL pixels influenced by this pixel,
634 * and back-project the differences onto the correction array
635 * C. The number of frames backprojected to each pixel in C is
638 * Since APPROXIMATION can always, given sufficient computational
639 * resources, be configured to be of finer resolution than SIMULATED,
640 * we map APPROXIMATION pixels onto the SIMULATED grid rather than vice
641 * versa. This should reduce the error incurred by approximations in
642 * d2::transformation::unscaled_map_area*().
644 * This approach requires multiplying the resultant integral by a
645 * corrective factor, since the grids are generally of different
649 struct correct_args
{
651 image
*approximation
;
658 const backprojector
*lresponse
;
659 const backprojector
*nlresponse
;
663 class correct_nonlinear
: public thread::decompose_domain
,
664 private correct_args
{
667 void subdomain_algorithm(unsigned int thread
,
668 int i_min
, int i_max
, int j_min
, int j_max
) {
671 * Unlinearize values from lsimulated.
674 for (int i
= i_min
; i
< i_max
; i
++)
675 for (int j
= j_min
; j
< j_max
; j
++)
676 lreal
->set_pixel(i
, j
, real
->exp().unlinearize(
677 lsimulated
->get_pixel(i
, j
)));
681 * Backproject from real to lreal, iterating over all pixels
685 for (int i
= i_min
; i
< i_max
; i
++)
686 for (int j
= j_min
; j
< j_max
; j
++) {
689 * XXX: Is this right?
692 if (is_excluded_r(approximation
->offset(), i
, j
, frame_num
))
696 * Convenient variables for expressing the boundaries
697 * of the mapped area.
700 ale_pos top
= i
- 0.5;
701 ale_pos bot
= i
+ 0.5;
702 ale_pos lef
= j
- 0.5;
703 ale_pos rig
= j
+ 0.5;
706 * Iterate over non-linear pixels influenced by linear
710 for (int ii
= (int) floor(top
+ nlresponse
->min_i());
711 ii
<= ceil(bot
+ nlresponse
->max_i()); ii
++)
712 for (int jj
= (int) floor(lef
+ nlresponse
->min_j());
713 jj
<= ceil(rig
+ nlresponse
->max_j()); jj
++) {
716 || ii
>= (int) nlsimulated
->height()
718 || jj
>= (int) nlsimulated
->width())
721 class backprojector::psf_result r
=
722 (*nlresponse
)(top
- ii
, bot
- ii
,
724 nlresponse
->select(ii
, jj
));
728 real
->exp().unlinearize(real
->get_pixel(ii
, jj
));
731 nlsimulated
->get_pixel(ii
, jj
);
733 if (!finite(comp_simu
[0])
734 || !finite(comp_simu
[1])
735 || !finite(comp_simu
[2]))
739 * Backprojection value.
742 pixel bpv
= r(comp_real
- comp_simu
);
748 lreal
->pix(i
, j
) += bpv
;
756 for (int i
= i_min
; i
< i_max
; i
++)
757 for (int j
= j_min
; j
< j_max
; j
++)
758 lreal
->set_pixel(i
, j
, real
->exp().linearize(
759 lreal
->get_pixel(i
, j
)));
764 correct_nonlinear(correct_args c
) : decompose_domain(0, c
.lreal
->height(),
765 0, c
.lreal
->width()),
770 class correct_linear
: public thread::decompose_domain
,
771 private correct_args
{
774 void subdomain_algorithm(unsigned int thread
,
775 int i_min
, int i_max
, int j_min
, int j_max
) {
778 * Iterate over all pixels in the approximation.
781 for (int i
= i_min
; i
< i_max
; i
++)
782 for (int j
= j_min
; j
< j_max
; j
++) {
785 * Check correction count against any weight limit.
788 if (cu
->weight_limited(i
, j
))
792 * Obtain the position Q and dimensions D of image
793 * approximation pixel (i, j) in the coordinate system
794 * of the simulated (and real) frame.
797 point p
= point(i
+ approximation
->offset()[0], j
+ approximation
->offset()[1]);
801 t
.unscaled_map_area_inverse(p
, &q
, d
);
804 * Convenient variables for expressing the boundaries
805 * of the mapped area.
808 ale_pos top
= q
[0] - d
[0];
809 ale_pos bot
= q
[0] + d
[0];
810 ale_pos lef
= q
[1] - d
[1];
811 ale_pos rig
= q
[1] + d
[1];
814 * Iterate over frame pixels influenced by the scene
818 for (int ii
= (int) floor(top
+ lresponse
->min_i());
819 ii
<= ceil(bot
+ lresponse
->max_i()); ii
++)
820 for (int jj
= (int) floor(lef
+ lresponse
->min_j());
821 jj
<= ceil(rig
+ lresponse
->max_j()); jj
++) {
824 || ii
>= (int) lreal
->height()
826 || jj
>= (int) lreal
->width())
829 if (is_excluded_f(ii
, jj
, frame_num
))
832 unsigned int selection
= lresponse
->select(ii
, jj
);
834 char channels
= lreal
->get_channels(ii
, jj
);
836 class backprojector::psf_result r
=
837 (*lresponse
)(top
- ii
, bot
- ii
,
839 selection
, channels
);
843 * R is the result of integration in the
844 * coordinate system of the simulated frame.
845 * We must rescale to get the result of
846 * integration in the coordinate system of the
847 * approximation image.
850 r
*= (1 / (bot
- top
) / (rig
- lef
));
853 lreal
->get_raw_pixel(ii
, jj
);
856 // real->get_pixel(ii, jj);
859 lsimulated
->get_raw_pixel(ii
, jj
);
864 * Under the assumption that finite() testing
865 * may be expensive, limit such tests to active
868 int found_nonfinite
= 0;
869 for (int k
= 0; k
< 3; k
++) {
870 if (((1 << k
) & channels
)
871 && (!finite(comp_simu
[k
])
872 || !finite(comp_lreal
[k
]))) {
882 if (!finite(comp_simu
[0])
883 || !finite(comp_simu
[1])
884 || !finite(comp_simu
[2])
885 || !finite(comp_lreal
[0])
886 || !finite(comp_lreal
[1])
887 || !finite(comp_lreal
[2]))
892 * Backprojection value unadjusted
896 pixel bpv
= r(comp_lreal
- comp_simu
);
899 * Confidence [equal to (1, 1, 1) when
900 * confidence is uniform].
903 // Ordinary certainty
904 // pixel conf = real->exp().confidence(comp_lreal);
906 // One-sided certainty
907 // pixel conf = real->exp().one_sided_confidence(comp_lreal, bpv);
908 // conf = real->exp().one_sided_confidence(comp_real, bpv);
910 // Estimate-based certainty
911 // pixel conf = real->exp().confidence(comp_simu);
913 // One-sided estimate-based certainty
914 pixel conf
= real
->exp().one_sided_confidence(comp_simu
, bpv
);
916 // One-sided approximation-based certainty
917 // pixel conf = real->exp().one_sided_confidence(approximation->pix(i, j), bpv);
920 * If a color is bayer-interpolated, then we have no confidence in its
924 if (real
->get_bayer() != IMAGE_BAYER_NONE
) {
925 int color
= ((image_bayer_ale_real
*)real
)->bayer_color(ii
, jj
);
926 conf
[(color
+ 1) % 3] = 0;
927 conf
[(color
+ 2) % 3] = 0;
934 // c->pix(i, j) += bpv * conf;
937 * Increment the backprojection weight. When
938 * confidence is uniform, this should weight
939 * each frame's correction equally.
942 // cc->pix(i, j) += conf * r.weight()
943 // / lresponse->integral(selection);
945 cu
->update(i
, j
, bpv
* conf
, conf
* r
.weight() / lresponse
->integral(selection
));
952 correct_linear(correct_args c
) : decompose_domain(0, c
.approximation
->height(),
953 0, c
.approximation
->width()),
958 void _ip_frame_correct(int frame_num
, image
*approximation
,
959 correction_t
*cu
, const image
*real
, image
*lsimulated
,
960 image
*nlsimulated
, transformation t
,
961 const backprojector
*lresponse
,
962 const backprojector
*nlresponse
, point
*extents
) {
966 args
.frame_num
= frame_num
;
967 args
.approximation
= approximation
;
970 args
.lsimulated
= lsimulated
;
971 args
.nlsimulated
= nlsimulated
;
973 args
.lresponse
= lresponse
;
974 args
.nlresponse
= nlresponse
;
977 * Generate the image to compare lsimulated with.
982 if (nlsimulated
== NULL
)
986 image
*new_lreal
= new image_ale_real(
993 args
.lreal
= new_lreal
;
995 correct_nonlinear
cn(args
);
1002 * Perform exposure adjustment.
1005 if (exposure_register
) {
1010 ec
= lsimulated
->avg_channel_magnitude()
1011 / lreal
->avg_channel_magnitude();
1013 pixel_accum ratio_sum
;
1014 pixel_accum weight_sum
;
1016 for (unsigned int i
= 0; i
< lreal
->height(); i
++)
1017 for (unsigned int j
= 0; j
< lreal
->width(); j
++) {
1019 pixel s
= lsimulated
->get_pixel(i
, j
);
1020 pixel r
= lreal
->get_pixel(i
, j
);
1021 pixel confidence
= real
->exp().confidence(r
);
1029 ratio_sum
+= confidence
* s
/ r
;
1030 weight_sum
+= confidence
;
1034 ec
= ratio_sum
/ weight_sum
;
1036 pixel_accum ssum
, rsum
;
1038 // for (unsigned int i = 0; i < lreal->height(); i++)
1039 // for (unsigned int j = 0; j < lreal->width(); j++) {
1040 for (unsigned int i
= (unsigned int) floor(extents
[0][0]);
1041 i
< (unsigned int) ceil(extents
[1][0]); i
++)
1042 for (unsigned int j
= (unsigned int) floor(extents
[0][1]);
1043 j
< (unsigned int) ceil(extents
[1][1]); j
++) {
1045 pixel s
= lsimulated
->get_pixel(i
, j
);
1046 pixel r
= lreal
->get_pixel(i
, j
);
1048 pixel confidence
= real
->exp().confidence(s
);
1052 || !confidence
.finite())
1055 if (real
->get_bayer() != IMAGE_BAYER_NONE
) {
1056 int color
= ((image_bayer_ale_real
*)real
)->bayer_color(i
, j
);
1057 ssum
[color
] += confidence
[color
] * s
[color
];
1058 rsum
[color
] += confidence
[color
] * r
[color
];
1060 ssum
+= confidence
* s
;
1061 rsum
+= confidence
* r
;
1074 real
->exp().set_multiplier(
1075 real
->exp().get_multiplier() * ec
);
1078 args
.lreal
= (d2::image
*) lreal
;
1080 correct_linear
cl(args
);
1088 * Adjust correction array C based on the difference between the
1089 * simulated projected frame and real frame M. Update the correction
1090 * count CC for affected pixels in C.
1093 virtual void _ip_frame(int frame_num
, correction_t
*cu
, const image
*real
,
1094 transformation t
, const raster
*f
, const backprojector
*b
,
1095 const raster
*nlf
, const backprojector
*nlb
) {
1097 ui::get()->d2_irani_peleg_start();
1100 * Initialize simulated data structures
1105 if (real
->get_bayer() == IMAGE_BAYER_NONE
) {
1106 lsimulated
= new image_ale_real(
1111 lsimulated
= new image_bayer_ale_real(
1118 image
*nlsimulated
= NULL
;
1121 nlsimulated
= new image_ale_real(
1127 * Create simulated frames with forward projection.
1130 ui::get()->ip_frame_simulate_start();
1132 point extents
[2] = { point::posinf(), point::neginf() };
1134 _ip_frame_simulate(frame_num
, approximation
, lsimulated
, nlsimulated
, t
, f
, nlf
, real
->exp(), extents
);
1137 * Update the correction array using backprojection.
1140 ui::get()->ip_frame_correct_start();
1141 _ip_frame_correct(frame_num
, approximation
, cu
, real
, lsimulated
, nlsimulated
, t
, b
, nlb
, extents
);
1144 * Finalize data structures.
1150 ui::get()->d2_irani_peleg_stop();
1154 * Iterate _ip_frame() over all frames, and update APPROXIMATION after
1155 * corrections from all frames have been summed. Repeat for the number
1156 * of iterations specified by the user.
1159 virtual void _ip() {
1162 * Create rasterized PSF and backprojection kernel AUX.
1165 raster
**f
= (raster
**) malloc(image_rw::count() * sizeof(raster
*));
1166 backprojector
**b
= (backprojector
**) malloc(image_rw::count() * sizeof(backprojector
*));
1168 for (unsigned int m
= 0; m
< image_rw::count(); m
++) {
1170 if (!align::match(m
))
1173 transformation t
= align::of(m
);
1175 f
[m
] = new normalizer(new rasterizer(lresponse
, t
));
1176 b
[m
] = new backprojector(f
[m
]);
1180 backprojector
*nlb
= NULL
;
1183 nlf
= new normalizer(new rasterizer(nlresponse
, transformation::eu_identity()));
1184 nlb
= new backprojector(nlf
);
1187 for (unsigned int n
= 0; n
< iterations
; n
++) {
1189 correction_t
*correction
= new correction_t(
1190 use_weighted_median
,
1192 approximation
->height(),
1193 approximation
->width(),
1194 approximation
->depth());
1197 * Iterate over all frames
1200 for (unsigned int m
= 0; m
< image_rw::count(); m
++) {
1202 if (!align::match(m
))
1205 ui::get()->ip_frame_start(m
);
1207 transformation t
= align::of(m
);
1208 const image
*real
= image_rw::open(m
);
1210 _ip_frame(m
, correction
, real
,
1211 t
, f
[m
], b
[m
], nlf
, nlb
);
1215 correction
->frame_end(m
);
1219 * Update the approximation.
1222 ui::get()->ip_update();
1224 for (unsigned int i
= 0; i
< approximation
->height(); i
++)
1225 for (unsigned int j
= 0; j
< approximation
->width(); j
++) {
1227 pixel cpix
= correction
->get_correction(i
, j
);
1228 pixel ccpix
= correction
->get_count(i
, j
);
1229 pixel apix
= approximation
->get_pixel(i
, j
);
1231 for (unsigned int k
= 0; k
< 3; k
++) {
1233 const ale_real cc_floor
= 1e-20;
1235 if (ccpix
[k
] < cc_floor
)
1238 if (!finite(cpix
[k
]))
1241 if (!finite(apix
[k
]))
1244 ale_real new_value
= cpix
[k
] + apix
[k
];
1246 assert (finite(apix
[k
]));
1247 assert (finite(ccpix
[k
]));
1248 assert (finite(cpix
[k
]));
1249 assert (finite(new_value
));
1252 * Negative light doesn't make sense.
1257 approximation
->chan(i
, j
, k
) = new_value
;
1264 ui::get()->ip_write();
1265 image_rw::output(approximation
);
1268 ui::get()->ip_step_done();
1272 for (unsigned int m
= 0; m
< image_rw::count(); m
++) {
1274 if (!align::match(m
))
/*
 * Construct an Irani-Peleg renderer over the given input renderer.
 *
 * input: upstream renderer supplying the initial approximation.
 * iterations: number of correction iterations to run.
 * _inc: incremental-output flag -- NOTE(review): its assignment
 * (original lines 1294-1295) is elided from this excerpt; confirm
 * against the full source.
 * lresponse / nlresponse: linear and non-linear PSFs.
 * exposure_register: nonzero enables exposure registration.
 * use_weighted_median: nonzero selects weighted-median correction.
 * ipwl: per-channel correction weight limit.
 */
1290 ipc(render
*input
, unsigned int iterations
, int _inc
, psf
*lresponse
,
1291 psf
*nlresponse
, int exposure_register
,
1292 int use_weighted_median
, double ipwl
) {
1293 this->input
= input
;
1296 this->iterations
= iterations
;
1297 this->lresponse
= lresponse
;
1298 this->nlresponse
= nlresponse
;
1299 this->exposure_register
= exposure_register
;
1300 this->use_weighted_median
= use_weighted_median
;
1301 this->weight_limit
= ipwl
;
/*
 * Return the current image: the refined approximation when available,
 * otherwise the input renderer's image.  NOTE(review): the conditional
 * guarding the first return (original line 1305) is elided from this
 * excerpt -- presumably it tests whether the approximation has been
 * computed; confirm against the full source.
 */
1304 const image
*get_image() const {
1306 return approximation
;
1308 return input
->get_image();
1311 const image
*get_defined() const {
1312 return input
->get_defined();
/*
 * Synchronize: obtain a working approximation image from the input
 * renderer and run the iteration driver between the UI start/done
 * notifications.  NOTE(review): the call between ip_start() and
 * ip_done() (original line 1329, presumably _ip()) and the return
 * value are elided from this excerpt -- confirm against the full
 * source.
 */
1324 virtual int sync() {
1327 approximation
= optimizations::get_ip_working_image(input
->get_image());
1328 ui::get()->ip_start();
1330 ui::get()->ip_done();
1333 * Since we write output internally for --inc, no external
1334 * update is necessary in this case.
1343 void free_memory() {