// Copyright 2003, 2004 David Hilvert <dhilvert@auricle.dyndns.org>,
//                                    <dhilvert@ugcs.caltech.edu>

/*  This file is part of the Anti-Lamenessing Engine.

    The Anti-Lamenessing Engine is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    The Anti-Lamenessing Engine is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with the Anti-Lamenessing Engine; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
 * ipc.h: A render subclass implementing an iterative image reconstruction
 * algorithm based on the work of Michal Irani and Shmuel Peleg.  The
 * projection and backprojection functions used are user-configurable.
 * For more information on the theory behind the algorithm, see the last
 * section and appendix of:
 *
 *	http://www.wisdom.weizmann.ac.il/~irani/abstracts/superResolution.html
 *
 * The algorithm in the source paper looks something like this (PSF' is the
 * backprojection kernel, and corresponds to what the authors of the paper call
 * AUX):
 *
 * ===============================================================
 *
 *      Forward           Backward        Points of comparison
 * ---------------------------------------------------------------
 *
 *      scene(n)          scene(n+1)
 *
 *         |                  ^
 *         |                  |
 *        PSF                PSF'
 *         |                  |
 *         |             ---------+       <--- difference
 *         V            /         |
 *
 *   simulated(n)             real
 *
 * ===============================================================
 * This assumes a single colorspace representation.  However, consumer cameras
 * sometimes perform sharpening in non-linear colorspace, whereas lens and
 * sensor blurring occurs in linear colorspace, so two colorspaces can be
 * involved.  ALE accounts for this with separate linear and non-linear
 * colorspace PSFs, so the algorithm actually used looks something like:
 * ===============================================================
 *
 *      Forward           Backward        Points of comparison
 * ---------------------------------------------------------------
 *
 *      scene(n)          scene(n+1)
 *
 *         |                  ^
 *         |                  |
 *       LPSF               LPSF'
 *         |                  |
 *         |            ----------+       <--- difference,
 *         V           /          |            exposure
 *                                             re-estimation
 *   lsimulated(n)             lreal
 *
 *         |                  ^
 *         |                  |
 *    unlinearize         linearize
 *         |                  |
 *         V                  |
 *
 *    lsim_nl(n)          lreal_nl(n)
 *
 *         |                  ^
 *         |                  |
 *       NLPSF              NLPSF'
 *         |                  |
 *         |            ----------+       <--- difference
 *         V           /          |
 *
 *  nlsimulated(n)          real_nl
 *
 *                            ^
 *                            |
 *                       unlinearize
 *                            |
 *                            |
 *
 *                           real
 *
 * ===============================================================
 */
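
/*
 * Roughly, then, each iteration below performs an update of the form
 *
 *	for each aligned frame m:
 *		simulated(m)  = forward-project(approximation)       [PSF, NLPSF]
 *		correction   += backproject(real(m) - simulated(m))  [PSF', NLPSF']
 *	approximation += correction, clamped at zero
 *
 * This is only a sketch: the code below also tracks per-channel weights,
 * applies certainty (confidence) factors, optionally re-estimates exposure,
 * and can combine per-frame corrections by weighted mean or weighted median.
 */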
#ifndef __ipc_h__
#define __ipc_h__

#include "../image.h"
#include "../render.h"
#include "psf/rasterizer.h"
#include "psf/normalizer.h"
#include "psf/backprojector.h"
class ipc : public render {
protected:
	int synced;
	int inc;
	image *approximation;
	render *input;
	unsigned int iterations;
	psf *lresponse, *nlresponse;
	int exposure_register;
	int use_weighted_median;
	double weight_limit;
	/*
	 * Calculate the simulated input frame SIMULATED from the image
	 * approximation APPROXIMATION, iterating over image approximation
	 * pixels.
	 */
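
	/*
	 * The forward projection is split into a threaded linear pass
	 * (simulate_linear) and, when a non-linear PSF is configured, a
	 * threaded non-linear pass (simulate_nonlinear).  The sim_args
	 * structure below bundles the per-frame state shared by both passes,
	 * including the weight images used to normalize accumulated response
	 * values.
	 */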
	struct sim_args {
		int frame_num;
		image *approximation;
		image *lsimulated;
		image *nlsimulated;
		image *lsim_weights;
		image *nlsim_weights;
		transformation t;
		const raster *lresponse;
		const raster *nlresponse;
		const exposure *exp;
		point *extents;
	};
	class simulate_linear : public thread::decompose_domain,
	                        private sim_args {
		point *subdomain_extents;

	protected:
		void prepare_subdomains(unsigned int N) {
			subdomain_extents = new point[N * 2];

			for (unsigned int n = 0; n < N; n++) {
				point *se = subdomain_extents + 2 * n;

				for (int d = 0; d < 2; d++) {
					se[0][d] = extents[0][d];
					se[1][d] = extents[1][d];
				}
			}
		}

		void subdomain_algorithm(unsigned int thread,
				int i_min, int i_max, int j_min, int j_max) {

			point *extents = subdomain_extents + thread * 2;

			/*
			 * Linear filtering, iterating over approximation pixels
			 */

			for (int i = i_min; i < i_max; i++)
			for (int j = j_min; j < j_max; j++) {

				if (is_excluded_r(approximation->offset(), i, j, frame_num))
					continue;

				/*
				 * Obtain the position Q and dimensions D of
				 * image approximation pixel (i, j) in the coordinate
				 * system of the simulated frame.
				 */

				point p = point(i + approximation->offset()[0], j + approximation->offset()[1]);
				point q;
				ale_pos d[2];

				/*
				 * XXX: This appears to calculate the wrong thing.
				 */

				if (is_excluded_f(p, frame_num))
					continue;

				t.unscaled_map_area_inverse(p, &q, d);

				/*
				 * Convenient variables for expressing the boundaries
				 * of the mapped area.
				 */

				ale_pos top = q[0] - d[0];
				ale_pos bot = q[0] + d[0];
				ale_pos lef = q[1] - d[1];
				ale_pos rig = q[1] + d[1];

				/*
				 * Iterate over all simulated frame pixels influenced
				 * by the scene pixel (i, j), as determined by the
				 * response function.
				 */

				for (int ii = (int) floor(top + lresponse->min_i());
					ii <= ceil(bot + lresponse->max_i()); ii++)
				for (int jj = (int) floor(lef + lresponse->min_j());
					jj <= ceil(rig + lresponse->max_j()); jj++) {

					if (ii < (int) 0
					 || ii >= (int) lsimulated->height()
					 || jj < (int) 0
					 || jj >= (int) lsimulated->width())
						continue;

					if (ii < extents[0][0])
						extents[0][0] = ii;
					if (jj < extents[0][1])
						extents[0][1] = jj;
					if (ii > extents[1][0])
						extents[1][0] = ii;
					if (jj > extents[1][1])
						extents[1][1] = jj;

					class rasterizer::psf_result r =
						(*lresponse)(top - ii, bot - ii,
							lef - jj, rig - jj,
							lresponse->select(ii, jj),
							lsimulated->get_channels(ii, jj));

					lock();

					if (lsimulated->get_bayer() == IMAGE_BAYER_NONE) {
						lsimulated->pix(ii, jj) +=
							r(approximation->get_pixel(i, j));
						lsim_weights->pix(ii, jj) +=
							r.weight();
					} else {
						int k = ((image_bayer_ale_real *)lsimulated)->bayer_color(ii, jj);
						lsimulated->chan(ii, jj, k) +=
							r(approximation->get_pixel(i, j))[k];
						lsim_weights->chan(ii, jj, k) +=
							r.weight()[k];
					}

					unlock();
				}
			}
		}

		void finish_subdomains(unsigned int N) {
			/*
			 * Determine extents
			 */

			for (unsigned int n = 0; n < N; n++) {
				point *se = subdomain_extents + 2 * n;

				for (int d = 0; d < 2; d++) {
					if (se[0][d] < extents[0][d])
						extents[0][d] = se[0][d];
					if (se[1][d] > extents[1][d])
						extents[1][d] = se[1][d];
				}
			}

			delete[] subdomain_extents;
		}

	public:
		simulate_linear(sim_args s) : decompose_domain(0, s.approximation->height(),
		                                               0, s.approximation->width()),
		                              sim_args(s) {
		}
	};
	class simulate_nonlinear : public thread::decompose_domain,
	                           private sim_args {
		point *subdomain_extents;

	protected:
		void prepare_subdomains(unsigned int N) {
			subdomain_extents = new point[N * 2];

			for (unsigned int n = 0; n < N; n++) {
				point *se = subdomain_extents + 2 * n;

				for (int d = 0; d < 2; d++) {
					se[0][d] = extents[0][d];
					se[1][d] = extents[1][d];
				}
			}
		}

		void subdomain_algorithm(unsigned int thread,
				int i_min, int i_max, int j_min, int j_max) {

			point *extents = subdomain_extents + thread * 2;

			/*
			 * Iterate non-linear
			 */

			for (int i = i_min; i < i_max; i++)
			for (int j = j_min; j < j_max; j++) {

				/*
				 * Convenient variables for expressing the boundaries
				 * of the mapped area.
				 */

				ale_pos top = i - 0.5;
				ale_pos bot = i + 0.5;
				ale_pos lef = j - 0.5;
				ale_pos rig = j + 0.5;

				/*
				 * Iterate over all simulated frame pixels influenced
				 * by the scene pixel (i, j), as determined by the
				 * response function.
				 */

				for (int ii = (int) floor(top + nlresponse->min_i());
					ii <= ceil(bot + nlresponse->max_i()); ii++)
				for (int jj = (int) floor(lef + nlresponse->min_j());
					jj <= ceil(rig + nlresponse->max_j()); jj++) {

					if (ii < (int) 0
					 || ii >= (int) nlsimulated->height()
					 || jj < (int) 0
					 || jj >= (int) nlsimulated->width())
						continue;

					if (ii < extents[0][0])
						extents[0][0] = ii;
					if (jj < extents[0][1])
						extents[0][1] = jj;
					if (ii > extents[1][0])
						extents[1][0] = ii;
					if (jj > extents[1][1])
						extents[1][1] = jj;

					class rasterizer::psf_result r =
						(*nlresponse)(top - ii, bot - ii,
							lef - jj, rig - jj,
							nlresponse->select(ii, jj));

					lock();

					nlsimulated->pix(ii, jj) +=
						r(exp->unlinearize(lsimulated->get_pixel(i, j)));
					nlsim_weights->pix(ii, jj) +=
						r.weight();

					unlock();
				}
			}
		}

		void finish_subdomains(unsigned int N) {
			/*
			 * Determine extents
			 */

			for (unsigned int n = 0; n < N; n++) {
				point *se = subdomain_extents + 2 * n;

				for (int d = 0; d < 2; d++) {
					if (se[0][d] < extents[0][d])
						extents[0][d] = se[0][d];
					if (se[1][d] > extents[1][d])
						extents[1][d] = se[1][d];
				}
			}

			delete[] subdomain_extents;
		}

	public:
		simulate_nonlinear(sim_args s) : decompose_domain(0, s.lsimulated->height(),
		                                                  0, s.lsimulated->width()),
		                                 sim_args(s) {
		}
	};
	void _ip_frame_simulate(int frame_num, image *approximation,
			image *lsimulated, image *nlsimulated,
			transformation t, const raster *lresponse,
			const raster *nlresponse, const exposure &exp,
			point *extents) {

		sim_args args;

		/*
		 * Initializations for linear filtering
		 */

		image *lsim_weights = NULL;

		if (lsimulated->get_bayer() == IMAGE_BAYER_NONE) {
			lsim_weights = new image_ale_real(
					lsimulated->height(),
					lsimulated->width(),
					lsimulated->depth());
		} else {
			lsim_weights = new image_bayer_ale_real(
					lsimulated->height(),
					lsimulated->width(),
					lsimulated->depth(),
					lsimulated->get_bayer());
		}

		assert (lsim_weights);

		args.frame_num = frame_num;
		args.approximation = approximation;
		args.lsimulated = lsimulated;
		args.nlsimulated = nlsimulated;
		args.lsim_weights = lsim_weights;
		args.t = t;
		args.lresponse = lresponse;
		args.nlresponse = nlresponse;
		args.exp = &exp;
		args.extents = extents;

		/*
		 * Simulate linear
		 */

		simulate_linear sl(args);
		sl.run();

		/*
		 * Normalize linear
		 */

		for (unsigned int ii = 0; ii < lsimulated->height(); ii++)
		for (unsigned int jj = 0; jj < lsimulated->width(); jj++) {
			const ale_real weight_floor = 1e-10;
			ale_accum zero = 0;

			for (int k = 0; k < 3; k++) {
				if ((lsimulated->get_channels(ii, jj) & (1 << k)) == 0)
					continue;

				ale_real weight = lsim_weights->chan(ii, jj, k);

				if (!(weight > weight_floor))
					lsimulated->chan(ii, jj, k)
						/= zero;   /* Generate a non-finite value */

				lsimulated->chan(ii, jj, k) /= weight;
			}
		}
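
		/*
		 * Note that pixels whose accumulated weight fell below the floor
		 * above were deliberately made non-finite (division by a zero
		 * accumulator); the correction passes later skip such pixels via
		 * finite() tests rather than treating them as valid black values.
		 */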
		/*
		 * Finalize linear
		 */

		delete lsim_weights;

		/*
		 * Return if there is no non-linear step.
		 */

		if (nlsimulated == NULL) {
			return;
		}

		/*
		 * Initialize non-linear
		 */

		image_ale_real *nlsim_weights = new image_ale_real(
				nlsimulated->height(),
				nlsimulated->width(),
				nlsimulated->depth());

		args.nlsim_weights = nlsim_weights;

		/*
		 * Simulate non-linear
		 */

		simulate_nonlinear snl(args);
		snl.run();

		/*
		 * Normalize non-linear
		 */

		for (unsigned int ii = 0; ii < nlsimulated->height(); ii++)
		for (unsigned int jj = 0; jj < nlsimulated->width(); jj++) {
			pixel weight = nlsim_weights->get_pixel(ii, jj);
			ale_real weight_floor = 1e-10;
			ale_accum zero = 0;

			for (int k = 0; k < 3; k++)
				if (!(weight[k] > weight_floor))
					nlsimulated->pix(ii, jj)[k]
						/= zero;   /* Generate a non-finite value */

			nlsimulated->pix(ii, jj)
				/= weight;
		}

		/*
		 * Finalize non-linear
		 */

		delete nlsim_weights;
	}
	struct correction_t {
		/*
		 * Type
		 */
		int use_weighted_median;

		/*
		 * Weighted Median
		 */
		image_weighted_median *iwm;

		/*
		 * Weight limit
		 */
		double weight_limit;

		/*
		 * Common
		 */
		image *c;
		image *cc;

		/*
		 * Create correction structures.
		 */
		correction_t(int use_weighted_median, double weight_limit, unsigned int h, unsigned int w, unsigned int d) {
			this->use_weighted_median = use_weighted_median;
			if (use_weighted_median)
				iwm = new image_weighted_median(h, w, d);
			this->weight_limit = weight_limit;
			c = new image_ale_real(h, w, d);
			cc = new image_ale_real(h, w, d);
		}

		/*
		 * Destroy correction structures
		 */
		~correction_t() {
			if (use_weighted_median)
				delete iwm;
			delete c;
			delete cc;
		}

		/*
		 * Correction count
		 */
		pixel get_count(int i, int j) {
			if (use_weighted_median)
				return iwm->get_weights()->get_pixel(i, j);
			else
				return cc->get_pixel(i, j);
		}

		/*
		 * Check for weight limit
		 */
		int weight_limited(int i, int j) {
			if (weight_limit
			 && get_count(i, j)[0] >= weight_limit
			 && get_count(i, j)[1] >= weight_limit
			 && get_count(i, j)[2] >= weight_limit)
				return 1;

			return 0;
		}

		/*
		 * Correction value
		 */
		pixel get_correction(int i, int j) {
			if (use_weighted_median)
				return iwm->get_colors()->get_pixel(i, j);
			else
				return c->get_pixel(i, j)
				     / cc->get_pixel(i, j);
		}

		/*
		 * Frame end
		 */
		void frame_end(int frame_num) {
			if (use_weighted_median) {

				for (unsigned int i = 0; i < c->height(); i++)
				for (unsigned int j = 0; j < c->width(); j++) {

					/*
					 * Update the median calculator
					 */
					pixel cval = c->get_pixel(i, j);
					pixel ccval = cc->get_pixel(i, j);
					iwm->accumulate(i, j, frame_num, cval / ccval, ccval);

					/*
					 * Reset the counters
					 */
					c->pix(i, j) = pixel::zero();
					cc->pix(i, j) = pixel::zero();
				}
			}
		}

		/*
		 * Update correction structures, using either a weighted mean update or
		 * a weighted median update.
		 */
		void update(int i, int j, pixel value_times_weight, pixel weight) {
			c->pix(i, j) += value_times_weight;
			cc->pix(i, j) += weight;
		}
	};
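
	/*
	 * In the weighted-mean case, the final correction is simply c / cc.
	 * In the weighted-median case, frame_end() folds each frame's c / cc
	 * into the incremental median calculator, so get_correction() instead
	 * returns a weighted median of per-frame corrections, which tends to be
	 * less sensitive to outlying frames.
	 */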
	/*
	 * For each pixel in APPROXIMATION, calculate the differences
	 * between SIMULATED and REAL pixels influenced by this pixel,
	 * and back-project the differences onto the correction array
	 * C.  The number of frames backprojected to each pixel in C is
	 * counted in CC.
	 *
	 * Since APPROXIMATION can always, given sufficient computational
	 * resources, be configured to be of finer resolution than SIMULATED,
	 * we map APPROXIMATION pixels onto the SIMULATED grid rather than vice
	 * versa.  This should reduce the error incurred by approximations in
	 * d2::transformation::unscaled_map_area*().
	 *
	 * This approach requires multiplying the resultant integral by a
	 * corrective factor, since the grids are generally of different
	 * densities.
	 */
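
	/*
	 * Concretely, when an approximation pixel maps to an area of
	 * (bot - top) by (rig - lef) in simulated-frame coordinates, the
	 * backprojected integral is rescaled by 1 / ((bot - top) * (rig - lef))
	 * (the "r *= ..." step in correct_linear below), so that corrections
	 * are expressed per approximation pixel regardless of relative grid
	 * density.
	 */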
	struct correct_args {
		int frame_num;
		image *approximation;
		correction_t *cu;
		const image *real;
		image *lreal;
		image *lsimulated;
		image *nlsimulated;
		transformation t;
		const backprojector *lresponse;
		const backprojector *nlresponse;
		double weight_limit;
	};
	class correct_nonlinear : public thread::decompose_domain,
	                          private correct_args {

	protected:
		void subdomain_algorithm(unsigned int thread,
				int i_min, int i_max, int j_min, int j_max) {

			/*
			 * Unlinearize values from lsimulated.
			 */

			for (int i = i_min; i < i_max; i++)
			for (int j = j_min; j < j_max; j++)
				lreal->set_pixel(i, j, real->exp().unlinearize(
						lsimulated->get_pixel(i, j)));

			/*
			 * Backproject from real to lreal, iterating over all pixels
			 * in lreal.
			 */

			for (int i = i_min; i < i_max; i++)
			for (int j = j_min; j < j_max; j++) {

				/*
				 * XXX: Is this right?
				 */

				if (is_excluded_r(approximation->offset(), i, j, frame_num))
					continue;

				/*
				 * Convenient variables for expressing the boundaries
				 * of the mapped area.
				 */

				ale_pos top = i - 0.5;
				ale_pos bot = i + 0.5;
				ale_pos lef = j - 0.5;
				ale_pos rig = j + 0.5;

				/*
				 * Iterate over non-linear pixels influenced by linear
				 * pixels.
				 */

				for (int ii = (int) floor(top + nlresponse->min_i());
					ii <= ceil(bot + nlresponse->max_i()); ii++)
				for (int jj = (int) floor(lef + nlresponse->min_j());
					jj <= ceil(rig + nlresponse->max_j()); jj++) {

					if (ii < (int) 0
					 || ii >= (int) nlsimulated->height()
					 || jj < (int) 0
					 || jj >= (int) nlsimulated->width())
						continue;

					class backprojector::psf_result r =
						(*nlresponse)(top - ii, bot - ii,
							lef - jj, rig - jj,
							nlresponse->select(ii, jj));

					pixel comp_real =
						real->exp().unlinearize(real->get_pixel(ii, jj));

					pixel comp_simu =
						nlsimulated->get_pixel(ii, jj);

					if (!finite(comp_simu[0])
					 || !finite(comp_simu[1])
					 || !finite(comp_simu[2]))
						continue;

					/*
					 * Backprojection value.
					 */

					pixel bpv = r(comp_real - comp_simu);

					/*
					 * Error calculation
					 */

					lreal->pix(i, j) += bpv;
				}
			}

			/*
			 * Linearize lreal.
			 */

			for (int i = i_min; i < i_max; i++)
			for (int j = j_min; j < j_max; j++)
				lreal->set_pixel(i, j, real->exp().linearize(
						lreal->get_pixel(i, j)));
		}

	public:
		correct_nonlinear(correct_args c) : decompose_domain(0, c.lreal->height(),
		                                                     0, c.lreal->width()),
		                                    correct_args(c) {
		}
	};
	class correct_linear : public thread::decompose_domain,
	                       private correct_args {

	protected:
		void subdomain_algorithm(unsigned int thread,
				int i_min, int i_max, int j_min, int j_max) {

			/*
			 * Iterate over all pixels in the approximation.
			 */

			for (int i = i_min; i < i_max; i++)
			for (int j = j_min; j < j_max; j++) {

				/*
				 * Check correction count against any weight limit.
				 */

				if (cu->weight_limited(i, j))
					continue;

				/*
				 * Obtain the position Q and dimensions D of image
				 * approximation pixel (i, j) in the coordinate system
				 * of the simulated (and real) frame.
				 */

				point p = point(i + approximation->offset()[0], j + approximation->offset()[1]);
				point q;
				ale_pos d[2];

				t.unscaled_map_area_inverse(p, &q, d);

				/*
				 * Convenient variables for expressing the boundaries
				 * of the mapped area.
				 */

				ale_pos top = q[0] - d[0];
				ale_pos bot = q[0] + d[0];
				ale_pos lef = q[1] - d[1];
				ale_pos rig = q[1] + d[1];

				/*
				 * Iterate over frame pixels influenced by the scene
				 * pixel.
				 */

				for (int ii = (int) floor(top + lresponse->min_i());
					ii <= ceil(bot + lresponse->max_i()); ii++)
				for (int jj = (int) floor(lef + lresponse->min_j());
					jj <= ceil(rig + lresponse->max_j()); jj++) {

					if (ii < (int) 0
					 || ii >= (int) lreal->height()
					 || jj < (int) 0
					 || jj >= (int) lreal->width())
						continue;

					if (is_excluded_f(ii, jj, frame_num))
						continue;

					unsigned int selection = lresponse->select(ii, jj);

					char channels = lreal->get_channels(ii, jj);

					class backprojector::psf_result r =
						(*lresponse)(top - ii, bot - ii,
							lef - jj, rig - jj,
							selection, channels);

					/*
					 * R is the result of integration in the
					 * coordinate system of the simulated frame.
					 * We must rescale to get the result of
					 * integration in the coordinate system of the
					 * approximation image.
					 */

					r *= (1 / (bot - top) / (rig - lef));

					pixel comp_lreal =
						lreal->get_raw_pixel(ii, jj);

					// pixel comp_real =
					//	real->get_pixel(ii, jj);

					pixel comp_simu =
						lsimulated->get_raw_pixel(ii, jj);

#if 1
					/*
					 * Under the assumption that finite() testing
					 * may be expensive, limit such tests to active
					 * channels.
					 */

					int found_nonfinite = 0;
					for (int k = 0; k < 3; k++) {
						if (((1 << k) & channels)
						 && (!finite(comp_simu[k])
						  || !finite(comp_lreal[k]))) {
							found_nonfinite = 1;
							break;
						}
					}

					if (found_nonfinite)
						continue;
#else
					if (!finite(comp_simu[0])
					 || !finite(comp_simu[1])
					 || !finite(comp_simu[2])
					 || !finite(comp_lreal[0])
					 || !finite(comp_lreal[1])
					 || !finite(comp_lreal[2]))
						continue;
#endif

					/*
					 * Backprojection value unadjusted
					 * for confidence.
					 */

					pixel bpv = r(comp_lreal - comp_simu);

					/*
					 * Confidence [equal to (1, 1, 1) when
					 * confidence is uniform].
					 */

					// Ordinary certainty
					// pixel conf = real->exp().confidence(comp_lreal);

					// One-sided certainty
					// pixel conf = real->exp().one_sided_confidence(comp_lreal, bpv);
					//        conf = real->exp().one_sided_confidence(comp_real, bpv);

					// Estimate-based certainty
					// pixel conf = real->exp().confidence(comp_simu);

					// One-sided estimate-based certainty
					pixel conf = real->exp().one_sided_confidence(comp_simu, bpv);

					// One-sided approximation-based certainty
					// pixel conf = real->exp().one_sided_confidence(approximation->pix(i, j), bpv);

					/*
					 * If a color is bayer-interpolated, then we have no confidence in its
					 * value.
					 */

					if (real->get_bayer() != IMAGE_BAYER_NONE) {
						int color = ((image_bayer_ale_real *)real)->bayer_color(ii, jj);
						conf[(color + 1) % 3] = 0;
						conf[(color + 2) % 3] = 0;
					}

					/*
					 * Error calculation
					 */

					// c->pix(i, j) += bpv * conf;

					/*
					 * Increment the backprojection weight.  When
					 * confidence is uniform, this should weight
					 * each frame's correction equally.
					 */

					// cc->pix(i, j) += conf * r.weight()
					//	/ lresponse->integral(selection);

					cu->update(i, j, bpv * conf, conf * r.weight() / lresponse->integral(selection));
				}
			}
		}

	public:
		correct_linear(correct_args c) : decompose_domain(0, c.approximation->height(),
		                                                  0, c.approximation->width()),
		                                 correct_args(c) {
		}
	};
	void _ip_frame_correct(int frame_num, image *approximation,
			correction_t *cu, const image *real, image *lsimulated,
			image *nlsimulated, transformation t,
			const backprojector *lresponse,
			const backprojector *nlresponse, point *extents) {

		correct_args args;

		args.frame_num = frame_num;
		args.approximation = approximation;
		args.cu = cu;
		args.real = real;
		args.lsimulated = lsimulated;
		args.nlsimulated = nlsimulated;
		args.t = t;
		args.lresponse = lresponse;
		args.nlresponse = nlresponse;

		/*
		 * Generate the image to compare lsimulated with.
		 */

		const image *lreal;

		if (nlsimulated == NULL)
			lreal = real;
		else {

			image *new_lreal = new image_ale_real(
					real->height(),
					real->width(),
					real->depth(),
					"IPC lreal",
					&real->exp());

			args.lreal = new_lreal;

			correct_nonlinear cn(args);
			cn.run();

			lreal = new_lreal;
		}
		/*
		 * Perform exposure adjustment.
		 */
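
		/*
		 * The active branch below estimates an exposure correction as a
		 * confidence-weighted ratio of simulated to real channel sums,
		 * taken over the extents touched by forward projection, and folds
		 * it into the frame's exposure multiplier.  The disabled branches
		 * preserve earlier estimators based on average channel magnitude
		 * and on per-pixel ratios.
		 */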
		if (exposure_register) {

			pixel ec;

#if 0
			ec = lsimulated->avg_channel_magnitude()
			   / lreal->avg_channel_magnitude();
#elif 0
			pixel_accum ratio_sum;
			pixel_accum weight_sum;

			for (unsigned int i = 0; i < lreal->height(); i++)
			for (unsigned int j = 0; j < lreal->width(); j++) {

				pixel s = lsimulated->get_pixel(i, j);
				pixel r = lreal->get_pixel(i, j);
				pixel confidence = real->exp().confidence(r);

				if (s[0] > 0.001
				 && s[1] > 0.001
				 && s[2] > 0.001
				 && r[0] > 0.001
				 && r[1] > 0.001
				 && r[2] > 0.001) {
					ratio_sum += confidence * s / r;
					weight_sum += confidence;
				}
			}

			ec = ratio_sum / weight_sum;
#else
			pixel_accum ssum, rsum;

			// for (unsigned int i = 0; i < lreal->height(); i++)
			// for (unsigned int j = 0; j < lreal->width(); j++) {

			for (unsigned int i = (unsigned int) floor(extents[0][0]);
					i < (unsigned int) ceil(extents[1][0]); i++)
			for (unsigned int j = (unsigned int) floor(extents[0][1]);
					j < (unsigned int) ceil(extents[1][1]); j++) {

				pixel s = lsimulated->get_pixel(i, j);
				pixel r = lreal->get_pixel(i, j);

				pixel confidence = real->exp().confidence(s);

				if (!s.finite()
				 || !r.finite()
				 || !confidence.finite())
					continue;

				if (real->get_bayer() != IMAGE_BAYER_NONE) {
					int color = ((image_bayer_ale_real *)real)->bayer_color(i, j);
					ssum[color] += confidence[color] * s[color];
					rsum[color] += confidence[color] * r[color];
				} else {
					ssum += confidence * s;
					rsum += confidence * r;
				}
			}

			ec = ssum / rsum;

#endif

			if (ec.finite()
			 && rsum[0] > 0.001
			 && rsum[1] > 0.001
			 && rsum[2] > 0.001)
				real->exp().set_multiplier(
					real->exp().get_multiplier() * ec);
		}

		args.lreal = (d2::image *) lreal;

		correct_linear cl(args);
		cl.run();

		if (nlsimulated)
			delete lreal;
	}
	/*
	 * Adjust correction array C based on the difference between the
	 * simulated projected frame and real frame M.  Update the correction
	 * count CC for affected pixels in C.
	 */
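
	/*
	 * The extents computed during forward projection (the bounding box of
	 * simulated pixels actually touched) are forwarded to the correction
	 * stage, where exposure registration restricts its sums to that region.
	 */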
	virtual void _ip_frame(int frame_num, correction_t *cu, const image *real,
			transformation t, const raster *f, const backprojector *b,
			const raster *nlf, const backprojector *nlb) {

		ui::get()->d2_irani_peleg_start();

		/*
		 * Initialize simulated data structures
		 */

		image *lsimulated;

		if (real->get_bayer() == IMAGE_BAYER_NONE) {
			lsimulated = new image_ale_real(
					real->height(),
					real->width(),
					real->depth());
		} else {
			lsimulated = new image_bayer_ale_real(
					real->height(),
					real->width(),
					real->depth(),
					real->get_bayer());
		}

		image *nlsimulated = NULL;

		if (nlf)
			nlsimulated = new image_ale_real(
					real->height(),
					real->width(),
					real->depth());

		/*
		 * Create simulated frames with forward projection.
		 */

		ui::get()->ip_frame_simulate_start();

		point extents[2] = { point::posinf(), point::neginf() };

		_ip_frame_simulate(frame_num, approximation, lsimulated, nlsimulated, t, f, nlf, real->exp(), extents);

		/*
		 * Update the correction array using backprojection.
		 */

		ui::get()->ip_frame_correct_start();
		_ip_frame_correct(frame_num, approximation, cu, real, lsimulated, nlsimulated, t, b, nlb, extents);

		/*
		 * Finalize data structures.
		 */

		delete lsimulated;
		delete nlsimulated;

		ui::get()->d2_irani_peleg_stop();
	}
	/*
	 * Iterate _ip_frame() over all frames, and update APPROXIMATION after
	 * corrections from all frames have been summed.  Repeat for the number
	 * of iterations specified by the user.
	 */
	virtual void _ip() {

		/*
		 * Create rasterized PSF and backprojection kernel AUX.
		 */

		raster **f = (raster **) malloc(image_rw::count() * sizeof(raster *));
		backprojector **b = (backprojector **) malloc(image_rw::count() * sizeof(backprojector *));

		for (unsigned int m = 0; m < image_rw::count(); m++) {

			if (!align::match(m))
				continue;

			transformation t = align::of(m);

			f[m] = new normalizer(new rasterizer(lresponse, t));
			b[m] = new backprojector(f[m]);
		}

		raster *nlf = NULL;
		backprojector *nlb = NULL;

		if (nlresponse) {
			nlf = new normalizer(new rasterizer(nlresponse, transformation::eu_identity()));
			nlb = new backprojector(nlf);
		}

		for (unsigned int n = 0; n < iterations; n++) {

			correction_t *correction = new correction_t(
					use_weighted_median,
					weight_limit,
					approximation->height(),
					approximation->width(),
					approximation->depth());

			/*
			 * Iterate over all frames
			 */

			for (unsigned int m = 0; m < image_rw::count(); m++) {

				if (!align::match(m))
					continue;

				ui::get()->ip_frame_start(m);

				transformation t = align::of(m);
				const image *real = image_rw::open(m);

				_ip_frame(m, correction, real,
					t, f[m], b[m], nlf, nlb);

				image_rw::close(m);

				correction->frame_end(m);
			}

			/*
			 * Update the approximation.
			 */

			ui::get()->ip_update();

			for (unsigned int i = 0; i < approximation->height(); i++)
			for (unsigned int j = 0; j < approximation->width(); j++) {

				pixel cpix = correction->get_correction(i, j);
				pixel ccpix = correction->get_count(i, j);
				pixel apix = approximation->get_pixel(i, j);

				for (unsigned int k = 0; k < 3; k++) {

					const ale_real cc_floor = 1e-20;

					if (ccpix[k] < cc_floor)
						continue;

					if (!finite(cpix[k]))
						continue;

					if (!finite(apix[k]))
						continue;

					ale_real new_value = cpix[k] + apix[k];

					assert (finite(apix[k]));
					assert (finite(ccpix[k]));
					assert (finite(cpix[k]));
					assert (finite(new_value));

					/*
					 * Negative light doesn't make sense.
					 */
					if (new_value < 0)
						new_value = 0;

					approximation->chan(i, j, k) = new_value;
				}
			}

			delete correction;

			if (inc) {
				ui::get()->ip_write();
				image_rw::output(approximation);
			}

			ui::get()->ip_step_done();
		}

		for (unsigned int m = 0; m < image_rw::count(); m++) {

			if (!align::match(m))
				continue;

			delete f[m];
			delete b[m];
		}

		free(f);
		free(b);

		delete nlf;
		delete nlb;
	}
public:
	ipc(render *input, unsigned int iterations, int _inc, psf *lresponse,
			psf *nlresponse, int exposure_register,
			int use_weighted_median, double ipwl) {
		this->input = input;
		synced = 0;
		inc = _inc;
		this->iterations = iterations;
		this->lresponse = lresponse;
		this->nlresponse = nlresponse;
		this->exposure_register = exposure_register;
		this->use_weighted_median = use_weighted_median;
		this->weight_limit = ipwl;
	}

	const image *get_image() const {
		if (synced)
			return approximation;
		else
			return input->get_image();
	}

	const image *get_defined() const {
		return input->get_defined();
	}

	void sync(int n) {
		render::sync(n);
		input->sync(n);
	}

	void step() {
		return;
	}

	virtual int sync() {
		input->sync();
		synced = 1;
		approximation = optimizations::get_ip_working_image(input->get_image());
		ui::get()->ip_start();
		_ip();
		ui::get()->ip_done();

		/*
		 * Since we write output internally for --inc, no external
		 * update is necessary in this case.
		 */

		return 0;
	}

	virtual ~ipc() {
	}

	void free_memory() {
	}
};

#endif