// Copyright 2003, 2004 David Hilvert <dhilvert@auricle.dyndns.org>,
//                                    <dhilvert@ugcs.caltech.edu>

/*  This file is part of the Anti-Lamenessing Engine.

    The Anti-Lamenessing Engine is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    The Anti-Lamenessing Engine is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with the Anti-Lamenessing Engine; if not, see
    <http://www.gnu.org/licenses/>.
*/
/*
 * ipc.h: A render subclass implementing an iterative image reconstruction
 * algorithm based on the work of Michal Irani and Shmuel Peleg.  The
 * projection and backprojection functions used are user-configurable.
 * For more information on the theory behind the algorithm, see the last
 * section and appendix of:
 *
 *	http://www.wisdom.weizmann.ac.il/~irani/abstracts/superResolution.html
 *
 * The algorithm in the source paper looks something like this (PSF' is the
 * backprojection kernel, and corresponds to what the authors of the paper call
 * AUX):
 *
 * ===============================================================
 *    Forward          Backward          Points of comparison
 * ---------------------------------------------------------------
 *
 *    scene(n)         scene(n+1)
 *
 *       |                 ^
 *       |                 |
 *      PSF               PSF'
 *       |                 |
 *       |        ---------+             <--- difference
 *       V       /         |
 *
 *  simulated(n)         real
 *
 * ===============================================================
 *
 * This assumes a single colorspace representation.  However, consumer cameras
 * sometimes perform sharpening in non-linear colorspace, whereas lens and
 * sensor blurring occurs in linear colorspace.  Hence, there can be two
 * colorspaces involved; ALE accounts for this with linear and non-linear
 * colorspace PSFs.  Hence, the algorithm we use looks something like:
 *
 * ===============================================================
 *    Forward          Backward          Points of comparison
 * ---------------------------------------------------------------
 *
 *    scene(n)         scene(n+1)
 *
 *       |                 ^
 *       |                 |
 *     LPSF              LPSF'
 *       |                 |
 *       |       ----------+             <--- difference,
 *       V      /          |                  exposure
 *                                            re-estimation
 *  lsimulated(n)        lreal
 *
 *       |                 ^
 *       |                 |
 *  unlinearize        linearize
 *       |                 |
 *       V                 |
 *
 *   lsim_nl(n)        lreal_nl(n)
 *
 *       |                 ^
 *       |                 |
 *     NLPSF             NLPSF'
 *       |                 |
 *       |       ----------+             <--- difference
 *       V      /          |
 *
 *  nlsimulated(n)       real_nl
 *
 *                          ^
 *                          |
 *                     unlinearize
 *                          |
 *                          |
 *
 *                         real
 *
 * ===============================================================
 */
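/*
 * In terms of the diagrams above, each iteration in _ip() below computes,
 * for every frame m, the difference between the real frame and the
 * simulated frame, backprojects it, and combines the per-frame corrections
 * by a weighted mean (or, optionally, a weighted median) before updating
 * the approximation.  Schematically:
 *
 *	correction  =  combine over frames m of
 *	                   backproject(real_m - simulated_m(n))
 *
 *	scene(n+1)  =  max(0, scene(n) + correction)
 */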
#ifndef __ipc_h__
#define __ipc_h__

#include "../image.h"
#include "../render.h"
#include "psf/rasterizer.h"
#include "psf/normalizer.h"
#include "psf/backprojector.h"
class ipc : public render {
protected:
	int done;
	int inc;
	image *approximation;
	render *input;
	unsigned int iterations;
	psf *lresponse, *nlresponse;
	int exposure_register;
	int use_weighted_median;
	double weight_limit;

	/*
	 * Calculate the simulated input frame SIMULATED from the image
	 * approximation APPROXIMATION, iterating over image approximation
	 * pixels.
	 */
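	/*
	 * The simulation is performed in two passes: a linear pass
	 * accumulates LPSF-weighted contributions into LSIMULATED, and, when
	 * a non-linear response is configured, a second pass accumulates
	 * NLPSF-weighted contributions of the unlinearized linear result
	 * into NLSIMULATED.  Each pass also accumulates the corresponding
	 * response weights, which are used afterward to normalize the
	 * simulated frames.
	 */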
	struct sim_subdomain_args {
		int frame_num;
		image *approximation;
		image *lsimulated;
		image *nlsimulated;
		image *lsim_weights;
		image *nlsim_weights;
		transformation t;
		const raster *lresponse;
		const raster *nlresponse;
		const exposure *exp;
		int i_min, i_max, j_min, j_max;
		point extents[2];
#ifdef USE_PTHREAD
		pthread_mutex_t *lock;
#endif
	};
	static void *_ip_frame_simulate_subdomain_linear(void *args) {

		sim_subdomain_args *sargs = (sim_subdomain_args *) args;

		int frame_num = sargs->frame_num;
		image *approximation = sargs->approximation;
		image *lsimulated = sargs->lsimulated;
		image *lsim_weights = sargs->lsim_weights;
		transformation t = sargs->t;
		const raster *lresponse = sargs->lresponse;
		unsigned int i_min = sargs->i_min;
		unsigned int i_max = sargs->i_max;
		unsigned int j_min = sargs->j_min;
		unsigned int j_max = sargs->j_max;
		point *extents = sargs->extents;
#ifdef USE_PTHREAD
		pthread_mutex_t *lock = sargs->lock;
#endif

		/*
		 * Linear filtering, iterating over approximation pixels
		 */

		for (unsigned int i = i_min; i < i_max; i++)
		for (unsigned int j = j_min; j < j_max; j++) {

			if (is_excluded_r(approximation->offset(), i, j, frame_num))
				continue;

			/*
			 * Obtain the position Q and dimensions D of
			 * image approximation pixel (i, j) in the coordinate
			 * system of the simulated frame.
			 */

			point p = point(i + approximation->offset()[0], j + approximation->offset()[1]);
			point q;
			ale_pos d[2];

			/*
			 * XXX: This appears to calculate the wrong thing.
			 */

			if (is_excluded_f(p, frame_num))
				continue;

			t.unscaled_map_area_inverse(p, &q, d);

			/*
			 * Convenient variables for expressing the boundaries
			 * of the mapped area.
			 */

			ale_pos top = q[0] - d[0];
			ale_pos bot = q[0] + d[0];
			ale_pos lef = q[1] - d[1];
			ale_pos rig = q[1] + d[1];

			/*
			 * Iterate over all simulated frame pixels influenced
			 * by the scene pixel (i, j), as determined by the
			 * response function.
			 */

			for (int ii = (int) floor(top + lresponse->min_i());
				ii <= ceil(bot + lresponse->max_i()); ii++)
			for (int jj = (int) floor(lef + lresponse->min_j());
				jj <= ceil(rig + lresponse->max_j()); jj++) {

				if (ii < (int) 0
				 || ii >= (int) lsimulated->height()
				 || jj < (int) 0
				 || jj >= (int) lsimulated->width())
					continue;

				if (ii < extents[0][0])
					extents[0][0] = ii;
				if (jj < extents[0][1])
					extents[0][1] = jj;
				if (ii > extents[1][0])
					extents[1][0] = ii;
				if (jj > extents[1][1])
					extents[1][1] = jj;

				class rasterizer::psf_result r =
					(*lresponse)(top - ii, bot - ii,
						     lef - jj, rig - jj,
						     lresponse->select(ii, jj));

#ifdef USE_PTHREAD
				pthread_mutex_lock(lock);
#endif
				lsimulated->pix(ii, jj) +=
					r(approximation->get_pixel(i, j));
				lsim_weights->pix(ii, jj) +=
					r.weight();
#ifdef USE_PTHREAD
				pthread_mutex_unlock(lock);
#endif
			}
		}

		return NULL;
	}
	static void *_ip_frame_simulate_subdomain_nonlinear(void *args) {

		sim_subdomain_args *sargs = (sim_subdomain_args *) args;

		image *lsimulated = sargs->lsimulated;
		image *nlsimulated = sargs->nlsimulated;
		image *nlsim_weights = sargs->nlsim_weights;
		transformation t = sargs->t;
		const raster *nlresponse = sargs->nlresponse;
		const exposure *exp = sargs->exp;
		unsigned int i_min = sargs->i_min;
		unsigned int i_max = sargs->i_max;
		unsigned int j_min = sargs->j_min;
		unsigned int j_max = sargs->j_max;
		point *extents = sargs->extents;
#ifdef USE_PTHREAD
		pthread_mutex_t *lock = sargs->lock;
#endif

		/*
		 * Iterate non-linear
		 */

		for (unsigned int i = i_min; i < i_max; i++)
		for (unsigned int j = j_min; j < j_max; j++) {

			/*
			 * Convenient variables for expressing the boundaries
			 * of the mapped area.
			 */

			ale_pos top = i - 0.5;
			ale_pos bot = i + 0.5;
			ale_pos lef = j - 0.5;
			ale_pos rig = j + 0.5;

			/*
			 * Iterate over all simulated frame pixels influenced
			 * by the scene pixel (i, j), as determined by the
			 * response function.
			 */

			for (int ii = (int) floor(top + nlresponse->min_i());
				ii <= ceil(bot + nlresponse->max_i()); ii++)
			for (int jj = (int) floor(lef + nlresponse->min_j());
				jj <= ceil(rig + nlresponse->max_j()); jj++) {

				if (ii < (int) 0
				 || ii >= (int) nlsimulated->height()
				 || jj < (int) 0
				 || jj >= (int) nlsimulated->width())
					continue;

				if (ii < extents[0][0])
					extents[0][0] = ii;
				if (jj < extents[0][1])
					extents[0][1] = jj;
				if (ii > extents[1][0])
					extents[1][0] = ii;
				if (jj > extents[1][1])
					extents[1][1] = jj;

				class rasterizer::psf_result r =
					(*nlresponse)(top - ii, bot - ii,
						      lef - jj, rig - jj,
						      nlresponse->select(ii, jj));

#ifdef USE_PTHREAD
				pthread_mutex_lock(lock);
#endif
				nlsimulated->pix(ii, jj) +=
					r(exp->unlinearize(lsimulated->get_pixel(i, j)));
				nlsim_weights->pix(ii, jj) +=
					r.weight();
#ifdef USE_PTHREAD
				pthread_mutex_unlock(lock);
#endif
			}
		}

		return NULL;
	}
	void _ip_frame_simulate(int frame_num, image *approximation,
			image *lsimulated, image *nlsimulated,
			transformation t, const raster *lresponse,
			const raster *nlresponse, const exposure &exp,
			point *extents) {

		/*
		 * Threading initializations
		 */

		int N;

#ifdef USE_PTHREAD
		N = thread::count();

		pthread_t *threads = (pthread_t *) malloc(sizeof(pthread_t) * N);
		pthread_attr_t *thread_attr = (pthread_attr_t *) malloc(sizeof(pthread_attr_t) * N);
		pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

#else
		N = 1;
#endif

		sim_subdomain_args *args = new sim_subdomain_args[N];

		/*
		 * Initializations for linear filtering
		 */

		image_ale_real *lsim_weights = new image_ale_real(
				lsimulated->height(),
				lsimulated->width(),
				lsimulated->depth());

		for (int ti = 0; ti < N; ti++) {
			args[ti].frame_num = frame_num;
			args[ti].approximation = approximation;
			args[ti].lsimulated = lsimulated;
			args[ti].nlsimulated = nlsimulated;
			args[ti].lsim_weights = lsim_weights;
			args[ti].t = t;
			args[ti].lresponse = lresponse;
			args[ti].nlresponse = nlresponse;
			args[ti].exp = &exp;
			args[ti].i_min = (approximation->height() * ti) / N;
			args[ti].i_max = (approximation->height() * (ti + 1)) / N;
			args[ti].j_min = 0;
			args[ti].j_max = approximation->width();
			args[ti].extents[0] = point::posinf();
			args[ti].extents[1] = point::neginf();

#ifdef USE_PTHREAD
			args[ti].lock = &lock;
			pthread_attr_init(&thread_attr[ti]);
			pthread_attr_setdetachstate(&thread_attr[ti], PTHREAD_CREATE_JOINABLE);
			pthread_create(&threads[ti], &thread_attr[ti], _ip_frame_simulate_subdomain_linear, &args[ti]);
#else
			_ip_frame_simulate_subdomain_linear(&args[ti]);
#endif
		}

#ifdef USE_PTHREAD
		for (int ti = 0; ti < N; ti++) {
			pthread_join(threads[ti], NULL);
		}
#endif

		/*
		 * Normalize linear
		 */

		for (unsigned int ii = 0; ii < lsimulated->height(); ii++)
		for (unsigned int jj = 0; jj < lsimulated->width(); jj++) {
			pixel weight = lsim_weights->get_pixel(ii, jj);
			const ale_real weight_floor = 1e-10;
			ale_accum zero = 0;

			for (int k = 0; k < 3; k++)
				if (!(weight[k] > weight_floor))
					lsimulated->pix(ii, jj)[k]
						/= zero;	/* Generate a non-finite value */

			lsimulated->pix(ii, jj)
				/= weight;
		}

		/*
		 * Determine extents
		 */

		for (int n = 0; n < N; n++)
		for (int d = 0; d < 2; d++) {
			if (args[n].extents[0][d] < extents[0][d])
				extents[0][d] = args[n].extents[0][d];
			if (args[n].extents[1][d] > extents[1][d])
				extents[1][d] = args[n].extents[1][d];
		}

		/*
		 * Finalize linear
		 */

		delete lsim_weights;

		/*
		 * Return if there is no non-linear step.
		 */

		if (nlsimulated == NULL) {
			delete[] args;
			return;
		}

		/*
		 * Initialize non-linear
		 */

		image_ale_real *nlsim_weights = new image_ale_real(
				nlsimulated->height(),
				nlsimulated->width(),
				nlsimulated->depth());

		for (int ti = 0; ti < N; ti++) {
			args[ti].nlsim_weights = nlsim_weights;
			args[ti].i_min = (lsimulated->height() * ti) / N;
			args[ti].i_max = (lsimulated->height() * (ti + 1)) / N;
			args[ti].j_min = 0;
			args[ti].j_max = lsimulated->width();

#ifdef USE_PTHREAD
			pthread_attr_init(&thread_attr[ti]);
			pthread_attr_setdetachstate(&thread_attr[ti], PTHREAD_CREATE_JOINABLE);
			pthread_create(&threads[ti], &thread_attr[ti], _ip_frame_simulate_subdomain_nonlinear, &args[ti]);
#else
			_ip_frame_simulate_subdomain_nonlinear(&args[ti]);
#endif
		}

#ifdef USE_PTHREAD
		for (int ti = 0; ti < N; ti++) {
			pthread_join(threads[ti], NULL);
		}
#endif

		/*
		 * Normalize non-linear
		 */

		for (unsigned int ii = 0; ii < nlsimulated->height(); ii++)
		for (unsigned int jj = 0; jj < nlsimulated->width(); jj++) {
			pixel weight = nlsim_weights->get_pixel(ii, jj);
			ale_real weight_floor = 1e-10;
			ale_accum zero = 0;

			for (int k = 0; k < 3; k++)
				if (!(weight[k] > weight_floor))
					nlsimulated->pix(ii, jj)[k]
						/= zero;	/* Generate a non-finite value */

			nlsimulated->pix(ii, jj)
				/= weight;
		}

		/*
		 * Determine extents
		 */

		for (int n = 0; n < N; n++)
		for (int d = 0; d < 2; d++) {
			if (args[n].extents[0][d] < extents[0][d])
				extents[0][d] = args[n].extents[0][d];
			if (args[n].extents[1][d] > extents[1][d])
				extents[1][d] = args[n].extents[1][d];
		}

		/*
		 * Finalize non-linear
		 */

		delete nlsim_weights;

		delete[] args;
	}
	struct correction_t {
		/*
		 * Type
		 */
		int use_weighted_median;

		/*
		 * Weighted Median
		 */
		image_weighted_median *iwm;

		/*
		 * Weight limit
		 */
		double weight_limit;

		/*
		 * Common
		 */
		image *c;
		image *cc;

		/*
		 * Create correction structures.
		 */
		correction_t(int use_weighted_median, double weight_limit, unsigned int h, unsigned int w, unsigned int d) {
			this->use_weighted_median = use_weighted_median;
			if (use_weighted_median)
				iwm = new image_weighted_median(h, w, d);
			this->weight_limit = weight_limit;
			c = new image_ale_real(h, w, d);
			cc = new image_ale_real(h, w, d);
		}

		/*
		 * Destroy correction structures
		 */
		~correction_t() {
			if (use_weighted_median)
				delete iwm;
			delete c;
			delete cc;
		}

		/*
		 * Correction count
		 */
		pixel get_count(int i, int j) {
			if (use_weighted_median)
				return iwm->get_weights()->get_pixel(i, j);
			else
				return cc->get_pixel(i, j);
		}

		/*
		 * Check for weight limit
		 */
		int weight_limited(int i, int j) {
			if (weight_limit
			 && get_count(i, j)[0] >= weight_limit
			 && get_count(i, j)[1] >= weight_limit
			 && get_count(i, j)[2] >= weight_limit)
				return 1;

			return 0;
		}

		/*
		 * Correction value
		 */
		pixel get_correction(int i, int j) {
			if (use_weighted_median)
				return iwm->get_colors()->get_pixel(i, j);
			else
				return c->get_pixel(i, j)
				     / cc->get_pixel(i, j);
		}

		/*
		 * Frame end
		 */
		void frame_end(int frame_num) {
			if (use_weighted_median) {

				for (unsigned int i = 0; i < c->height(); i++)
				for (unsigned int j = 0; j < c->width(); j++) {

					/*
					 * Update the median calculator
					 */
					pixel cval = c->get_pixel(i, j);
					pixel ccval = cc->get_pixel(i, j);
					iwm->accumulate(i, j, frame_num, cval / ccval, ccval);

					/*
					 * Reset the counters
					 */
					c->pix(i, j) = pixel::zero();
					cc->pix(i, j) = pixel::zero();
				}
			}
		}

		/*
		 * Update correction structures, using either a weighted mean update or
		 * a weighted median update.
		 */
		void update(int i, int j, pixel value_times_weight, pixel weight) {
			c->pix(i, j) += value_times_weight;
			cc->pix(i, j) += weight;
		}
	};
	/*
	 * For each pixel in APPROXIMATION, calculate the differences
	 * between SIMULATED and REAL pixels influenced by this pixel,
	 * and back-project the differences onto the correction array
	 * C.  The number of frames backprojected to each pixel in C is
	 * counted in CC.
	 *
	 * Since APPROXIMATION can always, given sufficient computational
	 * resources, be configured to be of finer resolution than SIMULATED,
	 * we map APPROXIMATION pixels onto the SIMULATED grid rather than vice
	 * versa.  This should reduce the error incurred by approximations in
	 * d2::transformation::unscaled_map_area*().
	 *
	 * This approach requires multiplying the resultant integral by a
	 * corrective factor, since the grids are generally of different
	 * densities.
	 */
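	/*
	 * Concretely, when an approximation pixel maps to the region
	 * [top, bot] x [lef, rig] in simulated-frame coordinates, the
	 * backprojection result computed below is rescaled by
	 * 1 / ((bot - top) * (rig - lef)), the reciprocal of the mapped
	 * area, before being accumulated into the correction array.
	 */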
	struct correct_subdomain_args {
		int frame_num;
		image *approximation;
		correction_t *cu;
		const image *real;
		image *lreal;
		image *lsimulated;
		image *nlsimulated;
		transformation t;
		const backprojector *lresponse;
		const backprojector *nlresponse;
		unsigned int i_min, i_max, j_min, j_max;
		double weight_limit;
	};
	static void *_ip_frame_correct_subdomain_nonlinear(void *args) {

		correct_subdomain_args *sargs = (correct_subdomain_args *) args;

		int frame_num = sargs->frame_num;
		image *approximation = sargs->approximation;
		const image *real = sargs->real;
		image *lreal = sargs->lreal;
		image *lsimulated = sargs->lsimulated;
		const image *nlsimulated = sargs->nlsimulated;
		transformation t = sargs->t;
		const backprojector *nlresponse = sargs->nlresponse;
		unsigned int i_min = sargs->i_min;
		unsigned int i_max = sargs->i_max;
		unsigned int j_min = sargs->j_min;
		unsigned int j_max = sargs->j_max;

		/*
		 * Unlinearize values from lsimulated.
		 */

		for (unsigned int i = i_min; i < i_max; i++)
		for (unsigned int j = j_min; j < j_max; j++)
			lreal->set_pixel(i, j, real->exp().unlinearize(
						lsimulated->get_pixel(i, j)));

		/*
		 * Backproject from real to lreal, iterating over all pixels
		 * in lreal.
		 */

		for (unsigned int i = i_min; i < i_max; i++)
		for (unsigned int j = j_min; j < j_max; j++) {

			/*
			 * XXX: Is this right?
			 */

			if (is_excluded_r(approximation->offset(), i, j, frame_num))
				continue;

			/*
			 * Convenient variables for expressing the boundaries
			 * of the mapped area.
			 */

			ale_pos top = i - 0.5;
			ale_pos bot = i + 0.5;
			ale_pos lef = j - 0.5;
			ale_pos rig = j + 0.5;

			/*
			 * Iterate over non-linear pixels influenced by linear
			 * pixels.
			 */

			for (int ii = (int) floor(top + nlresponse->min_i());
				ii <= ceil(bot + nlresponse->max_i()); ii++)
			for (int jj = (int) floor(lef + nlresponse->min_j());
				jj <= ceil(rig + nlresponse->max_j()); jj++) {

				if (ii < (int) 0
				 || ii >= (int) nlsimulated->height()
				 || jj < (int) 0
				 || jj >= (int) nlsimulated->width())
					continue;

				class backprojector::psf_result r =
					(*nlresponse)(top - ii, bot - ii,
						      lef - jj, rig - jj,
						      nlresponse->select(ii, jj));

				pixel comp_real =
					real->exp().unlinearize(real->get_pixel(ii, jj));

				pixel comp_simu =
					nlsimulated->get_pixel(ii, jj);

				if (!finite(comp_simu[0])
				 || !finite(comp_simu[1])
				 || !finite(comp_simu[2]))
					continue;

				/*
				 * Backprojection value.
				 */

				pixel bpv = r(comp_real - comp_simu);

				/*
				 * Error calculation
				 */

				lreal->pix(i, j) += bpv;
			}
		}

		/*
		 * Linearize lreal.
		 */

		for (unsigned int i = i_min; i < i_max; i++)
		for (unsigned int j = j_min; j < j_max; j++)
			lreal->set_pixel(i, j, real->exp().linearize(
						lreal->get_pixel(i, j)));

		return NULL;
	}
	static void *_ip_frame_correct_subdomain_linear(void *args) {

		correct_subdomain_args *sargs = (correct_subdomain_args *) args;

		int frame_num = sargs->frame_num;
		image *approximation = sargs->approximation;
		correction_t *cu = sargs->cu;
		const image *real = sargs->real;
		const image *lreal = sargs->lreal;
		const image *lsimulated = sargs->lsimulated;
		transformation t = sargs->t;
		const backprojector *lresponse = sargs->lresponse;
		unsigned int i_min = sargs->i_min;
		unsigned int i_max = sargs->i_max;
		unsigned int j_min = sargs->j_min;
		unsigned int j_max = sargs->j_max;

		/*
		 * Iterate over all pixels in the approximation.
		 */

		for (unsigned int i = i_min; i < i_max; i++)
		for (unsigned int j = j_min; j < j_max; j++) {

			/*
			 * Check correction count against any weight limit.
			 */

			if (cu->weight_limited(i, j))
				continue;

			/*
			 * Obtain the position Q and dimensions D of image
			 * approximation pixel (i, j) in the coordinate system
			 * of the simulated (and real) frame.
			 */

			point p = point(i + approximation->offset()[0], j + approximation->offset()[1]);
			point q;
			ale_pos d[2];

			t.unscaled_map_area_inverse(p, &q, d);

			/*
			 * Convenient variables for expressing the boundaries
			 * of the mapped area.
			 */

			ale_pos top = q[0] - d[0];
			ale_pos bot = q[0] + d[0];
			ale_pos lef = q[1] - d[1];
			ale_pos rig = q[1] + d[1];

			/*
			 * Iterate over frame pixels influenced by the scene
			 * pixel.
			 */

			for (int ii = (int) floor(top + lresponse->min_i());
				ii <= ceil(bot + lresponse->max_i()); ii++)
			for (int jj = (int) floor(lef + lresponse->min_j());
				jj <= ceil(rig + lresponse->max_j()); jj++) {

				if (ii < (int) 0
				 || ii >= (int) lreal->height()
				 || jj < (int) 0
				 || jj >= (int) lreal->width())
					continue;

				if (is_excluded_f(ii, jj, frame_num))
					continue;

				unsigned int selection = lresponse->select(ii, jj);

				class backprojector::psf_result r =
					(*lresponse)(top - ii, bot - ii,
						     lef - jj, rig - jj,
						     selection);

				/*
				 * R is the result of integration in the
				 * coordinate system of the simulated frame.
				 * We must rescale to get the result of
				 * integration in the coordinate system of the
				 * approximation image.
				 */

				r *= (1 / (bot - top) / (rig - lef));

				pixel comp_lreal =
					lreal->get_pixel(ii, jj);

				// pixel comp_real =
				//	real->get_pixel(ii, jj);

				pixel comp_simu =
					lsimulated->get_pixel(ii, jj);

				if (!finite(comp_simu[0])
				 || !finite(comp_simu[1])
				 || !finite(comp_simu[2])
				 || !finite(comp_lreal[0])
				 || !finite(comp_lreal[1])
				 || !finite(comp_lreal[2]))
					continue;

				/*
				 * Backprojection value unadjusted
				 * for confidence.
				 */

				pixel bpv = r(comp_lreal - comp_simu);

				/*
				 * Confidence [equal to (1, 1, 1) when
				 * confidence is uniform].
				 */

				// Ordinary certainty
				// pixel conf = real->exp().confidence(comp_lreal);

				// One-sided certainty
				// pixel conf = real->exp().one_sided_confidence(comp_lreal, bpv);
				//        conf = real->exp().one_sided_confidence(comp_real, bpv);

				// Estimate-based certainty
				pixel conf = real->exp().confidence(comp_simu);

				// One-sided estimate-based certainty
				// pixel conf = real->exp().one_sided_confidence(comp_simu, bpv);

				// One-sided approximation-based certainty
				// pixel conf = real->exp().one_sided_confidence(approximation->pix(i, j), bpv);

				/*
				 * If a color is bayer-interpolated, then we have no confidence in its
				 * value.
				 */

				if (real->get_bayer() != IMAGE_BAYER_NONE) {
					int color = ((image_bayer_ale_real *)real)->bayer_color(ii, jj);
					conf[(color + 1) % 3] = 0;
					conf[(color + 2) % 3] = 0;
				}

				/*
				 * Error calculation
				 */

				// c->pix(i, j) += bpv * conf;

				/*
				 * Increment the backprojection weight.  When
				 * confidence is uniform, this should weight
				 * each frame's correction equally.
				 */

				// cc->pix(i, j) += conf * r.weight()
				//	   / lresponse->integral(selection);

				cu->update(i, j, bpv * conf, conf * r.weight() / lresponse->integral(selection));
			}
		}

		return NULL;
	}
	void _ip_frame_correct(int frame_num, image *approximation,
			correction_t *cu, const image *real, image *lsimulated,
			image *nlsimulated, transformation t,
			const backprojector *lresponse,
			const backprojector *nlresponse, point *extents) {

		/*
		 * Threading initializations
		 */

		int N;

#ifdef USE_PTHREAD
		N = thread::count();

		pthread_t *threads = (pthread_t *) malloc(sizeof(pthread_t) * N);
		pthread_attr_t *thread_attr = (pthread_attr_t *) malloc(sizeof(pthread_attr_t) * N);

#else
		N = 1;
#endif

		correct_subdomain_args *args = new correct_subdomain_args[N];

		for (int ti = 0; ti < N; ti++) {
			args[ti].frame_num = frame_num;
			args[ti].approximation = approximation;
			args[ti].cu = cu;
			args[ti].real = real;
			args[ti].lsimulated = lsimulated;
			args[ti].nlsimulated = nlsimulated;
			args[ti].t = t;
			args[ti].lresponse = lresponse;
			args[ti].nlresponse = nlresponse;
		}

		/*
		 * Generate the image to compare lsimulated with.
		 */

		const image *lreal;

		if (nlsimulated == NULL)
			lreal = real;
		else {

			image *new_lreal = new image_ale_real(
					real->height(),
					real->width(),
					real->depth(),
					"IPC lreal",
					&real->exp());

			for (int ti = 0; ti < N; ti++) {
				args[ti].lreal = new_lreal;
				args[ti].i_min = (new_lreal->height() * ti) / N;
				args[ti].i_max = (new_lreal->height() * (ti + 1)) / N;
				args[ti].j_min = 0;
				args[ti].j_max = new_lreal->width();

#ifdef USE_PTHREAD
				pthread_attr_init(&thread_attr[ti]);
				pthread_attr_setdetachstate(&thread_attr[ti], PTHREAD_CREATE_JOINABLE);
				pthread_create(&threads[ti], &thread_attr[ti], _ip_frame_correct_subdomain_nonlinear, &args[ti]);
#else
				_ip_frame_correct_subdomain_nonlinear(&args[ti]);
#endif
			}

#ifdef USE_PTHREAD
			for (int ti = 0; ti < N; ti++) {
				pthread_join(threads[ti], NULL);
			}
#endif

			lreal = new_lreal;
		}

		/*
		 * Perform exposure adjustment.
		 */

		if (exposure_register) {

			pixel ec;

#if 0
			ec = lsimulated->avg_channel_magnitude()
			   / lreal->avg_channel_magnitude();
#elif 0
			pixel_accum ratio_sum;
			pixel_accum weight_sum;

			for (unsigned int i = 0; i < lreal->height(); i++)
			for (unsigned int j = 0; j < lreal->width(); j++) {

				pixel s = lsimulated->get_pixel(i, j);
				pixel r = lreal->get_pixel(i, j);
				pixel confidence = real->exp().confidence(r);

				if (s[0] > 0.001
				 && s[1] > 0.001
				 && s[2] > 0.001
				 && r[0] > 0.001
				 && r[1] > 0.001
				 && r[2] > 0.001) {
					ratio_sum += confidence * s / r;
					weight_sum += confidence;
				}
			}

			ec = ratio_sum / weight_sum;
#else
			pixel_accum ssum, rsum;

			// for (unsigned int i = 0; i < lreal->height(); i++)
			// for (unsigned int j = 0; j < lreal->width(); j++) {
			for (unsigned int i = (unsigned int) floor(extents[0][0]);
			     i < (unsigned int) ceil(extents[1][0]); i++)
			for (unsigned int j = (unsigned int) floor(extents[0][1]);
			     j < (unsigned int) ceil(extents[1][1]); j++) {

				pixel s = lsimulated->get_pixel(i, j);
				pixel r = lreal->get_pixel(i, j);

				pixel confidence = real->exp().confidence(s);

				if (!s.finite()
				 || !r.finite()
				 || !confidence.finite())
					continue;

				if (real->get_bayer() != IMAGE_BAYER_NONE) {
					int color = ((image_bayer_ale_real *)real)->bayer_color(i, j);
					ssum[color] += confidence[color] * s[color];
					rsum[color] += confidence[color] * r[color];
				} else {
					ssum += confidence * s;
					rsum += confidence * r;
				}
			}

			ec = ssum / rsum;

#endif

			if (ec.finite()
			 && rsum[0] > 0.001
			 && rsum[1] > 0.001
			 && rsum[2] > 0.001)
				real->exp().set_multiplier(
					real->exp().get_multiplier() * ec);
		}

		for (int ti = 0; ti < N; ti++) {
			args[ti].lreal = (d2::image *) lreal;
			args[ti].i_min = (approximation->height() * ti) / N;
			args[ti].i_max = (approximation->height() * (ti + 1)) / N;
			args[ti].j_min = 0;
			args[ti].j_max = approximation->width();

#ifdef USE_PTHREAD
			pthread_attr_init(&thread_attr[ti]);
			pthread_attr_setdetachstate(&thread_attr[ti], PTHREAD_CREATE_JOINABLE);
			pthread_create(&threads[ti], &thread_attr[ti], _ip_frame_correct_subdomain_linear, &args[ti]);
#else
			_ip_frame_correct_subdomain_linear(&args[ti]);
#endif
		}

#ifdef USE_PTHREAD
		for (int ti = 0; ti < N; ti++) {
			pthread_join(threads[ti], NULL);
		}
#endif

		if (nlsimulated)
			delete lreal;

		delete[] args;
	}
	/*
	 * Adjust correction array C based on the difference between the
	 * simulated projected frame and real frame M.  Update the correction
	 * count CC for affected pixels in C.
	 */

	virtual void _ip_frame(int frame_num, correction_t *cu, const image *real,
			transformation t, const raster *f, const backprojector *b,
			const raster *nlf, const backprojector *nlb) {

		ui::get()->d2_irani_peleg_start();

		/*
		 * Initialize simulated data structures
		 */

		image *lsimulated = new image_ale_real(
				real->height(),
				real->width(),
				real->depth());

		image *nlsimulated = NULL;

		if (nlf)
			nlsimulated = new image_ale_real(
					real->height(),
					real->width(),
					real->depth());

		/*
		 * Create simulated frames with forward projection.
		 */

		ui::get()->ip_frame_simulate_start();

		point extents[2] = { point::posinf(), point::neginf() };

		_ip_frame_simulate(frame_num, approximation, lsimulated, nlsimulated, t, f, nlf, real->exp(), extents);

		/*
		 * Update the correction array using backprojection.
		 */

		ui::get()->ip_frame_correct_start();

		_ip_frame_correct(frame_num, approximation, cu, real, lsimulated, nlsimulated, t, b, nlb, extents);

		/*
		 * Finalize data structures.
		 */

		delete lsimulated;
		delete nlsimulated;

		ui::get()->d2_irani_peleg_stop();
	}
	/*
	 * Iterate _ip_frame() over all frames, and update APPROXIMATION after
	 * corrections from all frames have been summed.  Repeat for the number
	 * of iterations specified by the user.
	 */

	virtual void _ip() {

		/*
		 * Create rasterized PSF and backprojection kernel AUX.
		 */

		raster **f = (raster **) malloc(image_rw::count() * sizeof(raster *));
		backprojector **b = (backprojector **) malloc(image_rw::count() * sizeof(backprojector *));

		for (unsigned int m = 0; m < image_rw::count(); m++) {

			if (!align::match(m))
				continue;

			transformation t = align::of(m);

			f[m] = new normalizer(new rasterizer(lresponse, t));
			b[m] = new backprojector(f[m]);
		}

		raster *nlf = NULL;
		backprojector *nlb = NULL;

		if (nlresponse) {
			nlf = new normalizer(new rasterizer(nlresponse, transformation::eu_identity()));
			nlb = new backprojector(nlf);
		}

		for (unsigned int n = 0; n < iterations; n++) {

			correction_t *correction = new correction_t(
					use_weighted_median,
					weight_limit,
					approximation->height(),
					approximation->width(),
					approximation->depth());

			/*
			 * Iterate over all frames
			 */

			for (unsigned int m = 0; m < image_rw::count(); m++) {

				if (!align::match(m))
					continue;

				ui::get()->ip_frame_start(m);

				transformation t = align::of(m);
				const image *real = image_rw::open(m);

				_ip_frame(m, correction, real,
					t, f[m], b[m], nlf, nlb);

				image_rw::close(m);

				correction->frame_end(m);
			}

			/*
			 * Update the approximation.
			 */

			ui::get()->ip_update();

			for (unsigned int i = 0; i < approximation->height(); i++)
			for (unsigned int j = 0; j < approximation->width(); j++) {

				pixel cpix = correction->get_correction(i, j);
				pixel ccpix = correction->get_count(i, j);
				pixel apix = approximation->get_pixel(i, j);

				for (unsigned int k = 0; k < 3; k++) {

					const ale_real cc_floor = 1e-20;

					if (ccpix[k] < cc_floor)
						continue;

					if (!finite(cpix[k]))
						continue;

					if (!finite(apix[k]))
						continue;

					ale_real new_value = cpix[k] + apix[k];

					assert (finite(apix[k]));
					assert (finite(ccpix[k]));
					assert (finite(cpix[k]));
					assert (finite(new_value));

					/*
					 * Negative light doesn't make sense.
					 */
					if (new_value < 0)
						new_value = 0;

					approximation->chan(i, j, k) = new_value;
				}
			}

			delete correction;

			if (inc) {
				ui::get()->ip_write();
				image_rw::output(approximation);
			}

			ui::get()->ip_step_done();
		}

		for (unsigned int m = 0; m < image_rw::count(); m++) {

			if (!align::match(m))
				continue;

			delete f[m];
			delete b[m];
		}

		free(f);
		free(b);

		delete nlf;
		delete nlb;
	}
public:

	ipc(render *input, unsigned int iterations, int _inc, psf *lresponse,
			psf *nlresponse, int exposure_register,
			int use_weighted_median, double ipwl) {
		this->input = input;
		done = 0;
		inc = _inc;
		this->iterations = iterations;
		this->lresponse = lresponse;
		this->nlresponse = nlresponse;
		this->exposure_register = exposure_register;
		this->use_weighted_median = use_weighted_median;
		this->weight_limit = ipwl;
	}

	const image *get_image() {
		if (done)
			return approximation;
		else
			return input->get_image();
	}

	const image *get_defined() {
		return input->get_defined();
	}

	void sync(int n) {
		render::sync(n);
		input->sync(n);
	}

	void step() {
		return;
	}

	virtual int sync() {
		input->sync();
		ui::get()->ip_start();
		done = 1;
		approximation = optimizations::get_ip_working_image(input->get_image());
		_ip();
		ui::get()->ip_done();

		return 0;
	}

	virtual ~ipc() {
	}

	void free_memory() {
	}
};

#endif