1 // Copyright 2003, 2004 David Hilvert <dhilvert@auricle.dyndns.org>,
2 // <dhilvert@ugcs.caltech.edu>
4 /* This file is part of the Anti-Lamenessing Engine.
6 The Anti-Lamenessing Engine is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 The Anti-Lamenessing Engine is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with the Anti-Lamenessing Engine; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * ipc.h: A render subclass implementing an iterative image reconstruction
23 * algorithm based on the work of Michal Irani and Shmuel Peleg. The
24 * projection and backprojection functions used are user-configurable.
25 * For more information on the theory behind the algorithm, see the last
26 * section and appendix of:
28 * http://www.wisdom.weizmann.ac.il/~irani/abstracts/superResolution.html
30 * The algorithm in the source paper looks something like this (PSF' is the
31 * backprojection kernel, and corresponds to what the authors of the paper call
34 * ===============================================================
35 * Forward Backward Points of comparison
36 * ---------------------------------------------------------------
44 * | ---------+ <--- difference
49 * ===============================================================
51 * This assumes a single colorspace representation. However, consumer cameras
52 * sometimes perform sharpening in non-linear colorspace, whereas lens and
53 * sensor blurring occurs in linear colorspace. Hence, there can be two
54 * colorspaces involved; ALE accounts for this with linear and non-linear
55 * colorspace PSFs. Hence, the algorithm we use looks something like:
57 * ===============================================================
58 * Forward Backward Points of comparison
59 * ---------------------------------------------------------------
67 * | ----------+ <--- difference,
74 * unlinearize linearize
78 * lsim_nl(n) lreal_nl(n)
84 * | ----------+ <--- difference
87 * nlsimulated(n) real_nl
97 * ===============================================================
103 #include "../image.h"
104 #include "../render.h"
105 #include "psf/rasterizer.h"
106 #include "psf/normalizer.h"
107 #include "psf/backprojector.h"
// NOTE(review): this chunk is a partial extraction; interior lines are
// missing (see gaps in the embedded original numbering), so code is left
// byte-identical and only comments are added.
//
// ipc: render subclass implementing Irani/Peleg-style iterative image
// reconstruction (projection + backprojection) — see file header comment.
109 class ipc
: public render
{
// Current image approximation being refined by the iterations.
113 image
*approximation
;
// Number of Irani-Peleg iterations to run (set by the constructor).
115 unsigned int iterations
;
// Linear- and non-linear-colorspace point-spread functions
// (lresponse / nlresponse respectively, per the header comment).
116 psf
*lresponse
, *nlresponse
;
// Non-zero to enable exposure registration (used in _ip_frame_correct).
117 int exposure_register
;
// Non-zero to accumulate corrections via weighted median instead of
// weighted mean (see correction_t).
118 int use_weighted_median
;
// Argument bundle handed to the per-thread simulation workers
// (_ip_frame_simulate_subdomain_*). Each worker processes the row band
// [i_min, i_max) x [j_min, j_max). NOTE(review): several fields referenced
// by the workers (frame_num, lsimulated, nlsimulated, lsim_weights, t, exp,
// extents) are missing from this extraction — confirm against full source.
122 * Calculate the simulated input frame SIMULATED from the image
123 * approximation APPROXIMATION, iterating over image approximation
127 struct sim_subdomain_args
{
129 image
*approximation
;
133 image
*nlsim_weights
;
135 const raster
*lresponse
;
136 const raster
*nlresponse
;
// Subdomain bounds for this worker thread.
138 int i_min
, i_max
, j_min
, j_max
;
// Mutex serializing writes to the shared simulated/weight images.
141 pthread_mutex_t
*lock
;
// Thread worker: forward-project (linear colorspace) the approximation
// onto the simulated frame for one row band. Accumulates PSF-weighted
// contributions into lsimulated and the matching weights into
// lsim_weights, under *lock. NOTE(review): partial extraction — some
// statements (e.g. 'continue' bodies, declarations of q/d) are missing;
// code below is byte-identical to what was visible.
145 static void *_ip_frame_simulate_subdomain_linear(void *args
) {
147 sim_subdomain_args
*sargs
= (sim_subdomain_args
*) args
;
// Unpack the argument bundle into locals.
149 int frame_num
= sargs
->frame_num
;
150 image
*approximation
= sargs
->approximation
;
151 image
*lsimulated
= sargs
->lsimulated
;
152 image
*lsim_weights
= sargs
->lsim_weights
;
153 transformation t
= sargs
->t
;
154 const raster
*lresponse
= sargs
->lresponse
;
155 unsigned int i_min
= sargs
->i_min
;
156 unsigned int i_max
= sargs
->i_max
;
157 unsigned int j_min
= sargs
->j_min
;
158 unsigned int j_max
= sargs
->j_max
;
159 point
*extents
= sargs
->extents
;
161 pthread_mutex_t
*lock
= sargs
->lock
;
165 * Linear filtering, iterating over approximation pixels
168 for (unsigned int i
= i_min
; i
< i_max
; i
++)
169 for (unsigned int j
= j_min
; j
< j_max
; j
++) {
// Skip pixels excluded from this frame's region of interest.
171 if (is_excluded_r(approximation
->offset(), i
, j
, frame_num
))
175 * Obtain the position Q and dimensions D of
176 * image approximation pixel (i, j) in the coordinate
177 * system of the simulated frame.
180 point p
= point(i
+ approximation
->offset()[0], j
+ approximation
->offset()[1]);
185 * XXX: This appears to calculate the wrong thing.
188 if (is_excluded_f(p
, frame_num
))
191 t
.unscaled_map_area_inverse(p
, &q
, d
);
194 * Convenient variables for expressing the boundaries
195 * of the mapped area.
198 ale_pos top
= q
[0] - d
[0];
199 ale_pos bot
= q
[0] + d
[0];
200 ale_pos lef
= q
[1] - d
[1];
201 ale_pos rig
= q
[1] + d
[1];
204 * Iterate over all simulated frame pixels influenced
205 * by the scene pixel (i, j), as determined by the
// PSF support window: [top+min_i, bot+max_i] x [lef+min_j, rig+max_j].
209 for (int ii
= (int) floor(top
+ lresponse
->min_i());
210 ii
<= ceil(bot
+ lresponse
->max_i()); ii
++)
211 for (int jj
= (int) floor(lef
+ lresponse
->min_j());
212 jj
<= ceil(rig
+ lresponse
->max_j()); jj
++) {
215 || ii
>= (int) lsimulated
->height()
217 || jj
>= (int) lsimulated
->width())
// Track the bounding box of touched pixels in extents[0..1].
220 if (ii
< extents
[0][0])
222 if (jj
< extents
[0][1])
224 if (ii
> extents
[1][0])
226 if (jj
> extents
[1][1])
// Evaluate the rasterized PSF over the overlap region.
229 class rasterizer::psf_result r
=
230 (*lresponse
)(top
- ii
, bot
- ii
,
232 lresponse
->select(ii
, jj
));
// Serialize accumulation into the shared images.
235 pthread_mutex_lock(lock
);
237 lsimulated
->pix(ii
, jj
) +=
238 r(approximation
->get_pixel(i
, j
));
239 lsim_weights
->pix(ii
, jj
) +=
242 pthread_mutex_unlock(lock
);
// Thread worker: non-linear colorspace forward projection. Takes the
// linear simulated frame, unlinearizes it via the frame's exposure, and
// convolves with the non-linear PSF (nlresponse) into nlsimulated,
// accumulating weights into nlsim_weights under *lock.
// NOTE(review): partial extraction; missing interior lines left as-is.
250 static void *_ip_frame_simulate_subdomain_nonlinear(void *args
) {
252 sim_subdomain_args
*sargs
= (sim_subdomain_args
*) args
;
// Unpack the argument bundle into locals.
254 image
*lsimulated
= sargs
->lsimulated
;
255 image
*nlsimulated
= sargs
->nlsimulated
;
256 image
*nlsim_weights
= sargs
->nlsim_weights
;
257 transformation t
= sargs
->t
;
258 const raster
*nlresponse
= sargs
->nlresponse
;
259 const exposure
*exp
= sargs
->exp
;
260 unsigned int i_min
= sargs
->i_min
;
261 unsigned int i_max
= sargs
->i_max
;
262 unsigned int j_min
= sargs
->j_min
;
263 unsigned int j_max
= sargs
->j_max
;
264 point
*extents
= sargs
->extents
;
266 pthread_mutex_t
*lock
= sargs
->lock
;
// Iterate over the linear simulated frame's pixels in this band.
273 for (unsigned int i
= i_min
; i
< i_max
; i
++)
274 for (unsigned int j
= j_min
; j
< j_max
; j
++) {
277 * Convenient variables for expressing the boundaries
278 * of the mapped area.
// Unit-square pixel footprint (no geometric transform here: the
// non-linear PSF is applied in the frame's own grid).
281 ale_pos top
= i
- 0.5;
282 ale_pos bot
= i
+ 0.5;
283 ale_pos lef
= j
- 0.5;
284 ale_pos rig
= j
+ 0.5;
287 * Iterate over all simulated frame pixels influenced
288 * by the scene pixel (i, j), as determined by the
292 for (int ii
= (int) floor(top
+ nlresponse
->min_i());
293 ii
<= ceil(bot
+ nlresponse
->max_i()); ii
++)
294 for (int jj
= (int) floor(lef
+ nlresponse
->min_j());
295 jj
<= ceil(rig
+ nlresponse
->max_j()); jj
++) {
298 || ii
>= (int) nlsimulated
->height()
300 || jj
>= (int) nlsimulated
->width())
// Clip to the extents box computed by the linear pass.
303 if (ii
< extents
[0][0])
305 if (jj
< extents
[0][1])
307 if (ii
> extents
[1][0])
309 if (jj
> extents
[1][1])
312 class rasterizer::psf_result r
=
313 (*nlresponse
)(top
- ii
, bot
- ii
,
315 nlresponse
->select(ii
, jj
));
// Serialize accumulation into the shared images.
318 pthread_mutex_lock(lock
);
// Accumulate PSF-weighted, unlinearized linear-sim values.
320 nlsimulated
->pix(ii
, jj
) +=
321 r(exp
->unlinearize(lsimulated
->get_pixel(i
, j
)));
322 nlsim_weights
->pix(ii
, jj
) +=
325 pthread_mutex_unlock(lock
);
// Forward projection driver: builds the simulated input frame(s) from the
// approximation. Spawns N joinable pthreads running the linear subdomain
// worker, normalizes lsimulated by the accumulated weights (pixels below
// weight_floor are forced non-finite), merges per-thread extents, then —
// if a non-linear step is configured (nlsimulated != NULL) — repeats the
// process with the non-linear worker and nlsim_weights.
// NOTE(review): partial extraction; allocation/free of threads,
// thread_attr and args cleanup lines are missing from this view.
333 void _ip_frame_simulate(int frame_num
, image
*approximation
,
334 image
*lsimulated
, image
*nlsimulated
,
335 transformation t
, const raster
*lresponse
,
336 const raster
*nlresponse
, const exposure
&exp
,
340 * Threading initializations
348 pthread_t
*threads
= (pthread_t
*) malloc(sizeof(pthread_t
) * N
);
349 pthread_attr_t
*thread_attr
= (pthread_attr_t
*) malloc(sizeof(pthread_attr_t
) * N
);
350 pthread_mutex_t lock
= PTHREAD_MUTEX_INITIALIZER
;
356 sim_subdomain_args
*args
= new sim_subdomain_args
[N
];
360 * Initializations for linear filtering
363 image_ale_real
*lsim_weights
= new image_ale_real(
364 lsimulated
->height(),
366 lsimulated
->depth());
// Populate per-thread argument bundles; each thread gets an equal
// horizontal band of the approximation.
369 for (int ti
= 0; ti
< N
; ti
++) {
370 args
[ti
].frame_num
= frame_num
;
371 args
[ti
].approximation
= approximation
;
372 args
[ti
].lsimulated
= lsimulated
;
373 args
[ti
].nlsimulated
= nlsimulated
;
374 args
[ti
].lsim_weights
= lsim_weights
;
376 args
[ti
].lresponse
= lresponse
;
377 args
[ti
].nlresponse
= nlresponse
;
379 args
[ti
].i_min
= (approximation
->height() * ti
) / N
;
380 args
[ti
].i_max
= (approximation
->height() * (ti
+ 1)) / N
;
382 args
[ti
].j_max
= approximation
->width();
// Per-thread extents start inverted (posinf/neginf) so any touched
// pixel shrinks/grows them correctly.
383 args
[ti
].extents
[0] = point::posinf();
384 args
[ti
].extents
[1] = point::neginf();
387 args
[ti
].lock
= &lock
;
388 pthread_attr_init(&thread_attr
[ti
]);
389 pthread_attr_setdetachstate(&thread_attr
[ti
], PTHREAD_CREATE_JOINABLE
);
390 pthread_create(&threads
[ti
], &thread_attr
[ti
], _ip_frame_simulate_subdomain_linear
, &args
[ti
]);
// Non-threaded fallback path: run the worker inline.
392 _ip_frame_simulate_subdomain_linear(&args
[ti
]);
// Wait for all linear workers to finish.
397 for (int ti
= 0; ti
< N
; ti
++) {
398 pthread_join(threads
[ti
], NULL
);
// Normalize the linear simulated frame by accumulated PSF weight;
// under-weighted channels are forced non-finite (division by zero).
406 for (unsigned int ii
= 0; ii
< lsimulated
->height(); ii
++)
407 for (unsigned int jj
= 0; jj
< lsimulated
->width(); jj
++) {
408 pixel weight
= lsim_weights
->get_pixel(ii
, jj
);
409 const ale_real weight_floor
= 1e-10;
412 for (int k
= 0; k
< 3; k
++)
413 if (!(weight
[k
] > weight_floor
))
414 lsimulated
->pix(ii
, jj
)[k
]
415 /= zero
; /* Generate a non-finite value */
417 lsimulated
->pix(ii
, jj
)
// Merge per-thread extents into the caller's extents box.
425 for (int n
= 0; n
< N
; n
++)
426 for (int d
= 0; d
< 2; d
++) {
427 if (args
[n
].extents
[0][d
] < extents
[0][d
])
428 extents
[0][d
] = args
[n
].extents
[0][d
];
429 if (args
[n
].extents
[1][d
] > extents
[1][d
])
430 extents
[1][d
] = args
[n
].extents
[1][d
];
440 * Return if there is no non-linear step.
443 if (nlsimulated
== NULL
) {
449 * Initialize non-linear
452 image_ale_real
*nlsim_weights
= new image_ale_real(
453 nlsimulated
->height(),
454 nlsimulated
->width(),
455 nlsimulated
->depth());
// Re-use the args array; bands are now over the linear simulated frame.
457 for (int ti
= 0; ti
< N
; ti
++) {
458 args
[ti
].nlsim_weights
= nlsim_weights
;
459 args
[ti
].i_min
= (lsimulated
->height() * ti
) / N
;
460 args
[ti
].i_max
= (lsimulated
->height() * (ti
+ 1)) / N
;
462 args
[ti
].j_max
= lsimulated
->width();
465 pthread_attr_init(&thread_attr
[ti
]);
466 pthread_attr_setdetachstate(&thread_attr
[ti
], PTHREAD_CREATE_JOINABLE
);
467 pthread_create(&threads
[ti
], &thread_attr
[ti
], _ip_frame_simulate_subdomain_nonlinear
, &args
[ti
]);
// Non-threaded fallback path: run the worker inline.
469 _ip_frame_simulate_subdomain_nonlinear(&args
[ti
]);
// Wait for all non-linear workers to finish.
474 for (int ti
= 0; ti
< N
; ti
++) {
475 pthread_join(threads
[ti
], NULL
);
480 * Normalize non-linear
483 for (unsigned int ii
= 0; ii
< nlsimulated
->height(); ii
++)
484 for (unsigned int jj
= 0; jj
< nlsimulated
->width(); jj
++) {
485 pixel weight
= nlsim_weights
->get_pixel(ii
, jj
);
486 ale_real weight_floor
= 1e-10;
489 for (int k
= 0; k
< 3; k
++)
490 if (!(weight
[k
] > weight_floor
))
491 nlsimulated
->pix(ii
, jj
)[k
]
492 /= zero
; /* Generate a non-finite value */
494 nlsimulated
->pix(ii
, jj
)
// Merge per-thread extents again after the non-linear pass.
502 for (int n
= 0; n
< N
; n
++)
503 for (int d
= 0; d
< 2; d
++) {
504 if (args
[n
].extents
[0][d
] < extents
[0][d
])
505 extents
[0][d
] = args
[n
].extents
[0][d
];
506 if (args
[n
].extents
[1][d
] > extents
[1][d
])
507 extents
[1][d
] = args
[n
].extents
[1][d
];
511 * Finalize non-linear
514 delete nlsim_weights
;
// correction_t: accumulator for per-pixel backprojected corrections.
// Supports two accumulation modes selected at construction:
//  - weighted mean: sums value*weight in c and weight in cc;
//  - weighted median: per-frame results are folded into an
//    image_weighted_median (iwm) by frame_end().
// NOTE(review): partial extraction — declarations of weight_limit, c, cc
// and the destructor body are missing from this view.
519 struct correction_t
{
// Mode flag copied from ipc::use_weighted_median.
523 int use_weighted_median
;
// Median accumulator; only allocated when use_weighted_median is set.
528 image_weighted_median
*iwm
;
542 * Create correction structures.
544 correction_t(int use_weighted_median
, double weight_limit
, unsigned int h
, unsigned int w
, unsigned int d
) {
545 this->use_weighted_median
= use_weighted_median
;
546 if (use_weighted_median
)
547 iwm
= new image_weighted_median(h
, w
, d
);
548 this->weight_limit
= weight_limit
;
549 c
= new image_ale_real(h
, w
, d
);
550 cc
= new image_ale_real(h
, w
, d
);
554 * Destroy correction structures
557 if (use_weighted_median
)
// get_count: per-channel accumulated weight at (i, j), drawn from the
// median accumulator or the mean-mode weight image as appropriate.
566 pixel
get_count(int i
, int j
) {
567 if (use_weighted_median
)
568 return iwm
->get_weights()->get_pixel(i
, j
);
570 return cc
->get_pixel(i
, j
);
574 * Check for weight limit
// weight_limited: true when all three channels have reached the
// configured weight limit (further corrections can be skipped).
576 int weight_limited(int i
, int j
) {
578 && get_count(i
, j
)[0] >= weight_limit
579 && get_count(i
, j
)[1] >= weight_limit
580 && get_count(i
, j
)[2] >= weight_limit
)
// get_correction: the aggregated correction value at (i, j) —
// median-mode color, or mean-mode c/cc ratio.
589 pixel
get_correction(int i
, int j
) {
590 if (use_weighted_median
)
591 return iwm
->get_colors()->get_pixel(i
, j
);
593 return c
->get_pixel(i
, j
)
594 / cc
->get_pixel(i
, j
);
// frame_end: in median mode, fold this frame's (c/cc, cc) pair into
// the median accumulator and reset c and cc for the next frame.
600 void frame_end(int frame_num
) {
601 if (use_weighted_median
) {
603 for (unsigned int i
= 0; i
< c
->height(); i
++)
604 for (unsigned int j
= 0; j
< c
->width(); j
++) {
607 * Update the median calculator
609 pixel cval
= c
->get_pixel(i
, j
);
610 pixel ccval
= cc
->get_pixel(i
, j
);
611 iwm
->accumulate(i
, j
, frame_num
, cval
/ ccval
, ccval
);
616 c
->pix(i
, j
) = pixel::zero();
617 cc
->pix(i
, j
) = pixel::zero();
623 * Update correction structures, using either a weighted mean update or
624 * a weighted median update.
626 void update(int i
, int j
, pixel value_times_weight
, pixel weight
) {
627 c
->pix(i
, j
) += value_times_weight
;
628 cc
->pix(i
, j
) += weight
;
// Argument bundle for the per-thread correction workers
// (_ip_frame_correct_subdomain_*). NOTE(review): fields referenced by
// those workers (frame_num, cu, real, lreal, lsimulated, nlsimulated, t)
// are missing from this extraction — confirm against full source.
633 * For each pixel in APPROXIMATION, calculate the differences
634 * between SIMULATED and REAL pixels influenced by this pixel,
635 * and back-project the differences onto the correction array
636 * C. The number of frames backprojected to each pixel in C is
639 * Since APPROXIMATION can always, given sufficient computational
640 * resources, be configured to be of finer resolution than SIMULATED,
641 * we map APPROXIMATION pixels onto the SIMULATED grid rather than vice
642 * versa. This should reduce the error incurred by approximations in
643 * d2::transformation::unscaled_map_area*().
645 * This approach requires multiplying the resultant integral by a
646 * corrective factor, since the grids are generally of different
650 struct correct_subdomain_args
{
652 image
*approximation
;
// Backprojection kernels for the linear and non-linear passes.
659 const backprojector
*lresponse
;
660 const backprojector
*nlresponse
;
// Subdomain bounds for this worker thread.
661 unsigned int i_min
, i_max
, j_min
, j_max
;
// Thread worker: constructs lreal — the linearized comparison image —
// for the non-linear pipeline. Unlinearizes lsimulated into lreal,
// backprojects the (real - nlsimulated) difference through the
// non-linear kernel, then re-linearizes the result in place.
// NOTE(review): partial extraction; interior lines are missing and code
// is left byte-identical.
665 static void *_ip_frame_correct_subdomain_nonlinear(void *args
) {
667 correct_subdomain_args
*sargs
= (correct_subdomain_args
*) args
;
// Unpack the argument bundle into locals.
669 int frame_num
= sargs
->frame_num
;
670 image
*approximation
= sargs
->approximation
;
671 const image
*real
= sargs
->real
;
672 image
*lreal
= sargs
->lreal
;
673 image
*lsimulated
= sargs
->lsimulated
;
674 const image
*nlsimulated
= sargs
->nlsimulated
;
675 transformation t
= sargs
->t
;
676 const backprojector
*nlresponse
= sargs
->nlresponse
;
677 unsigned int i_min
= sargs
->i_min
;
678 unsigned int i_max
= sargs
->i_max
;
679 unsigned int j_min
= sargs
->j_min
;
680 unsigned int j_max
= sargs
->j_max
;
683 * Unlinearize values from lsimulated.
686 for (unsigned int i
= i_min
; i
< i_max
; i
++)
687 for (unsigned int j
= j_min
; j
< j_max
; j
++)
688 lreal
->set_pixel(i
, j
, real
->exp().unlinearize(
689 lsimulated
->get_pixel(i
, j
)));
693 * Backproject from real to lreal, iterating over all pixels
697 for (unsigned int i
= i_min
; i
< i_max
; i
++)
698 for (unsigned int j
= j_min
; j
< j_max
; j
++) {
701 * XXX: Is this right?
704 if (is_excluded_r(approximation
->offset(), i
, j
, frame_num
))
708 * Convenient variables for expressing the boundaries
709 * of the mapped area.
// Unit-square pixel footprint in the frame's own grid.
712 ale_pos top
= i
- 0.5;
713 ale_pos bot
= i
+ 0.5;
714 ale_pos lef
= j
- 0.5;
715 ale_pos rig
= j
+ 0.5;
718 * Iterate over non-linear pixels influenced by linear
722 for (int ii
= (int) floor(top
+ nlresponse
->min_i());
723 ii
<= ceil(bot
+ nlresponse
->max_i()); ii
++)
724 for (int jj
= (int) floor(lef
+ nlresponse
->min_j());
725 jj
<= ceil(rig
+ nlresponse
->max_j()); jj
++) {
728 || ii
>= (int) nlsimulated
->height()
730 || jj
>= (int) nlsimulated
->width())
// Evaluate the backprojection kernel over the overlap region.
733 class backprojector::psf_result r
=
734 (*nlresponse
)(top
- ii
, bot
- ii
,
736 nlresponse
->select(ii
, jj
));
740 real
->exp().unlinearize(real
->get_pixel(ii
, jj
));
743 nlsimulated
->get_pixel(ii
, jj
);
// Skip comparisons against non-finite (unweighted) simulated pixels.
745 if (!finite(comp_simu
[0])
746 || !finite(comp_simu
[1])
747 || !finite(comp_simu
[2]))
751 * Backprojection value.
754 pixel bpv
= r(comp_real
- comp_simu
);
// Accumulate the backprojected difference into lreal.
760 lreal
->pix(i
, j
) += bpv
;
// Re-linearize lreal so the linear correction pass can consume it.
768 for (unsigned int i
= i_min
; i
< i_max
; i
++)
769 for (unsigned int j
= j_min
; j
< j_max
; j
++)
770 lreal
->set_pixel(i
, j
, real
->exp().linearize(
771 lreal
->get_pixel(i
, j
)));
// Thread worker: the main (linear) backprojection step. For each
// approximation pixel in the band, maps its footprint into the simulated
// frame, evaluates the backprojection kernel over influenced frame
// pixels, rescales to approximation-grid units, and folds the
// confidence-weighted (lreal - lsimulated) difference into the shared
// correction accumulator cu. NOTE(review): partial extraction; interior
// lines (continue bodies, q/d declarations, comp_* declarations) are
// missing and code is left byte-identical.
777 static void *_ip_frame_correct_subdomain_linear(void *args
) {
779 correct_subdomain_args
*sargs
= (correct_subdomain_args
*) args
;
// Unpack the argument bundle into locals.
781 int frame_num
= sargs
->frame_num
;
782 image
*approximation
= sargs
->approximation
;
783 correction_t
*cu
= sargs
->cu
;
784 const image
*real
= sargs
->real
;
785 const image
*lreal
= sargs
->lreal
;
786 const image
*lsimulated
= sargs
->lsimulated
;
787 transformation t
= sargs
->t
;
788 const backprojector
*lresponse
= sargs
->lresponse
;
789 unsigned int i_min
= sargs
->i_min
;
790 unsigned int i_max
= sargs
->i_max
;
791 unsigned int j_min
= sargs
->j_min
;
792 unsigned int j_max
= sargs
->j_max
;
795 * Iterate over all pixels in the approximation.
798 for (unsigned int i
= i_min
; i
< i_max
; i
++)
799 for (unsigned int j
= j_min
; j
< j_max
; j
++) {
802 * Check correction count against any weight limit.
805 if (cu
->weight_limited(i
, j
))
809 * Obtain the position Q and dimensions D of image
810 * approximation pixel (i, j) in the coordinate system
811 * of the simulated (and real) frame.
814 point p
= point(i
+ approximation
->offset()[0], j
+ approximation
->offset()[1]);
818 t
.unscaled_map_area_inverse(p
, &q
, d
);
821 * Convenient variables for expressing the boundaries
822 * of the mapped area.
825 ale_pos top
= q
[0] - d
[0];
826 ale_pos bot
= q
[0] + d
[0];
827 ale_pos lef
= q
[1] - d
[1];
828 ale_pos rig
= q
[1] + d
[1];
831 * Iterate over frame pixels influenced by the scene
835 for (int ii
= (int) floor(top
+ lresponse
->min_i());
836 ii
<= ceil(bot
+ lresponse
->max_i()); ii
++)
837 for (int jj
= (int) floor(lef
+ lresponse
->min_j());
838 jj
<= ceil(rig
+ lresponse
->max_j()); jj
++) {
841 || ii
>= (int) lreal
->height()
843 || jj
>= (int) lreal
->width())
// Skip frame pixels excluded from this frame's region.
846 if (is_excluded_f(ii
, jj
, frame_num
))
849 unsigned int selection
= lresponse
->select(ii
, jj
);
851 class backprojector::psf_result r
=
852 (*lresponse
)(top
- ii
, bot
- ii
,
858 * R is the result of integration in the
859 * coordinate system of the simulated frame.
860 * We must rescale to get the result of
861 * integration in the coordinate system of the
862 * approximation image.
865 r
*= (1 / (bot
- top
) / (rig
- lef
));
868 lreal
->get_pixel(ii
, jj
);
871 // real->get_pixel(ii, jj);
874 lsimulated
->get_pixel(ii
, jj
);
// Skip comparisons involving any non-finite channel.
876 if (!finite(comp_simu
[0])
877 || !finite(comp_simu
[1])
878 || !finite(comp_simu
[2])
879 || !finite(comp_lreal
[0])
880 || !finite(comp_lreal
[1])
881 || !finite(comp_lreal
[2]))
885 * Backprojection value unadjusted
889 pixel bpv
= r(comp_lreal
- comp_simu
);
892 * Confidence [equal to (1, 1, 1) when
893 * confidence is uniform].
896 // Ordinary certainty
897 // pixel conf = real->exp().confidence(comp_lreal);
899 // One-sided certainty
900 // pixel conf = real->exp().one_sided_confidence(comp_lreal, bpv);
901 // conf = real->exp().one_sided_confidence(comp_real, bpv);
903 // Estimate-based certainty
904 pixel conf
= real
->exp().confidence(comp_simu
);
906 // One-sided estimate-based certainty
907 // pixel conf = real->exp().one_sided_confidence(comp_simu, bpv);
909 // One-sided approximation-based certainty
910 // pixel conf = real->exp().one_sided_confidence(approximation->pix(i, j), bpv);
913 * If a color is bayer-interpolated, then we have no confidence in its
// Zero confidence for the two channels not measured by the
// Bayer sensor at (ii, jj).
917 if (real
->get_bayer() != IMAGE_BAYER_NONE
) {
918 int color
= ((image_bayer_ale_real
*)real
)->bayer_color(ii
, jj
);
919 conf
[(color
+ 1) % 3] = 0;
920 conf
[(color
+ 2) % 3] = 0;
927 // c->pix(i, j) += bpv * conf;
930 * Increment the backprojection weight. When
931 * confidence is uniform, this should weight
932 * each frame's correction equally.
935 // cc->pix(i, j) += conf * r.weight()
936 // / lresponse->integral(selection);
938 cu
->update(i
, j
, bpv
* conf
, conf
* r
.weight() / lresponse
->integral(selection
));
// Correction driver for one frame: builds lreal (via the non-linear
// worker threads when nlsimulated is present), optionally performs
// exposure registration (adjusting the frame exposure multiplier from
// confidence-weighted simulated/real ratios over the touched extents),
// then runs the linear correction worker threads to update cu.
// NOTE(review): partial extraction; declarations of ec/lreal, several
// branch bodies and the trailing cleanup are missing from this view.
945 void _ip_frame_correct(int frame_num
, image
*approximation
,
946 correction_t
*cu
, const image
*real
, image
*lsimulated
,
947 image
*nlsimulated
, transformation t
,
948 const backprojector
*lresponse
,
949 const backprojector
*nlresponse
, point
*extents
) {
952 * Threading initializations
960 pthread_t
*threads
= (pthread_t
*) malloc(sizeof(pthread_t
) * N
);
961 pthread_attr_t
*thread_attr
= (pthread_attr_t
*) malloc(sizeof(pthread_attr_t
) * N
);
967 correct_subdomain_args
*args
= new correct_subdomain_args
[N
];
// Populate per-thread argument bundles shared by both passes.
969 for (int ti
= 0; ti
< N
; ti
++) {
970 args
[ti
].frame_num
= frame_num
;
971 args
[ti
].approximation
= approximation
;
973 args
[ti
].real
= real
;
974 args
[ti
].lsimulated
= lsimulated
;
975 args
[ti
].nlsimulated
= nlsimulated
;
977 args
[ti
].lresponse
= lresponse
;
978 args
[ti
].nlresponse
= nlresponse
;
982 * Generate the image to compare lsimulated with.
987 if (nlsimulated
== NULL
)
// Non-linear path: allocate a fresh lreal and fill it with the
// non-linear worker threads.
991 image
*new_lreal
= new image_ale_real(
998 for (int ti
= 0; ti
< N
; ti
++) {
999 args
[ti
].lreal
= new_lreal
;
1000 args
[ti
].i_min
= (new_lreal
->height() * ti
) / N
;
1001 args
[ti
].i_max
= (new_lreal
->height() * (ti
+ 1)) / N
;
1003 args
[ti
].j_max
= new_lreal
->width();
1006 pthread_attr_init(&thread_attr
[ti
]);
1007 pthread_attr_setdetachstate(&thread_attr
[ti
], PTHREAD_CREATE_JOINABLE
);
1008 pthread_create(&threads
[ti
], &thread_attr
[ti
], _ip_frame_correct_subdomain_nonlinear
, &args
[ti
]);
// Non-threaded fallback path: run the worker inline.
1010 _ip_frame_correct_subdomain_nonlinear(&args
[ti
]);
// Wait for the non-linear workers.
1015 for (int ti
= 0; ti
< N
; ti
++) {
1016 pthread_join(threads
[ti
], NULL
);
1024 * Perform exposure adjustment.
1027 if (exposure_register
) {
// Simple estimator: ratio of average channel magnitudes.
1032 ec
= lsimulated
->avg_channel_magnitude()
1033 / lreal
->avg_channel_magnitude();
// Alternative estimator: confidence-weighted per-pixel ratios.
1035 pixel_accum ratio_sum
;
1036 pixel_accum weight_sum
;
1038 for (unsigned int i
= 0; i
< lreal
->height(); i
++)
1039 for (unsigned int j
= 0; j
< lreal
->width(); j
++) {
1041 pixel s
= lsimulated
->get_pixel(i
, j
);
1042 pixel r
= lreal
->get_pixel(i
, j
);
1043 pixel confidence
= real
->exp().confidence(r
);
1051 ratio_sum
+= confidence
* s
/ r
;
1052 weight_sum
+= confidence
;
1056 ec
= ratio_sum
/ weight_sum
;
// Sum-based estimator restricted to the touched extents box.
1058 pixel_accum ssum
, rsum
;
1060 // for (unsigned int i = 0; i < lreal->height(); i++)
1061 // for (unsigned int j = 0; j < lreal->width(); j++) {
1062 for (unsigned int i
= (unsigned int) floor(extents
[0][0]);
1063 i
< (unsigned int) ceil(extents
[1][0]); i
++)
1064 for (unsigned int j
= (unsigned int) floor(extents
[0][1]);
1065 j
< (unsigned int) ceil(extents
[1][1]); j
++) {
1067 pixel s
= lsimulated
->get_pixel(i
, j
);
1068 pixel r
= lreal
->get_pixel(i
, j
);
1070 pixel confidence
= real
->exp().confidence(s
);
1074 || !confidence
.finite())
// For Bayer input only the sensed channel contributes.
1077 if (real
->get_bayer() != IMAGE_BAYER_NONE
) {
1078 int color
= ((image_bayer_ale_real
*)real
)->bayer_color(i
, j
);
1079 ssum
[color
] += confidence
[color
] * s
[color
];
1080 rsum
[color
] += confidence
[color
] * r
[color
];
1082 ssum
+= confidence
* s
;
1083 rsum
+= confidence
* r
;
// Apply the estimated exposure correction to the frame.
1096 real
->exp().set_multiplier(
1097 real
->exp().get_multiplier() * ec
);
// Linear correction pass: bands are over the approximation.
1099 for (int ti
= 0; ti
< N
; ti
++) {
1100 args
[ti
].lreal
= (d2::image
*) lreal
;
1101 args
[ti
].i_min
= (approximation
->height() * ti
) / N
;
1102 args
[ti
].i_max
= (approximation
->height() * (ti
+ 1)) / N
;
1104 args
[ti
].j_max
= approximation
->width();
1107 pthread_attr_init(&thread_attr
[ti
]);
1108 pthread_attr_setdetachstate(&thread_attr
[ti
], PTHREAD_CREATE_JOINABLE
);
1109 pthread_create(&threads
[ti
], &thread_attr
[ti
], _ip_frame_correct_subdomain_linear
, &args
[ti
]);
// Non-threaded fallback path: run the worker inline.
1111 _ip_frame_correct_subdomain_linear(&args
[ti
]);
// Wait for the linear workers.
1116 for (int ti
= 0; ti
< N
; ti
++) {
1117 pthread_join(threads
[ti
], NULL
);
// One full Irani-Peleg round for a single frame: forward-project the
// approximation into simulated frame(s), then backproject the difference
// against the real frame into the correction accumulator cu.
// NOTE(review): partial extraction; allocation arguments and cleanup
// lines are missing from this view.
1128 * Adjust correction array C based on the difference between the
1129 * simulated projected frame and real frame M. Update the correction
1130 * count CC for affected pixels in C.
1133 virtual void _ip_frame(int frame_num
, correction_t
*cu
, const image
*real
,
1134 transformation t
, const raster
*f
, const backprojector
*b
,
1135 const raster
*nlf
, const backprojector
*nlb
) {
1137 ui::get()->d2_irani_peleg_start();
1140 * Initialize simulated data structures
1143 image
*lsimulated
= new image_ale_real(
// nlsimulated stays NULL when there is no non-linear PSF step.
1148 image
*nlsimulated
= NULL
;
1151 nlsimulated
= new image_ale_real(
1157 * Create simulated frames with forward projection.
1160 ui::get()->ip_frame_simulate_start();
// extents tracks the bounding box of frame pixels actually touched;
// starts inverted so min/max updates work.
1162 point extents
[2] = { point::posinf(), point::neginf() };
1164 _ip_frame_simulate(frame_num
, approximation
, lsimulated
, nlsimulated
, t
, f
, nlf
, real
->exp(), extents
);
1167 * Update the correction array using backprojection.
1170 ui::get()->ip_frame_correct_start();
1171 _ip_frame_correct(frame_num
, approximation
, cu
, real
, lsimulated
, nlsimulated
, t
, b
, nlb
, extents
);
1174 * Finalize data structures.
1180 ui::get()->d2_irani_peleg_stop();
// Main iteration loop: rasterize per-frame PSFs and backprojection
// kernels, then for each of `iterations` rounds run _ip_frame() over all
// aligned frames, aggregate corrections, and apply them to the
// approximation (clamping to finite, non-negative light).
// NOTE(review): partial extraction; declarations (e.g. nlf), branch
// bodies and cleanup code are missing from this view.
1184 * Iterate _ip_frame() over all frames, and update APPROXIMATION after
1185 * corrections from all frames have been summed. Repeat for the number
1186 * of iterations specified by the user.
1189 virtual void _ip() {
1192 * Create rasterized PSF and backprojection kernel AUX.
1195 raster
**f
= (raster
**) malloc(image_rw::count() * sizeof(raster
*));
1196 backprojector
**b
= (backprojector
**) malloc(image_rw::count() * sizeof(backprojector
*));
// Build a normalized rasterized PSF and its backprojector per frame.
1198 for (unsigned int m
= 0; m
< image_rw::count(); m
++) {
1200 if (!align::match(m
))
1203 transformation t
= align::of(m
);
1205 f
[m
] = new normalizer(new rasterizer(lresponse
, t
));
1206 b
[m
] = new backprojector(f
[m
]);
// Non-linear kernels are shared across frames (identity transform).
1210 backprojector
*nlb
= NULL
;
1213 nlf
= new normalizer(new rasterizer(nlresponse
, transformation::eu_identity()));
1214 nlb
= new backprojector(nlf
);
// Outer loop: one correction_t per iteration.
1217 for (unsigned int n
= 0; n
< iterations
; n
++) {
1219 correction_t
*correction
= new correction_t(
1220 use_weighted_median
,
1222 approximation
->height(),
1223 approximation
->width(),
1224 approximation
->depth());
1227 * Iterate over all frames
1230 for (unsigned int m
= 0; m
< image_rw::count(); m
++) {
1232 if (!align::match(m
))
1235 ui::get()->ip_frame_start(m
);
1237 transformation t
= align::of(m
);
1238 const image
*real
= image_rw::open(m
);
1240 _ip_frame(m
, correction
, real
,
1241 t
, f
[m
], b
[m
], nlf
, nlb
);
// Fold this frame's corrections into the accumulator.
1245 correction
->frame_end(m
);
1249 * Update the approximation.
1252 ui::get()->ip_update();
1254 for (unsigned int i
= 0; i
< approximation
->height(); i
++)
1255 for (unsigned int j
= 0; j
< approximation
->width(); j
++) {
1257 pixel cpix
= correction
->get_correction(i
, j
);
1258 pixel ccpix
= correction
->get_count(i
, j
);
1259 pixel apix
= approximation
->get_pixel(i
, j
);
1261 for (unsigned int k
= 0; k
< 3; k
++) {
// Skip channels with (nearly) zero correction weight.
1263 const ale_real cc_floor
= 1e-20;
1265 if (ccpix
[k
] < cc_floor
)
// Skip non-finite corrections or approximation values.
1268 if (!finite(cpix
[k
]))
1271 if (!finite(apix
[k
]))
1274 ale_real new_value
= cpix
[k
] + apix
[k
];
1276 assert (finite(apix
[k
]));
1277 assert (finite(ccpix
[k
]));
1278 assert (finite(cpix
[k
]));
1279 assert (finite(new_value
));
1282 * Negative light doesn't make sense.
1287 approximation
->chan(i
, j
, k
) = new_value
;
// Write intermediate output after each iteration.
1294 ui::get()->ip_write();
1295 image_rw::output(approximation
);
1298 ui::get()->ip_step_done();
// Tear down the per-frame kernels.
1302 for (unsigned int m
= 0; m
< image_rw::count(); m
++) {
1304 if (!align::match(m
))
// Constructor: stores the input render chain, iteration count, PSFs,
// exposure-registration flag, weighted-median flag, and weight limit.
// NOTE(review): the `_inc` parameter is not visibly used in this
// extraction — confirm against full source.
1320 ipc(render
*input
, unsigned int iterations
, int _inc
, psf
*lresponse
,
1321 psf
*nlresponse
, int exposure_register
,
1322 int use_weighted_median
, double ipwl
) {
1323 this->input
= input
;
1326 this->iterations
= iterations
;
1327 this->lresponse
= lresponse
;
1328 this->nlresponse
= nlresponse
;
1329 this->exposure_register
= exposure_register
;
1330 this->use_weighted_median
= use_weighted_median
;
// ipwl: Irani-Peleg weight limit (see correction_t::weight_limited).
1331 this->weight_limit
= ipwl
;
// get_image: return the refined approximation when available, falling
// back to the input chain's image. NOTE(review): the guarding condition
// between the two returns is missing from this extraction.
1334 const image
*get_image() {
1336 return approximation
;
1338 return input
->get_image();
// get_defined: delegate the defined-region query to the input chain.
1341 const image
*get_defined() {
1342 return input
->get_defined();
// sync: obtain a working copy of the input image as the starting
// approximation, bracketed by UI progress notifications.
// NOTE(review): the call to _ip() and the return value are missing from
// this extraction — confirm against full source.
1354 virtual int sync() {
1356 ui::get()->ip_start();
1358 approximation
= optimizations::get_ip_working_image(input
->get_image());
1360 ui::get()->ip_done();
1368 void free_memory() {