2 * Copyright 2002, 2004, 2005, 2006, 2007, 2008, 2009 David Hilvert <dhilvert@gmail.com>
4 * This file is part of libale.
6 * libale is free software: you can redistribute it and/or modify it under the
7 * terms of the GNU Affero General Public License as published by the Free
8 * Software Foundation, either version 3 of the License, or (at your option)
11 * libale is distributed in the hope that it will be useful, but WITHOUT ANY
12 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
13 * FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
16 * You should have received a copy of the GNU Affero General Public License
17 * along with libale. If not, see <http://www.gnu.org/licenses/>.
27 * Types for scale clusters.
33 * Experimental non-linear scaling approach.
/*
 * NOTE(review): extraction-garbled fragment -- upstream source line numbers
 * ("36", "39", ...) are fused into the text, interior lines (37-38, 41-42)
 * and the closing "};" are missing.  Tokens preserved byte-for-byte;
 * recover the full text from the upstream source before compiling.
 *
 * Per-level data for the experimental non-linear scaling approach:
 * min/max extrema images for alignment weights and input certainty.
 */
36 struct nl_scale_cluster
{
39 ale_image aweight_max
;
40 ale_image aweight_min
;
43 ale_image input_certainty_max
;
44 ale_image input_certainty_min
;
/*
 * NOTE(review): extraction-garbled fragment -- upstream lines 51-54, 56-58
 * and the closing "};" are missing; other members (accum, input, aweight,
 * input_scale, certainty, ax_parameters) are referenced elsewhere in this
 * file but their declarations are not visible here.  Tokens preserved
 * byte-for-byte.
 *
 * One level-of-detail cluster used during alignment; holds the per-level
 * input-certainty image and a pointer to non-linear scale cluster data.
 */
50 struct scale_cluster
{
55 ale_image input_certainty
;
59 nl_scale_cluster
*nl_scale_clusters
;
/*
 * init_clusters() -- build an array of 'steps' level-of-detail scale
 * clusters for aligning 'input_frame' against 'reference_image'.
 *
 * NOTE(review): extraction-garbled fragment.  Upstream line numbers are
 * fused into the text, statements are split across lines, and interior
 * lines are missing (e.g. the "else" at upstream line 97 pairing with the
 * "if" at 95, the "else" at 136, the loop's closing brace, and the
 * declaration of 'step').  Tokens below are preserved byte-for-byte;
 * recover the full text from the upstream source before modifying.
 */
64 static struct scale_cluster
*init_clusters(ale_context ac
, int frame
, ale_pos scale_factor
,
65 ale_image input_frame
, ale_trans input_trans
, ale_image reference_image
,
66 ale_image alignment_weights
, unsigned int steps
, ale_filter interpolant
) {
/* Pull per-frame parameters out of the input transform. */
68 int input_bayer
= ale_trans_get_bayer(input_trans
);
69 ale_real input_gamma
= ale_trans_get_gamma(input_trans
);
70 ale_certainty input_certainty
= ale_trans_retain_certainty(input_trans
);
71 ale_real input_black
= ale_trans_get_black(input_trans
);
76 * Allocate memory for the array.
/* NOTE(review): malloc result is not checked before use in the visible
 * text -- a later fragment ("#error add check for non-NULL return")
 * suggests upstream knew this; confirm against upstream. */
79 struct scale_cluster
*scale_clusters
=
80 (struct scale_cluster
*) malloc(steps
* sizeof(struct scale_cluster
));
86 * Prepare images and exclusion regions for the highest level
90 scale_clusters
[0].accum
= reference_image
;
92 ale_context_signal(ac
, "ui_constructing_lod_clusters", 0.0);
/* Level 0: scale the input down only when scale_factor < 1 and no
 * interpolant is in use; otherwise (missing "else" branch, upstream
 * line 97) the input frame is used directly. */
94 scale_clusters
[0].input_scale
= scale_factor
;
95 if (scale_factor
< 1.0 && interpolant
== NULL
)
96 scale_clusters
[0].input
= ale_scale_image(input_frame
, scale_factor
, ale_context_get_color_type(ac
), input_gamma
, input_bayer
);
98 scale_clusters
[0].input
= input_frame
;
100 scale_clusters
[0].aweight
= alignment_weights
;
103 * Allocate and determine input frame certainty.
106 scale_clusters
[0].input_certainty
= ale_clone_image(scale_clusters
[0].input
, ale_context_get_certainty_type(ac
));
108 ale_eval("MAP_PIXEL(%0I, p, CERTAINTY(%1t, p, %2f, GET_AFFINE_PIXEL(%3i, p, %4f)))",
109 scale_clusters
[0].input_certainty
, input_black
, input_certainty
,
110 scale_clusters
[0].input
, (scale_factor
< 1.0 && interpolant
== NULL
) ? 1 : input_gamma
);
113 init_nl_cluster(&(scale_clusters
[0]));
117 * Prepare reduced-detail images and exclusion
/* Levels 1..steps-1: accum/aweight are halved each step; the input image
 * and its certainty are reused, scaled by sf/2, or halved, depending on
 * the previous level's input_scale 'sf' and the interpolant. */
121 for (step
= 1; step
< steps
; step
++) {
122 ale_context_signal(ac
, "ui_constructing_lod_clusters", step
);
123 scale_clusters
[step
].accum
= ale_scale_image(scale_clusters
[step
- 1].accum
, 0.5, ale_context_get_color_type(ac
), 1, ALE_BAYER_NONE
);
124 scale_clusters
[step
].aweight
= ale_scale_image_wt(scale_clusters
[step
- 1].aweight
, 0.5, ale_context_get_weight_type(ac
), ALE_BAYER_NONE
);
126 ale_pos sf
= scale_clusters
[step
- 1].input_scale
;
128 scale_clusters
[step
].input_scale
= sf
/ 2;
130 if (sf
>= 2.0 || interpolant
!= NULL
) {
131 scale_clusters
[step
].input
= scale_clusters
[step
- 1].input
;
132 scale_clusters
[step
].input_certainty
= scale_clusters
[step
- 1].input_certainty
;
133 } else if (sf
>= 1.0) {
134 scale_clusters
[step
].input
= ale_scale_image(scale_clusters
[step
- 1].input
, sf
/ 2, ale_context_get_color_type(ac
), input_gamma
, input_bayer
);
135 scale_clusters
[step
].input_certainty
= ale_scale_image_wt(scale_clusters
[step
- 1].input_certainty
, sf
/ 2, ale_context_get_certainty_type(ac
), input_bayer
);
/* NOTE(review): missing "} else {" (upstream line 136) before the
 * following 0.5-scaling branch. */
137 scale_clusters
[step
].input
= ale_scale_image(scale_clusters
[step
- 1].input
, 0.5, ale_context_get_color_type(ac
), 1, ALE_BAYER_NONE
);
138 scale_clusters
[step
].input_certainty
= ale_scale_image_wt(scale_clusters
[step
- 1].input_certainty
, 0.5, ale_context_get_certainty_type(ac
), ALE_BAYER_NONE
);
142 init_nl_cluster(&(scale_clusters
[step
]));
/* Release the certainty reference retained at upstream line 70. */
146 ale_certainty_release(input_certainty
);
148 return scale_clusters
;
152 * Not-quite-symmetric difference function. Determines the difference in areas
153 * where the arrays overlap. Uses the reference array's notion of pixel positions.
155 * For the purposes of determining the difference, this function divides each
156 * pixel value by the corresponding image's average pixel magnitude, unless we
159 * a) Extending the boundaries of the image, or
161 * b) following the previous frame's transform
163 * If we are doing monte-carlo pixel sampling for alignment, we
164 * typically sample a subset of available pixels; otherwise, we sample
/*
 * NOTE(review): fragment of the 'run' statistics record (enclosing
 * declaration not visible).  Sampling-area centroid accumulators and
 * difference-error (de_*) accumulators; updated by run_sample(), merged
 * by run_add(), scaled by run_rescale(), and read by the centroid/error
 * getters below.  Tokens preserved byte-for-byte.
 */
177 ale_accum centroid
[2], centroid_divisor
;
178 ale_accum de_centroid
[2], de_centroid_v
, de_sum
;
/*
 * run_init() -- reset a run's bounds and accumulator state before a
 * sampling pass.
 *
 * NOTE(review): extraction-garbled fragment; upstream lines 182-185,
 * 188-190, 192, 195 and the closing brace are missing (presumably
 * further zero-initialization, e.g. result/divisor/centroid/de_sum --
 * TODO confirm against upstream).  Tokens preserved byte-for-byte.
 */
181 static void run_init(run
*r
, ale_trans offset
) {
/* Bounds start inverted (min = +inf, max = -inf) so any sampled point
 * tightens them. */
186 r
->min
= point_posinf(2);
187 r
->max
= point_neginf(2);
191 r
->centroid_divisor
= 0;
193 r
->de_centroid
[0] = 0;
194 r
->de_centroid
[1] = 0;
196 r
->de_centroid_v
= 0;
/*
 * run_add() -- merge another run's accumulated statistics into this one
 * (used to combine per-thread subdomain results after a parallel diff).
 *
 * Sums result/divisor and the centroid / difference-error accumulators;
 * widens min/max bounds component-wise.
 *
 * NOTE(review): extraction-garbled fragment; upstream lines 206, 214-215
 * and the closing braces are missing.  Tokens preserved byte-for-byte.
 */
203 static void run_add(const run
&_run
) {
204 result
+= _run
.result
;
205 divisor
+= _run
.divisor
;
207 for (int d
= 0; d
< 2; d
++) {
208 if (min
[d
] > _run
.min
[d
])
209 min
[d
] = _run
.min
[d
];
210 if (max
[d
] < _run
.max
[d
])
211 max
[d
] = _run
.max
[d
];
212 centroid
[d
] += _run
.centroid
[d
];
213 de_centroid
[d
] += _run
.de_centroid
[d
];
216 centroid_divisor
+= _run
.centroid_divisor
;
217 de_centroid_v
+= _run
.de_centroid_v
;
218 de_sum
+= _run
.de_sum
;
/*
 * NOTE(review): three heavily truncated members of 'run' -- a copy
 * constructor (upstream 221), an assignment operator (upstream 234,
 * body missing entirely), and get_error() (upstream 249-250).  Only
 * get_error()'s body survives: the error metric is
 * (result / divisor)^(1 / metric_exponent).  Tokens preserved
 * byte-for-byte; recover full bodies from upstream.
 */
221 static run_run(const run
&_run
) : offset(_run
.offset
) {
234 static run
&run_operator
=(const run
&_run
) {
249 static ale_accum
run_get_error() const {
250 return pow(result
/ divisor
, 1/(ale_accum
) metric_exponent
);
253 static void run_sample(int f
, scale_cluster c
, int i
, int j
, point t
, point u
,
254 const run
&comparison
) {
256 pixel pa
= c
.accum
->get_pixel(i
, j
);
258 ale_real this_result
[2] = { 0, 0 };
259 ale_real this_divisor
[2] = { 0, 0 };
263 weight
[0] = pixel(1, 1, 1);
264 weight
[1] = pixel(1, 1, 1);
266 pixel tm
= offset
.get_tonal_multiplier(point(i
, j
) + c
.accum
->offset());
268 if (interpolant
!= NULL
) {
269 interpolant
->filtered(i
, j
, &p
[0], &weight
[0], 1, f
);
271 if (weight
[0].min_norm() > ale_real_weight_floor
) {
278 p
[0] = c
.input
->get_bl(t
);
284 p
[1] = c
.input
->get_bl(u
);
293 if (certainty_weights
== 1) {
296 * For speed, use arithmetic interpolation (get_bl(.))
297 * instead of geometric (get_bl(., 1))
300 weight
[0] *= c
.input_certainty
->get_bl(t
);
302 weight
[1] *= c
.input_certainty
->get_bl(u
);
303 weight
[0] *= c
.certainty
->get_pixel(i
, j
);
304 weight
[1] *= c
.certainty
->get_pixel(i
, j
);
307 if (c
.aweight
!= NULL
) {
308 weight
[0] *= c
.aweight
->get_pixel(i
, j
);
309 weight
[1] *= c
.aweight
->get_pixel(i
, j
);
313 * Update sampling area statistics
325 centroid
[0] += (weight
[0][0] + weight
[0][1] + weight
[0][2]) * i
;
326 centroid
[1] += (weight
[0][0] + weight
[0][1] + weight
[0][2]) * j
;
327 centroid_divisor
+= (weight
[0][0] + weight
[0][1] + weight
[0][2]);
330 * Determine alignment type.
333 for (int m
= 0; m
< (u
.defined() ? 2 : 1); m
++)
334 if (channel_alignment_type
== 0) {
336 * Align based on all channels.
340 for (int k
= 0; k
< 3; k
++) {
341 ale_real achan
= pa
[k
];
342 ale_real bchan
= p
[m
][k
];
344 this_result
[m
] += weight
[m
][k
] * pow(fabs(achan
- bchan
), metric_exponent
);
345 this_divisor
[m
] += weight
[m
][k
] * pow(achan
> bchan
? achan
: bchan
, metric_exponent
);
347 } else if (channel_alignment_type
== 1) {
349 * Align based on the green channel.
352 ale_real achan
= pa
[1];
353 ale_real bchan
= p
[m
][1];
355 this_result
[m
] = weight
[m
][1] * pow(fabs(achan
- bchan
), metric_exponent
);
356 this_divisor
[m
] = weight
[m
][1] * pow(achan
> bchan
? achan
: bchan
, metric_exponent
);
357 } else if (channel_alignment_type
== 2) {
359 * Align based on the sum of all channels.
366 for (int k
= 0; k
< 3; k
++) {
369 wsum
+= weight
[m
][k
] / 3;
372 this_result
[m
] = wsum
* pow(fabs(asum
- bsum
), metric_exponent
);
373 this_divisor
[m
] = wsum
* pow(asum
> bsum
? asum
: bsum
, metric_exponent
);
377 // ale_real de = fabs(this_result[0] / this_divisor[0]
378 // - this_result[1] / this_divisor[1]);
379 ale_real de
= fabs(this_result
[0] - this_result
[1]);
381 de_centroid
[0] += de
* (ale_real
) i
;
382 de_centroid
[1] += de
* (ale_real
) j
;
384 de_centroid_v
+= de
* (ale_real
) t
.lengthto(u
);
389 result
+= (this_result
[0]);
390 divisor
+= (this_divisor
[0]);
/*
 * run_rescale() -- rescale the run's transform and its position-dependent
 * difference-error accumulators by 'scale' (used when switching level of
 * detail).
 *
 * NOTE(review): extraction-garbled fragment; upstream line 395 and the
 * closing brace are missing.  Centroid accumulators are not rescaled in
 * the visible text -- TODO confirm against upstream whether that is
 * intentional.  Tokens preserved byte-for-byte.
 */
393 static void run_rescale(ale_pos scale
) {
394 offset
.rescale(scale
);
396 de_centroid
[0] *= scale
;
397 de_centroid
[1] *= scale
;
398 de_centroid_v
*= scale
;
/*
 * run_get_centroid() -- weighted sampling-area centroid:
 * (centroid[0], centroid[1]) / centroid_divisor.
 *
 * Asserts that the accumulators are finite and that the result is either
 * defined or the divisor is zero (empty sampling area).
 *
 * NOTE(review): extraction-garbled fragment; upstream line 403, the
 * return statement and closing brace are missing.  Tokens preserved
 * byte-for-byte.
 */
401 static point
run_get_centroid() {
402 point result
= point(centroid
[0] / centroid_divisor
, centroid
[1] / centroid_divisor
);
404 assert (finite(centroid
[0])
405 && finite(centroid
[1])
406 && (result
.defined() || centroid_divisor
== 0));
/*
 * run_get_error_centroid() -- centroid of the difference error:
 * (de_centroid[0], de_centroid[1]) / de_sum.
 * run_get_error_perturb() -- error-weighted mean displacement
 * de_centroid_v / de_sum, used to size perturbation multipliers.
 *
 * NOTE(review): extraction-garbled fragments; return statements and
 * closing braces are missing.  Division by de_sum == 0 would yield a
 * non-finite result -- callers visibly guard with finite() checks.
 * Tokens preserved byte-for-byte.
 */
411 static point
run_get_error_centroid() {
412 point result
= point(de_centroid
[0] / de_sum
, de_centroid
[1] / de_sum
);
417 static ale_pos
run_get_error_perturb() {
418 ale_pos result
= de_centroid_v
/ de_sum
;
423 template<class diff_trans
>
424 class diff_stat_generic
{
426 transformation::elem_bounds_t elem_bounds
;
429 * When non-empty, runs.front() is best, runs.back() is
433 std::vector
<run
> runs
;
436 * old_runs stores the latest available perturbation set for
437 * each multi-alignment element.
440 typedef int run_index
;
441 std::map
<run_index
, run
> old_runs
;
443 static void *diff_subdomain(void *args
);
445 struct subdomain_args
{
446 struct scale_cluster c
;
447 std::vector
<run
> runs
;
450 int i_min
, i_max
, j_min
, j_max
;
454 struct scale_cluster si
;
458 std::vector
<ale_pos
> perturb_multipliers
;
461 void diff(struct scale_cluster c
, const diff_trans
&t
,
462 int _ax_count
, int f
) {
464 if (runs
.size() == 2)
467 runs
.push_back(run(t
));
470 ax_count
= _ax_count
;
473 ui::get()->d2_align_sample_start();
475 if (interpolant
!= NULL
) {
478 * XXX: This has not been tested, and may be completely
482 transformation tt
= transformation::eu_identity();
483 tt
.set_current_element(t
);
484 interpolant
->set_parameters(tt
, c
.input
, c
.accum
->offset());
491 pthread_t
*threads
= (pthread_t
*) malloc(sizeof(pthread_t
) * N
);
492 pthread_attr_t
*thread_attr
= (pthread_attr_t
*) malloc(sizeof(pthread_attr_t
) * N
);
498 subdomain_args
*args
= new subdomain_args
[N
];
500 transformation::elem_bounds_int_t b
= elem_bounds
.scale_to_bounds(c
.accum
->height(), c
.accum
->width());
502 // fprintf(stdout, "[%d %d] [%d %d]\n",
503 // global_i_min, global_i_max, global_j_min, global_j_max);
505 for (int ti
= 0; ti
< N
; ti
++) {
507 args
[ti
].runs
= runs
;
508 args
[ti
].ax_count
= ax_count
;
510 args
[ti
].i_min
= b
.imin
+ ((b
.imax
- b
.imin
) * ti
) / N
;
511 args
[ti
].i_max
= b
.imin
+ ((b
.imax
- b
.imin
) * (ti
+ 1)) / N
;
512 args
[ti
].j_min
= b
.jmin
;
513 args
[ti
].j_max
= b
.jmax
;
514 args
[ti
].subdomain
= ti
;
516 pthread_attr_init(&thread_attr
[ti
]);
517 pthread_attr_setdetachstate(&thread_attr
[ti
], PTHREAD_CREATE_JOINABLE
);
518 pthread_create(&threads
[ti
], &thread_attr
[ti
], diff_subdomain
, &args
[ti
]);
520 diff_subdomain(&args
[ti
]);
524 for (int ti
= 0; ti
< N
; ti
++) {
526 pthread_join(threads
[ti
], NULL
);
528 runs
.back().add(args
[ti
].runs
.back());
538 ui::get()->d2_align_sample_stop();
544 std::vector
<diff_trans
> t_array
;
546 for (unsigned int r
= 0; r
< runs
.size(); r
++) {
547 t_array
.push_back(runs
[r
].offset
);
552 for (unsigned int r
= 0; r
< t_array
.size(); r
++)
553 diff(si
, t_array
[r
], ax_count
, frame
);
559 assert(runs
.size() >= 2);
560 assert(runs
[0].offset
.scale() == runs
[1].offset
.scale());
562 return (runs
[1].get_error() < runs
[0].get_error()
563 || (!finite(runs
[0].get_error()) && finite(runs
[1].get_error())));
566 int better_defined() {
567 assert(runs
.size() >= 2);
568 assert(runs
[0].offset
.scale() == runs
[1].offset
.scale());
570 return (runs
[1].get_error() < runs
[0].get_error());
573 diff_stat_generic(transformation::elem_bounds_t e
)
574 : runs(), old_runs(), perturb_multipliers() {
578 run_index
get_run_index(unsigned int perturb_index
) {
579 return perturb_index
;
582 run
&get_run(unsigned int perturb_index
) {
583 run_index index
= get_run_index(perturb_index
);
585 assert(old_runs
.count(index
));
586 return old_runs
[index
];
589 void rescale(ale_pos scale
, scale_cluster _si
) {
590 assert(runs
.size() == 1);
594 runs
[0].rescale(scale
);
599 ~diff_stat_generic() {
602 diff_stat_generic
&operator=(const diff_stat_generic
&dst
) {
604 * Copy run information.
607 old_runs
= dst
.old_runs
;
610 * Copy diff variables
613 ax_count
= dst
.ax_count
;
615 perturb_multipliers
= dst
.perturb_multipliers
;
616 elem_bounds
= dst
.elem_bounds
;
621 diff_stat_generic(const diff_stat_generic
&dst
) : runs(), old_runs(),
622 perturb_multipliers() {
626 void set_elem_bounds(transformation::elem_bounds_t e
) {
630 ale_accum
get_result() {
631 assert(runs
.size() == 1);
632 return runs
[0].result
;
635 ale_accum
get_divisor() {
636 assert(runs
.size() == 1);
637 return runs
[0].divisor
;
640 diff_trans
get_offset() {
641 assert(runs
.size() == 1);
642 return runs
[0].offset
;
645 int operator!=(diff_stat_generic
¶m
) {
646 return (get_error() != param
.get_error());
649 int operator==(diff_stat_generic
¶m
) {
650 return !(operator!=(param
));
653 ale_pos
get_error_perturb() {
654 assert(runs
.size() == 1);
655 return runs
[0].get_error_perturb();
658 ale_accum
get_error() const {
659 assert(runs
.size() == 1);
660 return runs
[0].get_error();
665 * Get the set of transformations produced by a given perturbation
667 void get_perturb_set(std::vector
<diff_trans
> *set
,
668 ale_pos adj_p
, ale_pos adj_o
, ale_pos adj_b
,
669 ale_pos
*current_bd
, ale_pos
*modified_bd
,
670 std::vector
<ale_pos
> multipliers
= std::vector
<ale_pos
>()) {
672 assert(runs
.size() == 1);
674 diff_trans
test_t(diff_trans::eu_identity());
677 * Translational or euclidean transformation
680 for (unsigned int i
= 0; i
< 2; i
++)
681 for (unsigned int s
= 0; s
< 2; s
++) {
683 if (!multipliers
.size())
684 multipliers
.push_back(1);
686 assert(finite(multipliers
[0]));
688 test_t
= get_offset();
690 // test_t.eu_modify(i, (s ? -adj_p : adj_p) * multipliers[0]);
691 test_t
.translate((i
? point(1, 0) : point(0, 1))
692 * (s
? -adj_p
: adj_p
)
695 test_t
.snap(adj_p
/ 2);
697 set
->push_back(test_t
);
698 multipliers
.erase(multipliers
.begin());
702 if (alignment_class
> 0)
703 for (unsigned int s
= 0; s
< 2; s
++) {
705 if (!multipliers
.size())
706 multipliers
.push_back(1);
708 assert(multipliers
.size());
709 assert(finite(multipliers
[0]));
711 if (!(adj_o
* multipliers
[0] < rot_max
))
714 ale_pos adj_s
= (s
? 1 : -1) * adj_o
* multipliers
[0];
716 test_t
= get_offset();
718 run_index ori
= get_run_index(set
->size());
719 point centroid
= point::undefined();
721 if (!old_runs
.count(ori
))
722 ori
= get_run_index(0);
724 if (!centroid
.finite() && old_runs
.count(ori
)) {
725 centroid
= old_runs
[ori
].get_error_centroid();
727 if (!centroid
.finite())
728 centroid
= old_runs
[ori
].get_centroid();
730 centroid
*= test_t
.scale()
731 / old_runs
[ori
].offset
.scale();
734 if (!centroid
.finite() && !test_t
.is_projective()) {
735 test_t
.eu_modify(2, adj_s
);
736 } else if (!centroid
.finite()) {
737 centroid
= point(si
.input
->height() / 2,
738 si
.input
->width() / 2);
740 test_t
.rotate(centroid
+ si
.accum
->offset(),
743 test_t
.rotate(centroid
+ si
.accum
->offset(),
747 test_t
.snap(adj_p
/ 2);
749 set
->push_back(test_t
);
750 multipliers
.erase(multipliers
.begin());
753 if (alignment_class
== 2) {
756 * Projective transformation
759 for (unsigned int i
= 0; i
< 4; i
++)
760 for (unsigned int j
= 0; j
< 2; j
++)
761 for (unsigned int s
= 0; s
< 2; s
++) {
763 if (!multipliers
.size())
764 multipliers
.push_back(1);
766 assert(multipliers
.size());
767 assert(finite(multipliers
[0]));
769 ale_pos adj_s
= (s
? -1 : 1) * adj_p
* multipliers
[0];
771 test_t
= get_offset();
773 if (perturb_type
== 0)
774 test_t
.gpt_modify_bounded(j
, i
, adj_s
, elem_bounds
.scale_to_bounds(si
.accum
->height(), si
.accum
->width()));
775 else if (perturb_type
== 1)
776 test_t
.gr_modify(j
, i
, adj_s
);
780 test_t
.snap(adj_p
/ 2);
782 set
->push_back(test_t
);
783 multipliers
.erase(multipliers
.begin());
792 if (bda_mult
!= 0 && adj_b
!= 0) {
794 for (unsigned int d
= 0; d
< get_offset().bd_count(); d
++)
795 for (unsigned int s
= 0; s
< 2; s
++) {
797 if (!multipliers
.size())
798 multipliers
.push_back(1);
800 assert (multipliers
.size());
801 assert (finite(multipliers
[0]));
803 ale_pos adj_s
= (s
? -1 : 1) * adj_b
* multipliers
[0];
805 if (bda_rate
> 0 && fabs(modified_bd
[d
] + adj_s
- current_bd
[d
]) > bda_rate
)
808 diff_trans test_t
= get_offset();
810 test_t
.bd_modify(d
, adj_s
);
812 set
->push_back(test_t
);
818 assert(runs
.size() == 2);
824 assert(runs
.size() == 2);
828 void perturb_test(ale_pos perturb
, ale_pos adj_p
, ale_pos adj_o
, ale_pos adj_b
,
829 ale_pos
*current_bd
, ale_pos
*modified_bd
, int stable
) {
831 assert(runs
.size() == 1);
833 std::vector
<diff_trans
> t_set
;
835 if (perturb_multipliers
.size() == 0) {
836 get_perturb_set(&t_set
, adj_p
, adj_o
, adj_b
,
837 current_bd
, modified_bd
);
839 for (unsigned int i
= 0; i
< t_set
.size(); i
++) {
840 diff_stat_generic test
= *this;
842 test
.diff(si
, t_set
[i
], ax_count
, frame
);
846 if (finite(test
.get_error_perturb()))
847 perturb_multipliers
.push_back(adj_p
/ test
.get_error_perturb());
849 perturb_multipliers
.push_back(1);
856 get_perturb_set(&t_set
, adj_p
, adj_o
, adj_b
, current_bd
, modified_bd
,
857 perturb_multipliers
);
859 int found_unreliable
= 1;
860 std::vector
<int> tested(t_set
.size(), 0);
862 for (unsigned int i
= 0; i
< t_set
.size(); i
++) {
863 run_index ori
= get_run_index(i
);
866 * Check for stability
869 && old_runs
.count(ori
)
870 && old_runs
[ori
].offset
== t_set
[i
])
874 std::vector
<ale_pos
> perturb_multipliers_original
= perturb_multipliers
;
876 while (found_unreliable
) {
878 found_unreliable
= 0;
880 for (unsigned int i
= 0; i
< t_set
.size(); i
++) {
885 diff(si
, t_set
[i
], ax_count
, frame
);
887 if (!(i
< perturb_multipliers
.size())
888 || !finite(perturb_multipliers
[i
])) {
890 perturb_multipliers
.resize(i
+ 1);
892 if (finite(perturb_multipliers
[i
])
894 && finite(adj_p
/ runs
[1].get_error_perturb())) {
895 perturb_multipliers
[i
] =
896 adj_p
/ runs
[1].get_error_perturb();
898 found_unreliable
= 1;
900 perturb_multipliers
[i
] = 1;
905 run_index ori
= get_run_index(i
);
907 if (old_runs
.count(ori
) == 0)
908 old_runs
.insert(std::pair
<run_index
, run
>(ori
, runs
[1]));
910 old_runs
[ori
] = runs
[1];
912 if (finite(perturb_multipliers_original
[i
])
913 && finite(runs
[1].get_error_perturb())
915 && finite(perturb_multipliers_original
[i
] * adj_p
/ runs
[1].get_error_perturb()))
916 perturb_multipliers
[i
] = perturb_multipliers_original
[i
]
917 * adj_p
/ runs
[1].get_error_perturb();
919 perturb_multipliers
[i
] = 1;
924 && runs
[1].get_error() < runs
[0].get_error()
925 && perturb_multipliers
[i
]
926 / perturb_multipliers_original
[i
] < 2) {
937 if (!found_unreliable
)
944 typedef diff_stat_generic
<trans_single
> diff_stat_t
;
945 typedef diff_stat_generic
<trans_multi
> diff_stat_multi
;
949 * Adjust exposure for an aligned frame B against reference A.
951 * Expects full-LOD images.
953 * Note: This method does not use any weighting, by certainty or
954 * otherwise, in the first exposure registration pass, as any bias of
955 * weighting according to color may also bias the exposure registration
956 * result; it does use weighting, including weighting by certainty
957 * (even if certainty weighting is not specified), in the second pass,
958 * under the assumption that weighting by certainty improves handling
959 * of out-of-range highlights, and that bias of exposure measurements
960 * according to color may generally be less harmful after spatial
961 * registration has been performed.
963 class exposure_ratio_iterate
: public thread::decompose_domain
{
968 struct scale_cluster c
;
969 const transformation
&t
;
973 void prepare_subdomains(unsigned int N
) {
974 asums
= new pixel_accum
[N
];
975 bsums
= new pixel_accum
[N
];
977 void subdomain_algorithm(unsigned int thread
,
978 int i_min
, int i_max
, int j_min
, int j_max
) {
980 point offset
= c
.accum
->offset();
982 for (mc_iterate
m(i_min
, i_max
, j_min
, j_max
, thread
); !m
.done(); m
++) {
984 unsigned int i
= (unsigned int) m
.get_i();
985 unsigned int j
= (unsigned int) m
.get_j();
987 if (ref_excluded(i
, j
, offset
, c
.ax_parameters
, ax_count
))
996 q
= (c
.input_scale
< 1.0 && interpolant
== NULL
)
997 ? t
.scaled_inverse_transform(
998 point(i
+ offset
[0], j
+ offset
[1]))
999 : t
.unscaled_inverse_transform(
1000 point(i
+ offset
[0], j
+ offset
[1]));
1003 * Check that the transformed coordinates are within
1004 * the boundaries of array c.input, that they are not
1005 * subject to exclusion, and that the weight value in
1006 * the accumulated array is nonzero.
1009 if (input_excluded(q
[0], q
[1], c
.ax_parameters
, ax_count
))
1013 && q
[0] <= c
.input
->height() - 1
1015 && q
[1] <= c
.input
->width() - 1
1016 && ((pixel
) c
.certainty
->get_pixel(i
, j
)).minabs_norm() != 0) {
1017 pixel a
= c
.accum
->get_pixel(i
, j
);
1020 b
= c
.input
->get_bl(q
);
1022 pixel weight
= ((c
.aweight
&& pass_number
)
1023 ? (pixel
) c
.aweight
->get_pixel(i
, j
)
1026 ? psqrt(c
.certainty
->get_pixel(i
, j
)
1027 * c
.input_certainty
->get_bl(q
, 1))
1030 asums
[thread
] += a
* weight
;
1031 bsums
[thread
] += b
* weight
;
1036 void finish_subdomains(unsigned int N
) {
1037 for (unsigned int n
= 0; n
< N
; n
++) {
1045 exposure_ratio_iterate(pixel_accum
*_asum
,
1047 struct scale_cluster _c
,
1048 const transformation
&_t
,
1050 int _pass_number
) : decompose_domain(0, _c
.accum
->height(),
1051 0, _c
.accum
->width()),
1057 ax_count
= _ax_count
;
1058 pass_number
= _pass_number
;
1061 exposure_ratio_iterate(pixel_accum
*_asum
,
1063 struct scale_cluster _c
,
1064 const transformation
&_t
,
1067 transformation::elem_bounds_int_t b
) : decompose_domain(b
.imin
, b
.imax
,
1074 ax_count
= _ax_count
;
1075 pass_number
= _pass_number
;
1079 static void set_exposure_ratio(unsigned int m
, struct scale_cluster c
,
1080 const transformation
&t
, int ax_count
, int pass_number
) {
1082 if (_exp_register
== 2) {
1084 * Use metadata only.
1086 ale_real gain_multiplier
= image_rw::exp(m
).get_gain_multiplier();
1087 pixel multiplier
= pixel(gain_multiplier
, gain_multiplier
, gain_multiplier
);
1089 image_rw::exp(m
).set_multiplier(multiplier
);
1090 ui::get()->exp_multiplier(multiplier
[0],
1097 pixel_accum
asum(0, 0, 0), bsum(0, 0, 0);
1099 exposure_ratio_iterate
eri(&asum
, &bsum
, c
, t
, ax_count
, pass_number
);
1102 // std::cerr << (asum / bsum) << " ";
1104 pixel_accum new_multiplier
;
1106 new_multiplier
= asum
/ bsum
* image_rw::exp(m
).get_multiplier();
1108 if (finite(new_multiplier
[0])
1109 && finite(new_multiplier
[1])
1110 && finite(new_multiplier
[2])) {
1111 image_rw::exp(m
).set_multiplier(new_multiplier
);
1112 ui::get()->exp_multiplier(new_multiplier
[0],
1118 static diff_stat_t
_align_element(ale_pos perturb
, ale_pos local_lower
,
1119 scale_cluster
*scale_clusters
, diff_stat_t here
,
1120 ale_pos adj_p
, ale_pos adj_o
, ale_pos adj_b
,
1121 ale_pos
*current_bd
, ale_pos
*modified_bd
,
1122 astate_t
*astate
, int lod
, scale_cluster si
) {
1125 * Run initial tests to get perturbation multipliers and error
1129 std::vector
<d2::trans_single
> t_set
;
1131 here
.get_perturb_set(&t_set
, adj_p
, adj_o
, adj_b
, current_bd
, modified_bd
);
1133 int stable_count
= 0;
1135 while (perturb
>= local_lower
) {
1137 ui::get()->alignment_dims(scale_clusters
[lod
].accum
->height(), scale_clusters
[lod
].accum
->width(),
1138 scale_clusters
[lod
].input
->height(), scale_clusters
[lod
].input
->width());
1141 * Orientational adjustment value in degrees.
1143 * Since rotational perturbation is now specified as an
1144 * arclength, we have to convert.
1147 ale_pos adj_o
= 2 * (double) perturb
1148 / sqrt(pow(scale_clusters
[0].input
->height(), 2)
1149 + pow(scale_clusters
[0].input
->width(), 2))
1154 * Barrel distortion adjustment value
1157 ale_pos adj_b
= perturb
* bda_mult
;
1159 trans_single old_offset
= here
.get_offset();
1161 here
.perturb_test(perturb
, adj_p
, adj_o
, adj_b
, current_bd
, modified_bd
,
1164 if (here
.get_offset() == old_offset
)
1169 if (stable_count
== 3) {
1176 && lod
> lrint(log(perturb
) / log(2)) - lod_preferred
) {
1179 * Work with images twice as large
1183 si
= scale_clusters
[lod
];
1186 * Rescale the transforms.
1189 ale_pos rescale_factor
= (double) scale_factor
1190 / (double) pow(2, lod
)
1191 / (double) here
.get_offset().scale();
1193 here
.rescale(rescale_factor
, si
);
1196 adj_p
= perturb
/ pow(2, lod
);
1203 ui::get()->alignment_perturbation_level(perturb
, lod
);
1206 ui::get()->set_match(here
.get_error());
1207 ui::get()->set_offset(here
.get_offset());
1211 ale_pos rescale_factor
= (double) scale_factor
1212 / (double) here
.get_offset().scale();
1214 here
.rescale(rescale_factor
, scale_clusters
[0]);
1221 * Align frame m against the reference.
1223 * XXX: the transformation class currently combines ordinary
1224 * transformations with scaling. This is somewhat convenient for
1225 * some things, but can also be confusing. This method, _align(), is
1226 * one case where special care must be taken to ensure that the scale
1227 * is always set correctly (by using the 'rescale' method).
1229 static diff_stat_multi
_align(int m
, int local_gs
, astate_t
*astate
) {
1231 ale_image input_frame
= astate
->get_input_frame();
1234 * Local upper/lower data, possibly dependent on image
1238 ale_pos local_lower
, local_upper
;
1239 ale_accum local_gs_mo
;
1242 * Select the minimum dimension as the reference.
1245 ale_pos reference_size
= input_frame
->height();
1246 if (input_frame
->width() < reference_size
)
1247 reference_size
= input_frame
->width();
1248 ale_accum reference_area
= input_frame
->height()
1249 * input_frame
->width();
1251 if (perturb_lower_percent
)
1252 local_lower
= (double) perturb_lower
1253 * (double) reference_size
1255 * (double) scale_factor
;
1257 local_lower
= perturb_lower
;
1259 if (perturb_upper_percent
)
1260 local_upper
= (double) perturb_upper
1261 * (double) reference_size
1263 * (double) scale_factor
;
1265 local_upper
= perturb_upper
;
1267 local_upper
= pow(2, floor(log(local_upper
) / log(2)));
1270 local_gs_mo
= (double) _gs_mo
1271 * (double) reference_area
1273 * (double) scale_factor
;
1275 local_gs_mo
= _gs_mo
;
1278 * Logarithms aren't exact, so we divide repeatedly to discover
1279 * how many steps will occur, and pass this information to the
1284 double step_variable
= local_upper
;
1285 while (step_variable
>= local_lower
) {
1290 ale_pos perturb
= local_upper
;
1293 kept_t
[latest
] = latest_t
;
1294 kept_ok
[latest
] = latest_ok
;
1298 * Determine how many levels of detail should be prepared, by
1299 * calculating the initial (largest) value for the
1300 * level-of-detail variable.
1303 int lod
= lrint(log(perturb
) / log(2)) - lod_preferred
;
1308 while (lod
> 0 && (reference_image
->width() < pow(2, lod
) * min_dimension
1309 || reference_image
->height() < pow(2, lod
) * min_dimension
))
1312 unsigned int steps
= (unsigned int) lod
+ 1;
1315 * Prepare multiple levels of detail.
1319 struct scale_cluster
*scale_clusters
= init_clusters(m
,
1320 scale_factor
, input_frame
, steps
,
1323 #error add check for non-NULL return
1326 * Initialize the default initial transform
1329 astate
->init_default();
1332 * Set the default transformation.
1335 transformation offset
= astate
->get_default();
1338 * Establish boundaries
1341 offset
.set_current_bounds(reference_image
);
1343 ui::get()->alignment_degree_max(offset
.get_coordinate(offset
.stack_depth() - 1).degree
);
1345 if (offset
.stack_depth() == 1) {
1346 ui::get()->set_steps(step_count
, 0);
1348 ui::get()->set_steps(offset
.get_coordinate(offset
.stack_depth() - 1).degree
+ 1, 1);
1352 * Load any file-specified transformations
1355 for (unsigned int index
= 0; index
< offset
.stack_depth(); index
++) {
1357 unsigned int index_2
;
1358 offset
.set_current_index(index
);
1360 offset
= tload_next(tload
, alignment_class
== 2,
1362 &is_default
, offset
.get_current_index() == 0);
1364 index_2
= offset
.get_current_index();
1366 if (index_2
> index
) {
1367 for (unsigned int index_3
= index
; index_3
< index_2
; index_3
++)
1368 astate
->set_is_default(index_3
, 1);
1373 astate
->set_is_default(index
, is_default
);
1376 offset
.set_current_index(0);
1378 astate
->init_frame_alignment_primary(&offset
, lod
, perturb
);
1381 * Control point alignment
1384 if (local_gs
== 5) {
1386 transformation o
= offset
;
1389 * Determine centroid data
1392 point current
, previous
;
1393 centroids(m
, ¤t
, &previous
);
1395 if (current
.defined() && previous
.defined()) {
1397 o
.set_dimensions(input_frame
);
1398 o
.translate((previous
- current
) * o
.scale());
1403 * Determine rotation for alignment classes other than translation.
1406 ale_pos lowest_error
= cp_rms_error(m
, o
);
1408 ale_pos rot_lower
= 2 * (double) local_lower
1409 / sqrt(pow(scale_clusters
[0].input
->height(), 2)
1410 + pow(scale_clusters
[0].input
->width(), 2))
1414 if (alignment_class
> 0)
1415 for (double rot
= 30; rot
> rot_lower
; rot
/= 2)
1416 for (double srot
= -rot
; srot
< rot
* 1.5; srot
+= rot
* 2) {
1417 int is_improved
= 1;
1418 while (is_improved
) {
1420 transformation test_t
= o
;
1422 * XXX: is this right?
1424 test_t
.rotate(current
* o
.scale(), srot
);
1425 ale_pos test_v
= cp_rms_error(m
, test_t
);
1427 if (test_v
< lowest_error
) {
1428 lowest_error
= test_v
;
1437 * Determine projective parameters through a local
1441 if (alignment_class
== 2) {
1442 ale_pos adj_p
= lowest_error
;
1444 if (adj_p
< local_lower
)
1445 adj_p
= local_lower
;
1447 while (adj_p
>= local_lower
) {
1448 transformation test_t
= o
;
1449 int is_improved
= 1;
1453 while (is_improved
) {
1456 for (int i
= 0; i
< 4; i
++)
1457 for (int j
= 0; j
< 2; j
++)
1458 for (adj_s
= -adj_p
; adj_s
<= adj_p
; adj_s
+= 2 * adj_p
) {
1462 if (perturb_type
== 0)
1463 test_t
.gpt_modify(j
, i
, adj_s
);
1464 else if (perturb_type
== 1)
1465 test_t
.gr_modify(j
, i
, adj_s
);
1469 test_v
= cp_rms_error(m
, test_t
);
1471 if (test_v
< lowest_error
) {
1472 lowest_error
= test_v
;
1485 * Pre-alignment exposure adjustment
1488 if (_exp_register
) {
1489 ui::get()->exposure_1();
1490 set_exposure_ratio(m
, scale_clusters
[0], offset
, local_ax_count
, 0);
1494 * Scale transform for lod
1497 for (int lod_
= 0; lod_
< lod
; lod_
++) {
1498 transformation s
= offset
;
1499 transformation t
= offset
;
1501 t
.rescale(1 / (double) 2);
1503 if (!(t
.scaled_height() > 0 && t
.scaled_height() < s
.scaled_height())
1504 || !(t
.scaled_width() > 0 && t
.scaled_width() < s
.scaled_width())) {
1505 perturb
/= pow(2, lod
- lod_
);
1513 ui::get()->set_offset(offset
);
1515 struct scale_cluster si
= scale_clusters
[lod
];
1518 * Projective adjustment value
1521 ale_pos adj_p
= perturb
/ pow(2, lod
);
1524 * Orientational adjustment value in degrees.
1526 * Since rotational perturbation is now specified as an
1527 * arclength, we have to convert.
1530 ale_pos adj_o
= (double) 2 * (double) perturb
1531 / sqrt(pow((double) scale_clusters
[0].input
->height(), (double) 2)
1532 + pow((double) scale_clusters
[0].input
->width(), (double) 2))
1537 * Barrel distortion adjustment value
1540 ale_pos adj_b
= perturb
* bda_mult
;
1543 * Global search overlap requirements.
1546 local_gs_mo
= (double) local_gs_mo
/ pow(pow(2, lod
), 2);
1549 * Alignment statistics.
1552 diff_stat_t
here(offset
.elem_bounds());
1555 * Current difference (error) value
1558 ui::get()->prematching();
1559 here
.diff(si
, offset
.get_current_element(), local_ax_count
, m
);
1560 ui::get()->set_match(here
.get_error());
1563 * Current and modified barrel distortion parameters
1566 ale_pos current_bd
[BARREL_DEGREE
];
1567 ale_pos modified_bd
[BARREL_DEGREE
];
1568 offset
.bd_get(current_bd
);
1569 offset
.bd_get(modified_bd
);
1572 * Translational global search step
1575 if (perturb
>= local_lower
&& local_gs
!= 0 && local_gs
!= 5
1576 && (local_gs
!= 6 || astate
->get_is_default(0))) {
1578 ui::get()->global_alignment(perturb
, lod
);
1579 ui::get()->gs_mo(local_gs_mo
);
1581 test_globals(&here
, si
, offset
, local_gs
, adj_p
,
1582 local_ax_count
, m
, local_gs_mo
, perturb
);
1584 ui::get()->set_match(here
.get_error());
1585 ui::get()->set_offset(here
.get_offset());
1589 * Perturbation adjustment loop.
1592 offset
.set_current_element(here
.get_offset());
1594 for (unsigned int i
= 0; i
< offset
.stack_depth(); i
++) {
1596 ui::get()->aligning_element(i
, offset
.stack_depth());
1598 offset
.set_current_index(i
);
1600 ui::get()->start_multi_alignment_element(offset
);
1602 ui::get()->set_offset(offset
);
1605 astate
->init_frame_alignment_nonprimary(&offset
, lod
, perturb
, i
);
1607 if (_exp_register
== 1) {
1608 ui::get()->exposure_1();
1609 pixel_accum
asum(0, 0, 0), bsum(0, 0, 0);
1610 exposure_ratio_iterate
eri(&asum
, &bsum
, scale_clusters
[0], offset
, local_ax_count
, 0,
1611 offset
.elem_bounds().scale_to_bounds(scale_clusters
[0].accum
->height(),
1612 scale_clusters
[0].accum
->width()));
1615 pixel_accum tr
= asum
/ bsum
;
1616 ui::get()->exp_multiplier(tr
[0], tr
[1], tr
[2]);
1617 offset
.set_tonal_multiplier(tr
);
1622 int e_div
= offset
.get_current_coordinate().degree
;
1623 ale_pos e_perturb
= perturb
;
1624 ale_pos e_adj_p
= adj_p
;
1625 ale_pos e_adj_b
= adj_b
;
1627 for (int d
= 0; d
< e_div
; d
++) {
1639 d2::trans_multi::elem_bounds_t b
= offset
.elem_bounds();
1641 for (int dim_satisfied
= 0; e_lod
> 0 && !dim_satisfied
; ) {
1642 int height
= scale_clusters
[e_lod
].accum
->height();
1643 int width
= scale_clusters
[e_lod
].accum
->width();
1645 d2::trans_multi::elem_bounds_int_t bi
= b
.scale_to_bounds(height
, width
);
1647 dim_satisfied
= bi
.satisfies_min_dim(min_dimension
);
1649 if (!dim_satisfied
) {
1656 * Scale transform for lod
1659 for (int lod_
= 0; lod_
< e_lod
; lod_
++) {
1660 trans_single s
= offset
.get_element(i
);
1661 trans_single t
= offset
.get_element(i
);
1663 t
.rescale(1 / (double) 2);
1665 if (!(t
.scaled_height() > 0 && t
.scaled_height() < s
.scaled_height())
1666 || !(t
.scaled_width() > 0 && t
.scaled_width() < s
.scaled_width())) {
1667 e_perturb
/= pow(2, e_lod
- lod_
);
1671 offset
.set_element(i
, t
);
1675 ui::get()->set_offset(offset
);
1679 * Announce perturbation size
1682 ui::get()->aligning(e_perturb
, e_lod
);
1684 si
= scale_clusters
[e_lod
];
1686 here
.set_elem_bounds(offset
.elem_bounds());
1688 here
.diff(si
, offset
.get_current_element(), local_ax_count
, m
);
1692 here
= check_ancestor_path(offset
, si
, here
, local_ax_count
, m
);
1694 here
= _align_element(e_perturb
, local_lower
, scale_clusters
,
1695 here
, e_adj_p
, adj_o
, e_adj_b
, current_bd
, modified_bd
,
1698 offset
.rescale(here
.get_offset().scale() / offset
.scale());
1700 offset
.set_current_element(here
.get_offset());
1702 if (i
> 0 && _exp_register
== 1) {
1703 if (ma_cert_satisfied(scale_clusters
[0], offset
, i
)) {
1704 ui::get()->exposure_2();
1705 pixel_accum
asum(0, 0, 0), bsum(0, 0, 0);
1706 exposure_ratio_iterate
eri(&asum
, &bsum
, scale_clusters
[0], offset
, local_ax_count
, 1,
1707 offset
.elem_bounds().scale_to_bounds(scale_clusters
[0].accum
->height(),
1708 scale_clusters
[0].accum
->width()));
1711 pixel_accum tr
= asum
/ bsum
;
1712 ui::get()->exp_multiplier(tr
[0], tr
[1], tr
[2]);
1713 offset
.set_tonal_multiplier(tr
);
1715 offset
.set_tonal_multiplier(offset
.get_element(offset
.parent_index(i
)).get_tonal_multiplier(point(0, 0)));
1717 } else if (_exp_register
== 1) {
1718 ui::get()->exposure_2();
1719 set_exposure_ratio(m
, scale_clusters
[0], offset
, local_ax_count
, 1);
1722 ui::get()->set_offset(offset
);
1724 if (i
+ 1 == offset
.stack_depth())
1725 ui::get()->alignment_degree_complete(offset
.get_coordinate(i
).degree
);
1726 else if (offset
.get_coordinate(i
).degree
!= offset
.get_coordinate(i
+ 1).degree
)
1727 ui::get()->alignment_degree_complete(offset
.get_coordinate(i
+ 1).degree
);
1730 offset
.set_current_index(0);
1733 offset
.set_multi(reference_image
, input_frame
);
1736 * Recalculate error on whole frame.
1739 ui::get()->postmatching();
1740 diff_stat_generic
<transformation
> multi_here(offset
.elem_bounds());
1741 multi_here
.diff(scale_clusters
[0], offset
, local_ax_count
, m
);
1742 ui::get()->set_match(multi_here
.get_error());
1745 * Free the level-of-detail structures
1748 final_clusters(scale_clusters
, scale_factor
, steps
);
1751 * Ensure that the match meets the threshold.
1754 if (threshold_ok(multi_here
.get_error())) {
1756 * Update alignment variables
1759 astate
->set_default(offset
);
1760 astate
->set_final(offset
);
1761 ui::get()->alignment_match_ok();
1762 } else if (local_gs
== 4) {
1765 * Align with outer starting points.
1769 * XXX: This probably isn't exactly the right thing to do,
1770 * since variables like old_initial_value have been overwritten.
1773 diff_stat_multi nested_result
= _align(m
, -1, astate
);
1775 if (threshold_ok(nested_result
.get_error())) {
1776 return nested_result
;
1777 } else if (nested_result
.get_error() < multi_here
.get_error()) {
1778 multi_here
= nested_result
;
1781 if (is_fail_default
)
1782 offset
= astate
->get_default();
1784 ui::get()->set_match(multi_here
.get_error());
1785 ui::get()->alignment_no_match();
1787 } else if (local_gs
== -1) {
1794 if (is_fail_default
)
1795 offset
= astate
->get_default();
1797 ui::get()->alignment_no_match();
1801 * Write the tonal registration multiplier as a comment.
1804 pixel trm
= image_rw::exp(m
).get_multiplier();
1805 tsave_trm(tsave
, trm
[0], trm
[1], trm
[2]);
1808 * Save the transformation information
1811 for (unsigned int index
= 0; index
< offset
.stack_depth(); index
++) {
1812 offset
.set_current_index(index
);
1814 tsave_next(tsave
, offset
, alignment_class
== 2,
1815 offset
.get_current_index() == 0);
1818 offset
.set_current_index(0);
1821 * Update match statistics.
1824 match_sum
+= (1 - multi_here
.get_error()) * (ale_accum
) 100;
1832 int ale_align(ale_context ac
, ale_image a
, ale_image b
, ale_trans start
,
1833 ale_align_properties align_properties
, ale_trans result
) {
1834 #warning function unfinished.