src/align, src/libale: Add functions for pos/neg inf calculation, and use these in...
[libale.git] / src / align.c
1 /*
2 * Copyright 2002, 2004, 2005, 2006, 2007, 2008, 2009 David Hilvert <dhilvert@gmail.com>
4 * This file is part of libale.
6 * libale is free software: you can redistribute it and/or modify it under the
7 * terms of the GNU Affero General Public License as published by the Free
8 * Software Foundation, either version 3 of the License, or (at your option)
9 * any later version.
11 * libale is distributed in the hope that it will be useful, but WITHOUT ANY
12 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
13 * FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
14 * more details.
16 * You should have received a copy of the GNU Affero General Public License
17 * along with libale. If not, see <http://www.gnu.org/licenses/>.
20 #include "libale.h"
23 * API Implementation.
27 * Types for scale clusters.
30 #if 0
33 * Experimental non-linear scaling approach.
36 struct nl_scale_cluster {
37 ale_image accum_max;
38 ale_image accum_min;
39 ale_image aweight_max;
40 ale_image aweight_min;
42 ale_pos input_scale;
43 ale_image input_certainty_max;
44 ale_image input_certainty_min;
45 ale_image input_max;
46 ale_image input_min;
48 #endif
50 struct scale_cluster {
51 ale_image accum;
52 ale_image aweight;
54 ale_pos input_scale;
55 ale_image input_certainty;
56 ale_image input;
58 #if 0
59 nl_scale_cluster *nl_scale_clusters;
60 #endif
64 static struct scale_cluster *init_clusters(ale_context ac, int frame, ale_pos scale_factor,
65 ale_image input_frame, ale_trans input_trans, ale_image reference_image,
66 ale_image alignment_weights, unsigned int steps, ale_filter interpolant) {
68 int input_bayer = ale_trans_get_bayer(input_trans);
69 ale_real input_gamma = ale_trans_get_gamma(input_trans);
70 ale_certainty input_certainty = ale_trans_retain_certainty(input_trans);
71 ale_real input_black = ale_trans_get_black(input_trans);
73 unsigned int step;
76 * Allocate memory for the array.
79 struct scale_cluster *scale_clusters =
80 (struct scale_cluster *) malloc(steps * sizeof(struct scale_cluster));
82 if (!scale_clusters)
83 return NULL;
86 * Prepare images and exclusion regions for the highest level
87 * of detail.
90 scale_clusters[0].accum = reference_image;
92 ale_context_signal(ac, "ui_constructing_lod_clusters", 0.0);
94 scale_clusters[0].input_scale = scale_factor;
95 if (scale_factor < 1.0 && interpolant == NULL)
96 scale_clusters[0].input = ale_scale_image(input_frame, scale_factor, ale_context_get_color_type(ac), input_gamma, input_bayer);
97 else
98 scale_clusters[0].input = input_frame;
100 scale_clusters[0].aweight = alignment_weights;
103 * Allocate and determine input frame certainty.
106 scale_clusters[0].input_certainty = ale_clone_image(scale_clusters[0].input, ale_context_get_certainty_type(ac));
108 ale_eval("MAP_PIXEL(%0I, p, CERTAINTY(%1t, p, %2f, GET_AFFINE_PIXEL(%3i, p, %4f)))",
109 scale_clusters[0].input_certainty, input_black, input_certainty,
110 scale_clusters[0].input, (scale_factor < 1.0 && interpolant == NULL) ? 1 : input_gamma);
112 #if 0
113 init_nl_cluster(&(scale_clusters[0]));
114 #endif
117 * Prepare reduced-detail images and exclusion
118 * regions.
121 for (step = 1; step < steps; step++) {
122 ale_context_signal(ac, "ui_constructing_lod_clusters", step);
123 scale_clusters[step].accum = ale_scale_image(scale_clusters[step - 1].accum, 0.5, ale_context_get_color_type(ac), 1, ALE_BAYER_NONE);
124 scale_clusters[step].aweight = ale_scale_image_wt(scale_clusters[step - 1].aweight, 0.5, ale_context_get_weight_type(ac), ALE_BAYER_NONE);
126 ale_pos sf = scale_clusters[step - 1].input_scale;
128 scale_clusters[step].input_scale = sf / 2;
130 if (sf >= 2.0 || interpolant != NULL) {
131 scale_clusters[step].input = scale_clusters[step - 1].input;
132 scale_clusters[step].input_certainty = scale_clusters[step - 1].input_certainty;
133 } else if (sf >= 1.0) {
134 scale_clusters[step].input = ale_scale_image(scale_clusters[step - 1].input, sf / 2, ale_context_get_color_type(ac), input_gamma, input_bayer);
135 scale_clusters[step].input_certainty = ale_scale_image_wt(scale_clusters[step - 1].input_certainty, sf / 2, ale_context_get_certainty_type(ac), input_bayer);
136 } else {
137 scale_clusters[step].input = ale_scale_image(scale_clusters[step - 1].input, 0.5, ale_context_get_color_type(ac), 1, ALE_BAYER_NONE);
138 scale_clusters[step].input_certainty = ale_scale_image_wt(scale_clusters[step - 1].input_certainty, 0.5, ale_context_get_certainty_type(ac), ALE_BAYER_NONE);
141 #if 0
142 init_nl_cluster(&(scale_clusters[step]));
143 #endif
146 ale_certainty_release(input_certainty);
148 return scale_clusters;
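/*
 * Illustrative sketch (not part of the build): how input_scale evolves
 * across the level-of-detail clusters constructed by init_clusters()
 * above, assuming interpolant == NULL.  Level 0 starts at scale_factor;
 * each further level halves the previous level's scale, and the input
 * image is only rescaled once the previous scale drops below 2.  The
 * function name and printf output are local to this sketch.
 */
#if 0
#include <stdio.h>

static void sketch_lod_scales(double scale_factor, unsigned int steps)
{
	double sf = scale_factor;	/* input_scale of the previous level */
	unsigned int step;

	printf("step 0: input_scale = %g (%s)\n", sf,
	       (sf < 1.0) ? "input scaled by scale_factor" : "input used as-is");

	for (step = 1; step < steps; step++) {
		const char *action;

		if (sf >= 2.0)
			action = "reuse previous level's input";
		else if (sf >= 1.0)
			action = "scale previous input by sf / 2";
		else
			action = "scale previous input by 0.5";

		printf("step %u: input_scale = %g (%s)\n", step, sf / 2, action);

		sf /= 2;
	}
}
#endif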
152 * Not-quite-symmetric difference function. Determines the difference in areas
153 * where the arrays overlap. Uses the reference array's notion of pixel positions.
155 * For the purposes of determining the difference, this function divides each
156 * pixel value by the corresponding image's average pixel magnitude, unless we
157 * are:
159 * a) extending the boundaries of the image, or
161 * b) following the previous frame's transform.
163 * If we are doing Monte Carlo pixel sampling for alignment, we
164 * typically sample a subset of available pixels; otherwise, we sample
165 * all pixels.
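/*
 * Illustrative sketch (not part of the build): the error accumulated by
 * the 'run' structure below, reduced to scalar samples.  For overlapping
 * samples a (reference) and b (input) with weights w and metric exponent
 * p, run_sample() accumulates result += w * |a - b|^p and divisor +=
 * w * max(a, b)^p, and run_get_error() reports (result / divisor)^(1/p).
 * The function below is a stand-in with example-only inputs.
 */
#if 0
#include <math.h>

static double sketch_pairwise_error(const double *a, const double *b,
                                    const double *w, int n, double p)
{
	double result = 0, divisor = 0;
	int i;

	for (i = 0; i < n; i++) {
		double larger = (a[i] > b[i]) ? a[i] : b[i];

		result  += w[i] * pow(fabs(a[i] - b[i]), p);
		divisor += w[i] * pow(larger, p);
	}

	return pow(result / divisor, 1 / p);
}
#endif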
169 typedef struct {
171 ale_trans offset;
173 ale_accum result;
174 ale_accum divisor;
176 point max, min;
177 ale_accum centroid[2], centroid_divisor;
178 ale_accum de_centroid[2], de_centroid_v, de_sum;
179 } run;
181 static void run_init(run *r) {
183 r->result = 0;
184 r->divisor = 0;
186 r->min = point_posinf(2);
187 r->max = point_neginf(2);
189 r->centroid[0] = 0;
190 r->centroid[1] = 0;
191 r->centroid_divisor = 0;
193 r->de_centroid[0] = 0;
194 r->de_centroid[1] = 0;
196 r->de_centroid_v = 0;
198 r->de_sum = 0;
201 static void run_init(diff_trans _offset) {
202 offset = _offset;
203 init();
207 * Required for STL sanity.
209 static run_run() : offset(diff_trans::eu_identity()) {
210 init();
213 static run_run(diff_trans _offset) : offset(_offset) {
214 init(_offset);
217 static void run_add(const run &_run) {
218 result += _run.result;
219 divisor += _run.divisor;
221 for (int d = 0; d < 2; d++) {
222 if (min[d] > _run.min[d])
223 min[d] = _run.min[d];
224 if (max[d] < _run.max[d])
225 max[d] = _run.max[d];
226 centroid[d] += _run.centroid[d];
227 de_centroid[d] += _run.de_centroid[d];
230 centroid_divisor += _run.centroid_divisor;
231 de_centroid_v += _run.de_centroid_v;
232 de_sum += _run.de_sum;
235 static run_run(const run &_run) : offset(_run.offset) {
238 * Initialize
240 init(_run.offset);
243 * Add
245 add(_run);
248 static run &run_operator=(const run &_run) {
251 * Initialize
253 init(_run.offset);
256 * Add
258 add(_run);
260 return *this;
263 static ale_accum run_get_error() const {
264 return pow(result / divisor, 1/(ale_accum) metric_exponent);
267 static void run_sample(int f, scale_cluster c, int i, int j, point t, point u,
268 const run &comparison) {
270 pixel pa = c.accum->get_pixel(i, j);
272 ale_real this_result[2] = { 0, 0 };
273 ale_real this_divisor[2] = { 0, 0 };
275 pixel p[2];
276 pixel weight[2];
277 weight[0] = pixel(1, 1, 1);
278 weight[1] = pixel(1, 1, 1);
280 pixel tm = offset.get_tonal_multiplier(point(i, j) + c.accum->offset());
282 if (interpolant != NULL) {
283 interpolant->filtered(i, j, &p[0], &weight[0], 1, f);
285 if (weight[0].min_norm() > ale_real_weight_floor) {
286 p[0] /= weight[0];
287 } else {
288 return;
291 } else {
292 p[0] = c.input->get_bl(t);
295 p[0] *= tm;
297 if (u.defined()) {
298 p[1] = c.input->get_bl(u);
299 p[1] *= tm;
304 * Handle certainty.
307 if (certainty_weights == 1) {
310 * For speed, use arithmetic interpolation (get_bl(.))
311 * instead of geometric (get_bl(., 1))
314 weight[0] *= c.input_certainty->get_bl(t);
315 if (u.defined())
316 weight[1] *= c.input_certainty->get_bl(u);
317 weight[0] *= c.certainty->get_pixel(i, j);
318 weight[1] *= c.certainty->get_pixel(i, j);
321 if (c.aweight != NULL) {
322 weight[0] *= c.aweight->get_pixel(i, j);
323 weight[1] *= c.aweight->get_pixel(i, j);
327 * Update sampling area statistics
330 if (min[0] > i)
331 min[0] = i;
332 if (min[1] > j)
333 min[1] = j;
334 if (max[0] < i)
335 max[0] = i;
336 if (max[1] < j)
337 max[1] = j;
339 centroid[0] += (weight[0][0] + weight[0][1] + weight[0][2]) * i;
340 centroid[1] += (weight[0][0] + weight[0][1] + weight[0][2]) * j;
341 centroid_divisor += (weight[0][0] + weight[0][1] + weight[0][2]);
344 * Determine alignment type.
347 for (int m = 0; m < (u.defined() ? 2 : 1); m++)
348 if (channel_alignment_type == 0) {
350 * Align based on all channels.
354 for (int k = 0; k < 3; k++) {
355 ale_real achan = pa[k];
356 ale_real bchan = p[m][k];
358 this_result[m] += weight[m][k] * pow(fabs(achan - bchan), metric_exponent);
359 this_divisor[m] += weight[m][k] * pow(achan > bchan ? achan : bchan, metric_exponent);
361 } else if (channel_alignment_type == 1) {
363 * Align based on the green channel.
366 ale_real achan = pa[1];
367 ale_real bchan = p[m][1];
369 this_result[m] = weight[m][1] * pow(fabs(achan - bchan), metric_exponent);
370 this_divisor[m] = weight[m][1] * pow(achan > bchan ? achan : bchan, metric_exponent);
371 } else if (channel_alignment_type == 2) {
373 * Align based on the sum of all channels.
376 ale_real asum = 0;
377 ale_real bsum = 0;
378 ale_real wsum = 0;
380 for (int k = 0; k < 3; k++) {
381 asum += pa[k];
382 bsum += p[m][k];
383 wsum += weight[m][k] / 3;
386 this_result[m] = wsum * pow(fabs(asum - bsum), metric_exponent);
387 this_divisor[m] = wsum * pow(asum > bsum ? asum : bsum, metric_exponent);
390 if (u.defined()) {
391 // ale_real de = fabs(this_result[0] / this_divisor[0]
392 // - this_result[1] / this_divisor[1]);
393 ale_real de = fabs(this_result[0] - this_result[1]);
395 de_centroid[0] += de * (ale_real) i;
396 de_centroid[1] += de * (ale_real) j;
398 de_centroid_v += de * (ale_real) t.lengthto(u);
400 de_sum += de;
403 result += (this_result[0]);
404 divisor += (this_divisor[0]);
407 static void run_rescale(ale_pos scale) {
408 offset.rescale(scale);
410 de_centroid[0] *= scale;
411 de_centroid[1] *= scale;
412 de_centroid_v *= scale;
415 static point run_get_centroid() {
416 point result = point(centroid[0] / centroid_divisor, centroid[1] / centroid_divisor);
418 assert (finite(centroid[0])
419 && finite(centroid[1])
420 && (result.defined() || centroid_divisor == 0));
422 return result;
425 static point run_get_error_centroid() {
426 point result = point(de_centroid[0] / de_sum, de_centroid[1] / de_sum);
427 return result;
431 static ale_pos run_get_error_perturb() {
432 ale_pos result = de_centroid_v / de_sum;
434 return result;
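/*
 * Illustrative sketch (not part of the build): the error-weighted
 * statistics maintained above.  Each sample contributes its per-pixel
 * error 'de' as a weight; get_error_centroid() is then the error-weighted
 * mean position, and get_error_perturb() the error-weighted mean
 * displacement between the two compared transforms.  All names and inputs
 * below are local to this sketch.
 */
#if 0
static void sketch_error_centroid(const double *de, const double *i_pos,
                                  const double *j_pos, const double *disp,
                                  int n, double centroid[2], double *perturb)
{
	double de_centroid[2] = { 0, 0 }, de_centroid_v = 0, de_sum = 0;
	int k;

	for (k = 0; k < n; k++) {
		de_centroid[0] += de[k] * i_pos[k];
		de_centroid[1] += de[k] * j_pos[k];
		de_centroid_v  += de[k] * disp[k];
		de_sum         += de[k];
	}

	centroid[0] = de_centroid[0] / de_sum;
	centroid[1] = de_centroid[1] / de_sum;
	*perturb    = de_centroid_v / de_sum;
}
#endif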
437 template<class diff_trans>
438 class diff_stat_generic {
440 transformation::elem_bounds_t elem_bounds;
443 * When non-empty, runs.front() is best, runs.back() is
444 * testing.
447 std::vector<run> runs;
450 * old_runs stores the latest available perturbation set for
451 * each multi-alignment element.
454 typedef int run_index;
455 std::map<run_index, run> old_runs;
457 static void *diff_subdomain(void *args);
459 struct subdomain_args {
460 struct scale_cluster c;
461 std::vector<run> runs;
462 int ax_count;
463 int f;
464 int i_min, i_max, j_min, j_max;
465 int subdomain;
468 struct scale_cluster si;
469 int ax_count;
470 int frame;
472 std::vector<ale_pos> perturb_multipliers;
474 public:
475 void diff(struct scale_cluster c, const diff_trans &t,
476 int _ax_count, int f) {
478 if (runs.size() == 2)
479 runs.pop_back();
481 runs.push_back(run(t));
483 si = c;
484 ax_count = _ax_count;
485 frame = f;
487 ui::get()->d2_align_sample_start();
489 if (interpolant != NULL) {
492 * XXX: This has not been tested, and may be completely
493 * wrong.
496 transformation tt = transformation::eu_identity();
497 tt.set_current_element(t);
498 interpolant->set_parameters(tt, c.input, c.accum->offset());
501 int N;
502 #ifdef USE_PTHREAD
503 N = thread::count();
505 pthread_t *threads = (pthread_t *) malloc(sizeof(pthread_t) * N);
506 pthread_attr_t *thread_attr = (pthread_attr_t *) malloc(sizeof(pthread_attr_t) * N);
508 #else
509 N = 1;
510 #endif
512 subdomain_args *args = new subdomain_args[N];
514 transformation::elem_bounds_int_t b = elem_bounds.scale_to_bounds(c.accum->height(), c.accum->width());
516 // fprintf(stdout, "[%d %d] [%d %d]\n",
517 // global_i_min, global_i_max, global_j_min, global_j_max);
519 for (int ti = 0; ti < N; ti++) {
520 args[ti].c = c;
521 args[ti].runs = runs;
522 args[ti].ax_count = ax_count;
523 args[ti].f = f;
524 args[ti].i_min = b.imin + ((b.imax - b.imin) * ti) / N;
525 args[ti].i_max = b.imin + ((b.imax - b.imin) * (ti + 1)) / N;
526 args[ti].j_min = b.jmin;
527 args[ti].j_max = b.jmax;
528 args[ti].subdomain = ti;
529 #ifdef USE_PTHREAD
530 pthread_attr_init(&thread_attr[ti]);
531 pthread_attr_setdetachstate(&thread_attr[ti], PTHREAD_CREATE_JOINABLE);
532 pthread_create(&threads[ti], &thread_attr[ti], diff_subdomain, &args[ti]);
533 #else
534 diff_subdomain(&args[ti]);
535 #endif
538 for (int ti = 0; ti < N; ti++) {
539 #ifdef USE_PTHREAD
540 pthread_join(threads[ti], NULL);
541 #endif
542 runs.back().add(args[ti].runs.back());
545 #ifdef USE_PTHREAD
546 free(threads);
547 free(thread_attr);
548 #endif
550 delete[] args;
552 ui::get()->d2_align_sample_stop();
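/*
 * Illustrative sketch (not part of the build): the row partition used by
 * diff() above.  Each of the N subdomains receives a contiguous band of
 * rows; with this integer arithmetic, band ti ends exactly where band
 * ti + 1 begins, so the bands tile [imin, imax) without gaps or overlap
 * (assuming each subdomain iterates over [i_min, i_max)).
 */
#if 0
#include <stdio.h>

static void sketch_row_partition(int imin, int imax, int N)
{
	int ti;

	for (ti = 0; ti < N; ti++) {
		int i_min = imin + ((imax - imin) * ti) / N;
		int i_max = imin + ((imax - imin) * (ti + 1)) / N;

		printf("subdomain %d: rows [%d, %d)\n", ti, i_min, i_max);
	}
}
#endif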
556 private:
557 void rediff() {
558 std::vector<diff_trans> t_array;
560 for (unsigned int r = 0; r < runs.size(); r++) {
561 t_array.push_back(runs[r].offset);
564 runs.clear();
566 for (unsigned int r = 0; r < t_array.size(); r++)
567 diff(si, t_array[r], ax_count, frame);
571 public:
572 int better() {
573 assert(runs.size() >= 2);
574 assert(runs[0].offset.scale() == runs[1].offset.scale());
576 return (runs[1].get_error() < runs[0].get_error()
577 || (!finite(runs[0].get_error()) && finite(runs[1].get_error())));
580 int better_defined() {
581 assert(runs.size() >= 2);
582 assert(runs[0].offset.scale() == runs[1].offset.scale());
584 return (runs[1].get_error() < runs[0].get_error());
587 diff_stat_generic(transformation::elem_bounds_t e)
588 : runs(), old_runs(), perturb_multipliers() {
589 elem_bounds = e;
592 run_index get_run_index(unsigned int perturb_index) {
593 return perturb_index;
596 run &get_run(unsigned int perturb_index) {
597 run_index index = get_run_index(perturb_index);
599 assert(old_runs.count(index));
600 return old_runs[index];
603 void rescale(ale_pos scale, scale_cluster _si) {
604 assert(runs.size() == 1);
606 si = _si;
608 runs[0].rescale(scale);
610 rediff();
613 ~diff_stat_generic() {
616 diff_stat_generic &operator=(const diff_stat_generic &dst) {
618 * Copy run information.
620 runs = dst.runs;
621 old_runs = dst.old_runs;
624 * Copy diff variables
626 si = dst.si;
627 ax_count = dst.ax_count;
628 frame = dst.frame;
629 perturb_multipliers = dst.perturb_multipliers;
630 elem_bounds = dst.elem_bounds;
632 return *this;
635 diff_stat_generic(const diff_stat_generic &dst) : runs(), old_runs(),
636 perturb_multipliers() {
637 operator=(dst);
640 void set_elem_bounds(transformation::elem_bounds_t e) {
641 elem_bounds = e;
644 ale_accum get_result() {
645 assert(runs.size() == 1);
646 return runs[0].result;
649 ale_accum get_divisor() {
650 assert(runs.size() == 1);
651 return runs[0].divisor;
654 diff_trans get_offset() {
655 assert(runs.size() == 1);
656 return runs[0].offset;
659 int operator!=(diff_stat_generic &param) {
660 return (get_error() != param.get_error());
663 int operator==(diff_stat_generic &param) {
664 return !(operator!=(param));
667 ale_pos get_error_perturb() {
668 assert(runs.size() == 1);
669 return runs[0].get_error_perturb();
672 ale_accum get_error() const {
673 assert(runs.size() == 1);
674 return runs[0].get_error();
677 public:
679 * Get the set of transformations produced by a given perturbation
681 void get_perturb_set(std::vector<diff_trans> *set,
682 ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
683 ale_pos *current_bd, ale_pos *modified_bd,
684 std::vector<ale_pos> multipliers = std::vector<ale_pos>()) {
686 assert(runs.size() == 1);
688 diff_trans test_t(diff_trans::eu_identity());
691 * Translational or Euclidean transformation
694 for (unsigned int i = 0; i < 2; i++)
695 for (unsigned int s = 0; s < 2; s++) {
697 if (!multipliers.size())
698 multipliers.push_back(1);
700 assert(finite(multipliers[0]));
702 test_t = get_offset();
704 // test_t.eu_modify(i, (s ? -adj_p : adj_p) * multipliers[0]);
705 test_t.translate((i ? point(1, 0) : point(0, 1))
706 * (s ? -adj_p : adj_p)
707 * multipliers[0]);
709 test_t.snap(adj_p / 2);
711 set->push_back(test_t);
712 multipliers.erase(multipliers.begin());
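/*
 * Note: the double loop above emits four translation candidates per call
 * (+/- adj_p along each of the two axes), each passed through
 * snap(adj_p / 2) and scaled by the multiplier consumed from the front
 * of 'multipliers'.
 */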
716 if (alignment_class > 0)
717 for (unsigned int s = 0; s < 2; s++) {
719 if (!multipliers.size())
720 multipliers.push_back(1);
722 assert(multipliers.size());
723 assert(finite(multipliers[0]));
725 if (!(adj_o * multipliers[0] < rot_max))
726 return;
728 ale_pos adj_s = (s ? 1 : -1) * adj_o * multipliers[0];
730 test_t = get_offset();
732 run_index ori = get_run_index(set->size());
733 point centroid = point::undefined();
735 if (!old_runs.count(ori))
736 ori = get_run_index(0);
738 if (!centroid.finite() && old_runs.count(ori)) {
739 centroid = old_runs[ori].get_error_centroid();
741 if (!centroid.finite())
742 centroid = old_runs[ori].get_centroid();
744 centroid *= test_t.scale()
745 / old_runs[ori].offset.scale();
748 if (!centroid.finite() && !test_t.is_projective()) {
749 test_t.eu_modify(2, adj_s);
750 } else if (!centroid.finite()) {
751 centroid = point(si.input->height() / 2,
752 si.input->width() / 2);
754 test_t.rotate(centroid + si.accum->offset(),
755 adj_s);
756 } else {
757 test_t.rotate(centroid + si.accum->offset(),
758 adj_s);
761 test_t.snap(adj_p / 2);
763 set->push_back(test_t);
764 multipliers.erase(multipliers.begin());
767 if (alignment_class == 2) {
770 * Projective transformation
773 for (unsigned int i = 0; i < 4; i++)
774 for (unsigned int j = 0; j < 2; j++)
775 for (unsigned int s = 0; s < 2; s++) {
777 if (!multipliers.size())
778 multipliers.push_back(1);
780 assert(multipliers.size());
781 assert(finite(multipliers[0]));
783 ale_pos adj_s = (s ? -1 : 1) * adj_p * multipliers[0];
785 test_t = get_offset();
787 if (perturb_type == 0)
788 test_t.gpt_modify_bounded(j, i, adj_s, elem_bounds.scale_to_bounds(si.accum->height(), si.accum->width()));
789 else if (perturb_type == 1)
790 test_t.gr_modify(j, i, adj_s);
791 else
792 assert(0);
794 test_t.snap(adj_p / 2);
796 set->push_back(test_t);
797 multipliers.erase(multipliers.begin());
803 * Barrel distortion
806 if (bda_mult != 0 && adj_b != 0) {
808 for (unsigned int d = 0; d < get_offset().bd_count(); d++)
809 for (unsigned int s = 0; s < 2; s++) {
811 if (!multipliers.size())
812 multipliers.push_back(1);
814 assert (multipliers.size());
815 assert (finite(multipliers[0]));
817 ale_pos adj_s = (s ? -1 : 1) * adj_b * multipliers[0];
819 if (bda_rate > 0 && fabs(modified_bd[d] + adj_s - current_bd[d]) > bda_rate)
820 continue;
822 diff_trans test_t = get_offset();
824 test_t.bd_modify(d, adj_s);
826 set->push_back(test_t);
831 void confirm() {
832 assert(runs.size() == 2);
833 runs[0] = runs[1];
834 runs.pop_back();
837 void discard() {
838 assert(runs.size() == 2);
839 runs.pop_back();
842 void perturb_test(ale_pos perturb, ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
843 ale_pos *current_bd, ale_pos *modified_bd, int stable) {
845 assert(runs.size() == 1);
847 std::vector<diff_trans> t_set;
849 if (perturb_multipliers.size() == 0) {
850 get_perturb_set(&t_set, adj_p, adj_o, adj_b,
851 current_bd, modified_bd);
853 for (unsigned int i = 0; i < t_set.size(); i++) {
854 diff_stat_generic test = *this;
856 test.diff(si, t_set[i], ax_count, frame);
858 test.confirm();
860 if (finite(test.get_error_perturb()))
861 perturb_multipliers.push_back(adj_p / test.get_error_perturb());
862 else
863 perturb_multipliers.push_back(1);
867 t_set.clear();
870 get_perturb_set(&t_set, adj_p, adj_o, adj_b, current_bd, modified_bd,
871 perturb_multipliers);
873 int found_unreliable = 1;
874 std::vector<int> tested(t_set.size(), 0);
876 for (unsigned int i = 0; i < t_set.size(); i++) {
877 run_index ori = get_run_index(i);
880 * Check for stability
882 if (stable
883 && old_runs.count(ori)
884 && old_runs[ori].offset == t_set[i])
885 tested[i] = 1;
888 std::vector<ale_pos> perturb_multipliers_original = perturb_multipliers;
890 while (found_unreliable) {
892 found_unreliable = 0;
894 for (unsigned int i = 0; i < t_set.size(); i++) {
896 if (tested[i])
897 continue;
899 diff(si, t_set[i], ax_count, frame);
901 if (!(i < perturb_multipliers.size())
902 || !finite(perturb_multipliers[i])) {
904 perturb_multipliers.resize(i + 1);
906 if (finite(perturb_multipliers[i])
907 && finite(adj_p)
908 && finite(adj_p / runs[1].get_error_perturb())) {
909 perturb_multipliers[i] =
910 adj_p / runs[1].get_error_perturb();
912 found_unreliable = 1;
913 } else
914 perturb_multipliers[i] = 1;
916 continue;
919 run_index ori = get_run_index(i);
921 if (old_runs.count(ori) == 0)
922 old_runs.insert(std::pair<run_index, run>(ori, runs[1]));
923 else
924 old_runs[ori] = runs[1];
926 if (finite(perturb_multipliers_original[i])
927 && finite(runs[1].get_error_perturb())
928 && finite(adj_p)
929 && finite(perturb_multipliers_original[i] * adj_p / runs[1].get_error_perturb()))
930 perturb_multipliers[i] = perturb_multipliers_original[i]
931 * adj_p / runs[1].get_error_perturb();
932 else
933 perturb_multipliers[i] = 1;
935 tested[i] = 1;
937 if (better()
938 && runs[1].get_error() < runs[0].get_error()
939 && perturb_multipliers[i]
940 / perturb_multipliers_original[i] < 2) {
941 runs[0] = runs[1];
942 runs.pop_back();
943 return;
948 if (runs.size() > 1)
949 runs.pop_back();
951 if (!found_unreliable)
952 return;
958 typedef diff_stat_generic<trans_single> diff_stat_t;
959 typedef diff_stat_generic<trans_multi> diff_stat_multi;
963 * Adjust exposure for an aligned frame B against reference A.
965 * Expects full-LOD images.
967 * Note: This method does not use any weighting, by certainty or
968 * otherwise, in the first exposure registration pass, as any bias of
969 * weighting according to color may also bias the exposure registration
970 * result; it does use weighting, including weighting by certainty
971 * (even if certainty weighting is not specified), in the second pass,
972 * under the assumption that weighting by certainty improves handling
973 * of out-of-range highlights, and that bias of exposure measurements
974 * according to color may generally be less harmful after spatial
975 * registration has been performed.
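/*
 * Illustrative sketch (not part of the build): the exposure registration
 * rule applied by set_exposure_ratio() below.  Weighted channel sums of
 * the reference (asum) and of the aligned input frame (bsum) are
 * accumulated over the overlapping region, and the input frame's exposure
 * multiplier is rescaled by asum / bsum.  Scalars stand in for the
 * per-channel pixel_accum values used by the real code.
 */
#if 0
static double sketch_exposure_multiplier(const double *a, const double *b,
                                         const double *w, int n,
                                         double current_multiplier)
{
	double asum = 0, bsum = 0;
	int i;

	for (i = 0; i < n; i++) {
		asum += w[i] * a[i];
		bsum += w[i] * b[i];
	}

	return asum / bsum * current_multiplier;
}
#endif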
977 class exposure_ratio_iterate : public thread::decompose_domain {
978 pixel_accum *asums;
979 pixel_accum *bsums;
980 pixel_accum *asum;
981 pixel_accum *bsum;
982 struct scale_cluster c;
983 const transformation &t;
984 int ax_count;
985 int pass_number;
986 protected:
987 void prepare_subdomains(unsigned int N) {
988 asums = new pixel_accum[N];
989 bsums = new pixel_accum[N];
991 void subdomain_algorithm(unsigned int thread,
992 int i_min, int i_max, int j_min, int j_max) {
994 point offset = c.accum->offset();
996 for (mc_iterate m(i_min, i_max, j_min, j_max, thread); !m.done(); m++) {
998 unsigned int i = (unsigned int) m.get_i();
999 unsigned int j = (unsigned int) m.get_j();
1001 if (ref_excluded(i, j, offset, c.ax_parameters, ax_count))
1002 continue;
1005 * Transform
1008 struct point q;
1010 q = (c.input_scale < 1.0 && interpolant == NULL)
1011 ? t.scaled_inverse_transform(
1012 point(i + offset[0], j + offset[1]))
1013 : t.unscaled_inverse_transform(
1014 point(i + offset[0], j + offset[1]));
1017 * Check that the transformed coordinates are within
1018 * the boundaries of array c.input, that they are not
1019 * subject to exclusion, and that the weight value in
1020 * the accumulated array is nonzero.
1023 if (input_excluded(q[0], q[1], c.ax_parameters, ax_count))
1024 continue;
1026 if (q[0] >= 0
1027 && q[0] <= c.input->height() - 1
1028 && q[1] >= 0
1029 && q[1] <= c.input->width() - 1
1030 && ((pixel) c.certainty->get_pixel(i, j)).minabs_norm() != 0) {
1031 pixel a = c.accum->get_pixel(i, j);
1032 pixel b;
1034 b = c.input->get_bl(q);
1036 pixel weight = ((c.aweight && pass_number)
1037 ? (pixel) c.aweight->get_pixel(i, j)
1038 : pixel(1, 1, 1))
1039 * (pass_number
1040 ? psqrt(c.certainty->get_pixel(i, j)
1041 * c.input_certainty->get_bl(q, 1))
1042 : pixel(1, 1, 1));
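/*
 * Pass 0 uses unit weights here; pass 1 additionally weights each sample
 * by the alignment weights (when present) and by the square root of the
 * product of accumulated and input certainties, per the note above
 * exposure_ratio_iterate.
 */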
1044 asums[thread] += a * weight;
1045 bsums[thread] += b * weight;
1050 void finish_subdomains(unsigned int N) {
1051 for (unsigned int n = 0; n < N; n++) {
1052 *asum += asums[n];
1053 *bsum += bsums[n];
1055 delete[] asums;
1056 delete[] bsums;
1058 public:
1059 exposure_ratio_iterate(pixel_accum *_asum,
1060 pixel_accum *_bsum,
1061 struct scale_cluster _c,
1062 const transformation &_t,
1063 int _ax_count,
1064 int _pass_number) : decompose_domain(0, _c.accum->height(),
1065 0, _c.accum->width()),
1066 t(_t) {
1068 asum = _asum;
1069 bsum = _bsum;
1070 c = _c;
1071 ax_count = _ax_count;
1072 pass_number = _pass_number;
1075 exposure_ratio_iterate(pixel_accum *_asum,
1076 pixel_accum *_bsum,
1077 struct scale_cluster _c,
1078 const transformation &_t,
1079 int _ax_count,
1080 int _pass_number,
1081 transformation::elem_bounds_int_t b) : decompose_domain(b.imin, b.imax,
1082 b.jmin, b.jmax),
1083 t(_t) {
1085 asum = _asum;
1086 bsum = _bsum;
1087 c = _c;
1088 ax_count = _ax_count;
1089 pass_number = _pass_number;
1093 static void set_exposure_ratio(unsigned int m, struct scale_cluster c,
1094 const transformation &t, int ax_count, int pass_number) {
1096 if (_exp_register == 2) {
1098 * Use metadata only.
1100 ale_real gain_multiplier = image_rw::exp(m).get_gain_multiplier();
1101 pixel multiplier = pixel(gain_multiplier, gain_multiplier, gain_multiplier);
1103 image_rw::exp(m).set_multiplier(multiplier);
1104 ui::get()->exp_multiplier(multiplier[0],
1105 multiplier[1],
1106 multiplier[2]);
1108 return;
1111 pixel_accum asum(0, 0, 0), bsum(0, 0, 0);
1113 exposure_ratio_iterate eri(&asum, &bsum, c, t, ax_count, pass_number);
1114 eri.run();
1116 // std::cerr << (asum / bsum) << " ";
1118 pixel_accum new_multiplier;
1120 new_multiplier = asum / bsum * image_rw::exp(m).get_multiplier();
1122 if (finite(new_multiplier[0])
1123 && finite(new_multiplier[1])
1124 && finite(new_multiplier[2])) {
1125 image_rw::exp(m).set_multiplier(new_multiplier);
1126 ui::get()->exp_multiplier(new_multiplier[0],
1127 new_multiplier[1],
1128 new_multiplier[2]);
1132 static diff_stat_t _align_element(ale_pos perturb, ale_pos local_lower,
1133 scale_cluster *scale_clusters, diff_stat_t here,
1134 ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
1135 ale_pos *current_bd, ale_pos *modified_bd,
1136 astate_t *astate, int lod, scale_cluster si) {
1139 * Run initial tests to get perturbation multipliers and error
1140 * centroids.
1143 std::vector<d2::trans_single> t_set;
1145 here.get_perturb_set(&t_set, adj_p, adj_o, adj_b, current_bd, modified_bd);
1147 int stable_count = 0;
1149 while (perturb >= local_lower) {
1151 ui::get()->alignment_dims(scale_clusters[lod].accum->height(), scale_clusters[lod].accum->width(),
1152 scale_clusters[lod].input->height(), scale_clusters[lod].input->width());
1155 * Orientational adjustment value in degrees.
1157 * Since rotational perturbation is now specified as an
1158 * arclength, we have to convert.
1161 ale_pos adj_o = 2 * (double) perturb
1162 / sqrt(pow(scale_clusters[0].input->height(), 2)
1163 + pow(scale_clusters[0].input->width(), 2))
1164 * 180
1165 / M_PI;
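/*
 * For example (illustrative values): for a 1000x750 input image the
 * diagonal is 1250 pixels, so a perturbation arclength of 16 pixels maps
 * to 2 * 16 / 1250 = 0.0256 radians, or about 1.47 degrees of rotation.
 */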
1168 * Barrel distortion adjustment value
1171 ale_pos adj_b = perturb * bda_mult;
1173 trans_single old_offset = here.get_offset();
1175 here.perturb_test(perturb, adj_p, adj_o, adj_b, current_bd, modified_bd,
1176 stable_count);
1178 if (here.get_offset() == old_offset)
1179 stable_count++;
1180 else
1181 stable_count = 0;
1183 if (stable_count == 3) {
1185 stable_count = 0;
1187 perturb *= 0.5;
1189 if (lod > 0
1190 && lod > lrint(log(perturb) / log(2)) - lod_preferred) {
1193 * Work with images twice as large
1196 lod--;
1197 si = scale_clusters[lod];
1200 * Rescale the transforms.
1203 ale_pos rescale_factor = (double) scale_factor
1204 / (double) pow(2, lod)
1205 / (double) here.get_offset().scale();
1207 here.rescale(rescale_factor, si);
1209 } else {
1210 adj_p = perturb / pow(2, lod);
1214 * Announce changes
1217 ui::get()->alignment_perturbation_level(perturb, lod);
1220 ui::get()->set_match(here.get_error());
1221 ui::get()->set_offset(here.get_offset());
1224 if (lod > 0) {
1225 ale_pos rescale_factor = (double) scale_factor
1226 / (double) here.get_offset().scale();
1228 here.rescale(rescale_factor, scale_clusters[0]);
1231 return here;
1235 * Align frame m against the reference.
1237 * XXX: the transformation class currently combines ordinary
1238 * transformations with scaling. This is somewhat convenient for
1239 * some things, but can also be confusing. This method, _align(), is
1240 * one case where special care must be taken to ensure that the scale
1241 * is always set correctly (by using the 'rescale' method).
1243 static diff_stat_multi _align(int m, int local_gs, astate_t *astate) {
1245 ale_image input_frame = astate->get_input_frame();
1248 * Local upper/lower data, possibly dependent on image
1249 * dimensions.
1252 ale_pos local_lower, local_upper;
1253 ale_accum local_gs_mo;
1256 * Select the minimum dimension as the reference.
1259 ale_pos reference_size = input_frame->height();
1260 if (input_frame->width() < reference_size)
1261 reference_size = input_frame->width();
1262 ale_accum reference_area = input_frame->height()
1263 * input_frame->width();
1265 if (perturb_lower_percent)
1266 local_lower = (double) perturb_lower
1267 * (double) reference_size
1268 * (double) 0.01
1269 * (double) scale_factor;
1270 else
1271 local_lower = perturb_lower;
1273 if (perturb_upper_percent)
1274 local_upper = (double) perturb_upper
1275 * (double) reference_size
1276 * (double) 0.01
1277 * (double) scale_factor;
1278 else
1279 local_upper = perturb_upper;
1281 local_upper = pow(2, floor(log(local_upper) / log(2)));
1283 if (gs_mo_percent)
1284 local_gs_mo = (double) _gs_mo
1285 * (double) reference_area
1286 * (double) 0.01
1287 * (double) scale_factor;
1288 else
1289 local_gs_mo = _gs_mo;
1292 * Logarithms aren't exact, so we divide repeatedly to discover
1293 * how many steps will occur, and pass this information to the
1294 * user interface.
1297 int step_count = 0;
1298 double step_variable = local_upper;
1299 while (step_variable >= local_lower) {
1300 step_variable /= 2;
1301 step_count++;
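/*
 * For example (illustrative values): local_upper = 32 and local_lower = 2
 * give step_count = 5, corresponding to perturbation sizes 32, 16, 8, 4, 2.
 */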
1304 ale_pos perturb = local_upper;
1306 if (_keep) {
1307 kept_t[latest] = latest_t;
1308 kept_ok[latest] = latest_ok;
1312 * Determine how many levels of detail should be prepared, by
1313 * calculating the initial (largest) value for the
1314 * level-of-detail variable.
1317 int lod = lrint(log(perturb) / log(2)) - lod_preferred;
1319 if (lod < 0)
1320 lod = 0;
1322 while (lod > 0 && (reference_image->width() < pow(2, lod) * min_dimension
1323 || reference_image->height() < pow(2, lod) * min_dimension))
1324 lod--;
1326 unsigned int steps = (unsigned int) lod + 1;
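/*
 * For example (illustrative values): perturb = 32 with lod_preferred = 2
 * gives lod = lrint(log2(32)) - 2 = 3, hence steps = 4 levels of detail,
 * unless the reference image is small enough that the loop above clamps
 * lod further.
 */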
1329 * Prepare multiple levels of detail.
1332 int local_ax_count;
1333 struct scale_cluster *scale_clusters = init_clusters(m,
1334 scale_factor, input_frame, steps,
1335 &local_ax_count);
1337 #error add check for non-NULL return
1340 * Initialize the default initial transform
1343 astate->init_default();
1346 * Set the default transformation.
1349 transformation offset = astate->get_default();
1352 * Establish boundaries
1355 offset.set_current_bounds(reference_image);
1357 ui::get()->alignment_degree_max(offset.get_coordinate(offset.stack_depth() - 1).degree);
1359 if (offset.stack_depth() == 1) {
1360 ui::get()->set_steps(step_count, 0);
1361 } else {
1362 ui::get()->set_steps(offset.get_coordinate(offset.stack_depth() - 1).degree + 1, 1);
1366 * Load any file-specified transformations
1369 for (unsigned int index = 0; index < offset.stack_depth(); index++) {
1370 int is_default = 1;
1371 unsigned int index_2;
1372 offset.set_current_index(index);
1374 offset = tload_next(tload, alignment_class == 2,
1375 offset,
1376 &is_default, offset.get_current_index() == 0);
1378 index_2 = offset.get_current_index();
1380 if (index_2 > index) {
1381 for (unsigned int index_3 = index; index_3 < index_2; index_3++)
1382 astate->set_is_default(index_3, 1);
1384 index = index_2;
1387 astate->set_is_default(index, is_default);
1390 offset.set_current_index(0);
1392 astate->init_frame_alignment_primary(&offset, lod, perturb);
1395 * Control point alignment
1398 if (local_gs == 5) {
1400 transformation o = offset;
1403 * Determine centroid data
1406 point current, previous;
1407 centroids(m, &current, &previous);
1409 if (current.defined() && previous.defined()) {
1410 o = orig_t;
1411 o.set_dimensions(input_frame);
1412 o.translate((previous - current) * o.scale());
1413 current = previous;
1417 * Determine rotation for alignment classes other than translation.
1420 ale_pos lowest_error = cp_rms_error(m, o);
1422 ale_pos rot_lower = 2 * (double) local_lower
1423 / sqrt(pow(scale_clusters[0].input->height(), 2)
1424 + pow(scale_clusters[0].input->width(), 2))
1425 * 180
1426 / M_PI;
1428 if (alignment_class > 0)
1429 for (double rot = 30; rot > rot_lower; rot /= 2)
1430 for (double srot = -rot; srot < rot * 1.5; srot += rot * 2) {
1431 int is_improved = 1;
1432 while (is_improved) {
1433 is_improved = 0;
1434 transformation test_t = o;
1436 * XXX: is this right?
1438 test_t.rotate(current * o.scale(), srot);
1439 ale_pos test_v = cp_rms_error(m, test_t);
1441 if (test_v < lowest_error) {
1442 lowest_error = test_v;
1443 o = test_t;
1444 srot += 3 * rot;
1445 is_improved = 1;
1451 * Determine projective parameters through a local
1452 * minimum search.
1455 if (alignment_class == 2) {
1456 ale_pos adj_p = lowest_error;
1458 if (adj_p < local_lower)
1459 adj_p = local_lower;
1461 while (adj_p >= local_lower) {
1462 transformation test_t = o;
1463 int is_improved = 1;
1464 ale_pos test_v;
1465 ale_pos adj_s;
1467 while (is_improved) {
1468 is_improved = 0;
1470 for (int i = 0; i < 4; i++)
1471 for (int j = 0; j < 2; j++)
1472 for (adj_s = -adj_p; adj_s <= adj_p; adj_s += 2 * adj_p) {
1474 test_t = o;
1476 if (perturb_type == 0)
1477 test_t.gpt_modify(j, i, adj_s);
1478 else if (perturb_type == 1)
1479 test_t.gr_modify(j, i, adj_s);
1480 else
1481 assert(0);
1483 test_v = cp_rms_error(m, test_t);
1485 if (test_v < lowest_error) {
1486 lowest_error = test_v;
1487 o = test_t;
1488 adj_s += 3 * adj_p;
1489 is_improved = 1;
1493 adj_p /= 2;
1499 * Pre-alignment exposure adjustment
1502 if (_exp_register) {
1503 ui::get()->exposure_1();
1504 set_exposure_ratio(m, scale_clusters[0], offset, local_ax_count, 0);
1508 * Scale transform for lod
1511 for (int lod_ = 0; lod_ < lod; lod_++) {
1512 transformation s = offset;
1513 transformation t = offset;
1515 t.rescale(1 / (double) 2);
1517 if (!(t.scaled_height() > 0 && t.scaled_height() < s.scaled_height())
1518 || !(t.scaled_width() > 0 && t.scaled_width() < s.scaled_width())) {
1519 perturb /= pow(2, lod - lod_);
1520 lod = lod_;
1521 break;
1522 } else {
1523 offset = t;
1527 ui::get()->set_offset(offset);
1529 struct scale_cluster si = scale_clusters[lod];
1532 * Projective adjustment value
1535 ale_pos adj_p = perturb / pow(2, lod);
1538 * Orientational adjustment value in degrees.
1540 * Since rotational perturbation is now specified as an
1541 * arclength, we have to convert.
1544 ale_pos adj_o = (double) 2 * (double) perturb
1545 / sqrt(pow((double) scale_clusters[0].input->height(), (double) 2)
1546 + pow((double) scale_clusters[0].input->width(), (double) 2))
1547 * (double) 180
1548 / M_PI;
1551 * Barrel distortion adjustment value
1554 ale_pos adj_b = perturb * bda_mult;
1557 * Global search overlap requirements.
1560 local_gs_mo = (double) local_gs_mo / pow(pow(2, lod), 2);
1563 * Alignment statistics.
1566 diff_stat_t here(offset.elem_bounds());
1569 * Current difference (error) value
1572 ui::get()->prematching();
1573 here.diff(si, offset.get_current_element(), local_ax_count, m);
1574 ui::get()->set_match(here.get_error());
1577 * Current and modified barrel distortion parameters
1580 ale_pos current_bd[BARREL_DEGREE];
1581 ale_pos modified_bd[BARREL_DEGREE];
1582 offset.bd_get(current_bd);
1583 offset.bd_get(modified_bd);
1586 * Translational global search step
1589 if (perturb >= local_lower && local_gs != 0 && local_gs != 5
1590 && (local_gs != 6 || astate->get_is_default(0))) {
1592 ui::get()->global_alignment(perturb, lod);
1593 ui::get()->gs_mo(local_gs_mo);
1595 test_globals(&here, si, offset, local_gs, adj_p,
1596 local_ax_count, m, local_gs_mo, perturb);
1598 ui::get()->set_match(here.get_error());
1599 ui::get()->set_offset(here.get_offset());
1603 * Perturbation adjustment loop.
1606 offset.set_current_element(here.get_offset());
1608 for (unsigned int i = 0; i < offset.stack_depth(); i++) {
1610 ui::get()->aligning_element(i, offset.stack_depth());
1612 offset.set_current_index(i);
1614 ui::get()->start_multi_alignment_element(offset);
1616 ui::get()->set_offset(offset);
1618 if (i > 0) {
1619 astate->init_frame_alignment_nonprimary(&offset, lod, perturb, i);
1621 if (_exp_register == 1) {
1622 ui::get()->exposure_1();
1623 pixel_accum asum(0, 0, 0), bsum(0, 0, 0);
1624 exposure_ratio_iterate eri(&asum, &bsum, scale_clusters[0], offset, local_ax_count, 0,
1625 offset.elem_bounds().scale_to_bounds(scale_clusters[0].accum->height(),
1626 scale_clusters[0].accum->width()));
1628 eri.run();
1629 pixel_accum tr = asum / bsum;
1630 ui::get()->exp_multiplier(tr[0], tr[1], tr[2]);
1631 offset.set_tonal_multiplier(tr);
1635 int e_lod = lod;
1636 int e_div = offset.get_current_coordinate().degree;
1637 ale_pos e_perturb = perturb;
1638 ale_pos e_adj_p = adj_p;
1639 ale_pos e_adj_b = adj_b;
1641 for (int d = 0; d < e_div; d++) {
1642 e_adj_b = 0;
1643 e_perturb *= 0.5;
1644 if (e_lod > 0) {
1645 e_lod--;
1646 } else {
1647 e_adj_p *= 0.5;
1651 if (i > 0) {
1653 d2::trans_multi::elem_bounds_t b = offset.elem_bounds();
1655 for (int dim_satisfied = 0; e_lod > 0 && !dim_satisfied; ) {
1656 int height = scale_clusters[e_lod].accum->height();
1657 int width = scale_clusters[e_lod].accum->width();
1659 d2::trans_multi::elem_bounds_int_t bi = b.scale_to_bounds(height, width);
1661 dim_satisfied = bi.satisfies_min_dim(min_dimension);
1663 if (!dim_satisfied) {
1664 e_lod--;
1665 e_adj_p *= 2;
1670 * Scale transform for lod
1673 for (int lod_ = 0; lod_ < e_lod; lod_++) {
1674 trans_single s = offset.get_element(i);
1675 trans_single t = offset.get_element(i);
1677 t.rescale(1 / (double) 2);
1679 if (!(t.scaled_height() > 0 && t.scaled_height() < s.scaled_height())
1680 || !(t.scaled_width() > 0 && t.scaled_width() < s.scaled_width())) {
1681 e_perturb /= pow(2, e_lod - lod_);
1682 e_lod = lod_;
1683 break;
1684 } else {
1685 offset.set_element(i, t);
1689 ui::get()->set_offset(offset);
1693 * Announce perturbation size
1696 ui::get()->aligning(e_perturb, e_lod);
1698 si = scale_clusters[e_lod];
1700 here.set_elem_bounds(offset.elem_bounds());
1702 here.diff(si, offset.get_current_element(), local_ax_count, m);
1704 here.confirm();
1706 here = check_ancestor_path(offset, si, here, local_ax_count, m);
1708 here = _align_element(e_perturb, local_lower, scale_clusters,
1709 here, e_adj_p, adj_o, e_adj_b, current_bd, modified_bd,
1710 astate, e_lod, si);
1712 offset.rescale(here.get_offset().scale() / offset.scale());
1714 offset.set_current_element(here.get_offset());
1716 if (i > 0 && _exp_register == 1) {
1717 if (ma_cert_satisfied(scale_clusters[0], offset, i)) {
1718 ui::get()->exposure_2();
1719 pixel_accum asum(0, 0, 0), bsum(0, 0, 0);
1720 exposure_ratio_iterate eri(&asum, &bsum, scale_clusters[0], offset, local_ax_count, 1,
1721 offset.elem_bounds().scale_to_bounds(scale_clusters[0].accum->height(),
1722 scale_clusters[0].accum->width()));
1724 eri.run();
1725 pixel_accum tr = asum / bsum;
1726 ui::get()->exp_multiplier(tr[0], tr[1], tr[2]);
1727 offset.set_tonal_multiplier(tr);
1728 } else {
1729 offset.set_tonal_multiplier(offset.get_element(offset.parent_index(i)).get_tonal_multiplier(point(0, 0)));
1731 } else if (_exp_register == 1) {
1732 ui::get()->exposure_2();
1733 set_exposure_ratio(m, scale_clusters[0], offset, local_ax_count, 1);
1736 ui::get()->set_offset(offset);
1738 if (i + 1 == offset.stack_depth())
1739 ui::get()->alignment_degree_complete(offset.get_coordinate(i).degree);
1740 else if (offset.get_coordinate(i).degree != offset.get_coordinate(i + 1).degree)
1741 ui::get()->alignment_degree_complete(offset.get_coordinate(i + 1).degree);
1744 offset.set_current_index(0);
1746 ui::get()->multi();
1747 offset.set_multi(reference_image, input_frame);
1750 * Recalculate error on whole frame.
1753 ui::get()->postmatching();
1754 diff_stat_generic<transformation> multi_here(offset.elem_bounds());
1755 multi_here.diff(scale_clusters[0], offset, local_ax_count, m);
1756 ui::get()->set_match(multi_here.get_error());
1759 * Free the level-of-detail structures
1762 final_clusters(scale_clusters, scale_factor, steps);
1765 * Ensure that the match meets the threshold.
1768 if (threshold_ok(multi_here.get_error())) {
1770 * Update alignment variables
1772 latest_ok = 1;
1773 astate->set_default(offset);
1774 astate->set_final(offset);
1775 ui::get()->alignment_match_ok();
1776 } else if (local_gs == 4) {
1779 * Align with outer starting points.
1783 * XXX: This probably isn't exactly the right thing to do,
1784 * since variables like old_initial_value have been overwritten.
1787 diff_stat_multi nested_result = _align(m, -1, astate);
1789 if (threshold_ok(nested_result.get_error())) {
1790 return nested_result;
1791 } else if (nested_result.get_error() < multi_here.get_error()) {
1792 multi_here = nested_result;
1795 if (is_fail_default)
1796 offset = astate->get_default();
1798 ui::get()->set_match(multi_here.get_error());
1799 ui::get()->alignment_no_match();
1801 } else if (local_gs == -1) {
1803 latest_ok = 0;
1804 latest_t = offset;
1805 return multi_here;
1807 } else {
1808 if (is_fail_default)
1809 offset = astate->get_default();
1810 latest_ok = 0;
1811 ui::get()->alignment_no_match();
1815 * Write the tonal registration multiplier as a comment.
1818 pixel trm = image_rw::exp(m).get_multiplier();
1819 tsave_trm(tsave, trm[0], trm[1], trm[2]);
1822 * Save the transformation information
1825 for (unsigned int index = 0; index < offset.stack_depth(); index++) {
1826 offset.set_current_index(index);
1828 tsave_next(tsave, offset, alignment_class == 2,
1829 offset.get_current_index() == 0);
1832 offset.set_current_index(0);
1835 * Update match statistics.
1838 match_sum += (1 - multi_here.get_error()) * (ale_accum) 100;
1839 match_count++;
1840 latest = m;
1841 latest_t = offset;
1843 return multi_here;
1846 int ale_align(ale_context ac, ale_image a, ale_image b, ale_trans start,
1847 ale_align_properties align_properties, ale_trans result) {
1848 #warning function unfinished.