include/ale: Disable ALE_POINTER table offsets for now, and explain the TABLE alternative
[libale.git] / src / align.c
blob5eb6558b8b9e0bc42c22e68eac36336f450a5177
/*
 * Copyright 2002, 2004, 2005, 2006, 2007, 2008, 2009 David Hilvert <dhilvert@gmail.com>
 *
 * This file is part of libale.
 *
 * libale is free software: you can redistribute it and/or modify it under the
 * terms of the GNU Affero General Public License as published by the Free
 * Software Foundation, either version 3 of the License, or (at your option)
 * any later version.
 *
 * libale is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
 * more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with libale. If not, see <http://www.gnu.org/licenses/>.
 */
20 #include <math.h>
22 #include "libale.h"
/*
 * API Implementation.
 */

/*
 * Types for scale clusters.
 */
#if 0

/*
 * Experimental non-linear scaling approach.
 *
 * Holds min/max image pairs for a non-linear (range-compressed) variant of
 * the scale-cluster data below.  Compiled out; retained for reference only.
 *
 * NOTE(review): the closing `};` was lost in extraction and has been
 * restored here — verify against upstream.
 */

struct nl_scale_cluster {
	ale_image accum_max;
	ale_image accum_min;
	ale_image aweight_max;
	ale_image aweight_min;

	ale_pos input_scale;
	ale_image input_certainty_max;
	ale_image input_certainty_min;
	ale_image input_max;
	ale_image input_min;
};
#endif
/*
 * Per-level-of-detail alignment data: the accumulated reference image and
 * its alignment weights, plus the (possibly rescaled) input frame and its
 * per-pixel certainty map.
 *
 * NOTE(review): the closing `};` was lost in extraction and has been
 * restored here — verify against upstream.
 */
struct scale_cluster {
	ale_image accum;		/* accumulated reference image at this LOD */
	ale_image aweight;		/* alignment weights for accum */

	ale_pos input_scale;		/* scale factor applied to the input frame */
	ale_image input_certainty;	/* per-pixel certainty for input */
	ale_image input;		/* input frame at this LOD */

#if 0
	nl_scale_cluster *nl_scale_clusters;	/* disabled experimental data */
#endif
};
/*
 * Build the level-of-detail (LOD) pyramid used to align one input frame
 * against the reference image.
 *
 * Returns a malloc'd array of `steps` scale_cluster entries, or NULL on
 * allocation failure.  Element 0 holds the highest level of detail; each
 * subsequent element halves the accumulated image and weights.  The caller
 * owns the returned array.
 *
 * NOTE(review): the `frame` parameter is unused in the visible body —
 * confirm against upstream whether it is vestigial.
 * NOTE(review): closing braces lost in extraction were restored; verify
 * loop/branch structure against upstream.
 */
static struct scale_cluster *init_clusters(ale_context ac, int frame, ale_pos scale_factor,
		ale_image input_frame, ale_trans input_trans, ale_image reference_image,
		ale_image alignment_weights, unsigned int steps, ale_filter interpolant) {

	int input_bayer = ale_trans_get_bayer(input_trans);
	ale_real input_gamma = ale_trans_get_gamma(input_trans);
	ale_certainty input_certainty = ale_trans_retain_certainty(input_trans);
	ale_real input_black = ale_trans_get_black(input_trans);

	unsigned int step;

	/*
	 * Allocate memory for the array.
	 */

	struct scale_cluster *scale_clusters =
		(struct scale_cluster *) malloc(steps * sizeof(struct scale_cluster));

	if (!scale_clusters)
		return NULL;

	/*
	 * Prepare images and exclusion regions for the highest level
	 * of detail.
	 */

	scale_clusters[0].accum = reference_image;

	ale_context_signal(ac, "ui_constructing_lod_clusters", 0.0);

	scale_clusters[0].input_scale = scale_factor;

	/* Pre-scale the input only when shrinking and no interpolant will
	 * resample it later. */
	if (scale_factor < 1.0 && interpolant == NULL)
		scale_clusters[0].input = ale_scale_image(input_frame, scale_factor, ale_context_get_color_type(ac), input_gamma, input_bayer);
	else
		scale_clusters[0].input = input_frame;

	scale_clusters[0].aweight = alignment_weights;

	/*
	 * Allocate and determine input frame certainty.
	 */

	scale_clusters[0].input_certainty = ale_clone_image(scale_clusters[0].input, ale_context_get_certainty_type(ac));

	/* Gamma of 1 is used when the input was already linearized by the
	 * pre-scaling branch above. */
	ale_eval("MAP_PIXEL(%0I, p, CERTAINTY(%1t, p, %2f, GET_AFFINE_PIXEL(%3i, p, %4f)))",
			scale_clusters[0].input_certainty, input_black, input_certainty,
			scale_clusters[0].input, (scale_factor < 1.0 && interpolant == NULL) ? 1 : input_gamma);

#if 0
	init_nl_cluster(&(scale_clusters[0]));
#endif

	/*
	 * Prepare reduced-detail images and exclusion
	 * regions.
	 */

	for (step = 1; step < steps; step++) {
		ale_context_signal(ac, "ui_constructing_lod_clusters", step);
		scale_clusters[step].accum = ale_scale_image(scale_clusters[step - 1].accum, 0.5, ale_context_get_color_type(ac), 1, ALE_BAYER_NONE);
		scale_clusters[step].aweight = ale_scale_image_wt(scale_clusters[step - 1].aweight, 0.5, ale_context_get_weight_type(ac), ALE_BAYER_NONE);

		ale_pos sf = scale_clusters[step - 1].input_scale;

		scale_clusters[step].input_scale = sf / 2;

		if (sf >= 2.0 || interpolant != NULL) {
			/* Input still finer than this LOD (or interpolant in
			 * use): share the previous level's images. */
			scale_clusters[step].input = scale_clusters[step - 1].input;
			scale_clusters[step].input_certainty = scale_clusters[step - 1].input_certainty;
		} else if (sf >= 1.0) {
			/* Crossing 1:1 scale: bring the input down to exactly
			 * this level's scale. */
			scale_clusters[step].input = ale_scale_image(scale_clusters[step - 1].input, sf / 2, ale_context_get_color_type(ac), input_gamma, input_bayer);
			scale_clusters[step].input_certainty = ale_scale_image_wt(scale_clusters[step - 1].input_certainty, sf / 2, ale_context_get_certainty_type(ac), input_bayer);
		} else {
			/* Already linearized and below 1:1: plain halving. */
			scale_clusters[step].input = ale_scale_image(scale_clusters[step - 1].input, 0.5, ale_context_get_color_type(ac), 1, ALE_BAYER_NONE);
			scale_clusters[step].input_certainty = ale_scale_image_wt(scale_clusters[step - 1].input_certainty, 0.5, ale_context_get_certainty_type(ac), ALE_BAYER_NONE);
		}

#if 0
		init_nl_cluster(&(scale_clusters[step]));
#endif
	}

	ale_certainty_release(input_certainty);

	return scale_clusters;
}
/*
 * Not-quite-symmetric difference function.  Determines the difference in areas
 * where the arrays overlap.  Uses the reference array's notion of pixel positions.
 *
 * For the purposes of determining the difference, this function divides each
 * pixel value by the corresponding image's average pixel magnitude, unless we
 * are:
 *
 * a) Extending the boundaries of the image, or
 *
 * b) following the previous frame's transform
 *
 * If we are doing monte-carlo pixel sampling for alignment, we
 * typically sample a subset of available pixels; otherwise, we sample
 * all pixels.
 */
/*
 * Statistics for one alignment test run: the transformation under test,
 * accumulated error numerator/denominator, the sampled bounding box,
 * weighted centroid sums, and error-difference ("de") centroid sums used
 * to estimate perturbation size.
 *
 * NOTE(review): the macro invocation's closing delimiter was lost in
 * extraction and has been restored as `)` — verify against the
 * ALE_STRUCT definition in libale headers.
 */
ALE_STRUCT(_run, \
	ale_trans offset; \
	ale_accum result; \
	ale_accum divisor; \
	point max, min; \
	ale_accum centroid[2], centroid_divisor; \
	ale_accum de_centroid[2], de_centroid_v, de_sum; \
)

typedef struct _run run;
/*
 * Initialize run statistics to empty for the transformation `offset`:
 * zero accumulators, and an inverted (+inf/-inf) bounding box so the
 * first sample establishes it.
 *
 * NOTE(review): closing `}` restored (lost in extraction).
 */
static void run_init(run *r, ale_trans offset) {

	r->result = 0;
	r->divisor = 0;

	/* Inverted bounds: any real sample will shrink/grow these. */
	r->min = point_posinf(2);
	r->max = point_neginf(2);

	r->centroid[0] = 0;
	r->centroid[1] = 0;
	r->centroid_divisor = 0;

	r->de_centroid[0] = 0;
	r->de_centroid[1] = 0;

	r->de_centroid_v = 0;

	r->de_sum = 0;

	r->offset = offset;
}
/*
 * Merge statistics from run `s` into run `r` (used to combine per-thread
 * subdomain results): sums are added, and the bounding box is expanded to
 * cover both runs.  `r->offset` is left unchanged.
 *
 * NOTE(review): closing braces restored (lost in extraction).
 */
static void run_add(run *r, const run *s) {
	int d;

	r->result += s->result;
	r->divisor += s->divisor;

	for (d = 0; d < 2; d++) {
		if (r->min.x[d] > s->min.x[d])
			r->min.x[d] = s->min.x[d];
		if (r->max.x[d] < s->max.x[d])
			r->max.x[d] = s->max.x[d];
		r->centroid[d] += s->centroid[d];
		r->de_centroid[d] += s->de_centroid[d];
	}

	r->centroid_divisor += s->centroid_divisor;
	r->de_centroid_v += s->de_centroid_v;
	r->de_sum += s->de_sum;
}
/*
 * Normalized alignment error for a run: the metric_exponent-th root of
 * (result / divisor).  NaN when divisor is zero (no samples).
 *
 * NOTE(review): closing `}` restored (lost in extraction).
 */
static ale_accum run_get_error(run *r, ale_real metric_exponent) {
	return pow(r->result / r->divisor, 1/(ale_accum) metric_exponent);
}
/*
 * Accumulate one pixel sample into the current run's statistics: compare
 * reference pixel (i, j) against the input frame at transformed position
 * `t` (and optionally a second position `u` for error-difference
 * statistics), weighting by certainty and alignment weights, and update
 * the sampled bounding box and centroids.
 *
 * NOTE(review): this function is mid-port from the C++ d2::align code —
 * it references names with no visible declaration here (`offset`,
 * `interpolant`, `certainty_weights`, `min`, `max`, `centroid*`,
 * `channel_alignment_type`, `metric_exponent`, `result`, `divisor`,
 * `de_*`, and a `c.certainty` member that struct scale_cluster does not
 * declare) and uses C++ member-call syntax on ale_image values.  It
 * cannot compile as shown; preserved as-is with structure restored.
 * NOTE(review): closing braces restored (lost in extraction).
 */
static void run_sample(int f, scale_cluster c, int i, int j, point t, point u) {

	pixel pa = c.accum->get_pixel(i, j);

	ale_real this_result[2] = { 0, 0 };
	ale_real this_divisor[2] = { 0, 0 };

	pixel p[2];
	pixel weight[2];
	weight[0] = pixel(1, 1, 1);
	weight[1] = pixel(1, 1, 1);

	/* Tonal multiplier at this reference position. */
	pixel tm = offset.get_tonal_multiplier(point(i, j) + c.accum->offset());

	if (interpolant != NULL) {
		interpolant->filtered(i, j, &p[0], &weight[0], 1, f);

		if (weight[0].min_norm() > ale_real_weight_floor) {
			p[0] /= weight[0];
		} else {
			/* Weight too small to normalize safely; skip sample. */
			return;
		}
	} else {
		p[0] = c.input->get_bl(t);
	}

	p[0] *= tm;

	if (u.defined()) {
		p[1] = c.input->get_bl(u);
		p[1] *= tm;
	}

	/*
	 * Handle certainty.
	 */

	if (certainty_weights == 1) {

		/*
		 * For speed, use arithmetic interpolation (get_bl(.))
		 * instead of geometric (get_bl(., 1))
		 */

		weight[0] *= c.input_certainty->get_bl(t);
		if (u.defined())
			weight[1] *= c.input_certainty->get_bl(u);
		weight[0] *= c.certainty->get_pixel(i, j);
		weight[1] *= c.certainty->get_pixel(i, j);
	}

	if (c.aweight != NULL) {
		weight[0] *= c.aweight->get_pixel(i, j);
		weight[1] *= c.aweight->get_pixel(i, j);
	}

	/*
	 * Update sampling area statistics
	 */

	if (min[0] > i)
		min[0] = i;
	if (min[1] > j)
		min[1] = j;
	if (max[0] < i)
		max[0] = i;
	if (max[1] < j)
		max[1] = j;

	centroid[0] += (weight[0][0] + weight[0][1] + weight[0][2]) * i;
	centroid[1] += (weight[0][0] + weight[0][1] + weight[0][2]) * j;
	centroid_divisor += (weight[0][0] + weight[0][1] + weight[0][2]);

	/*
	 * Determine alignment type.
	 */

	for (int m = 0; m < (u.defined() ? 2 : 1); m++)
	if (channel_alignment_type == 0) {
		/*
		 * Align based on all channels.
		 */

		for (int k = 0; k < 3; k++) {
			ale_real achan = pa[k];
			ale_real bchan = p[m][k];

			this_result[m] += weight[m][k] * pow(fabs(achan - bchan), metric_exponent);
			this_divisor[m] += weight[m][k] * pow(achan > bchan ? achan : bchan, metric_exponent);
		}
	} else if (channel_alignment_type == 1) {
		/*
		 * Align based on the green channel.
		 */

		ale_real achan = pa[1];
		ale_real bchan = p[m][1];

		this_result[m] = weight[m][1] * pow(fabs(achan - bchan), metric_exponent);
		this_divisor[m] = weight[m][1] * pow(achan > bchan ? achan : bchan, metric_exponent);
	} else if (channel_alignment_type == 2) {
		/*
		 * Align based on the sum of all channels.
		 */

		ale_real asum = 0;
		ale_real bsum = 0;
		ale_real wsum = 0;

		for (int k = 0; k < 3; k++) {
			asum += pa[k];
			bsum += p[m][k];
			wsum += weight[m][k] / 3;
		}

		this_result[m] = wsum * pow(fabs(asum - bsum), metric_exponent);
		this_divisor[m] = wsum * pow(asum > bsum ? asum : bsum, metric_exponent);
	}

	/* Error-difference statistics between the two sampled positions,
	 * used later to estimate perturbation sizes. */
	if (u.defined()) {
//		ale_real de = fabs(this_result[0] / this_divisor[0]
//				 - this_result[1] / this_divisor[1]);
		ale_real de = fabs(this_result[0] - this_result[1]);

		de_centroid[0] += de * (ale_real) i;
		de_centroid[1] += de * (ale_real) j;

		de_centroid_v += de * (ale_real) t.lengthto(u);

		de_sum += de;
	}

	result += (this_result[0]);
	divisor += (this_divisor[0]);
}
/*
 * Rescale a run's transformation and position-dependent statistics by
 * `scale` (used when moving between levels of detail).
 *
 * NOTE(review): mid-port code — `offset` and `de_centroid*` are accessed
 * without a run parameter; presumably this was a member function of the
 * former C++ run class.  Closing `}` restored (lost in extraction).
 */
static void run_rescale(ale_pos scale) {
	offset.rescale(scale);

	de_centroid[0] *= scale;
	de_centroid[1] *= scale;
	de_centroid_v *= scale;
}
/*
 * Weighted centroid of sampled pixels.  Undefined (and asserted against)
 * only when centroid_divisor is zero, i.e. no weighted samples.
 *
 * NOTE(review): mid-port code — accesses `centroid*` without a run
 * parameter.  Closing `}` restored (lost in extraction).
 */
static point run_get_centroid() {
	point result = point(centroid[0] / centroid_divisor, centroid[1] / centroid_divisor);

	assert (finite(centroid[0])
	     && finite(centroid[1])
	     && (result.defined() || centroid_divisor == 0));

	return result;
}
/*
 * Centroid of the error-difference ("de") statistics; undefined when
 * de_sum is zero.
 *
 * NOTE(review): mid-port code — accesses `de_centroid`/`de_sum` without a
 * run parameter.  Closing `}` restored (lost in extraction).
 */
static point run_get_error_centroid() {
	point result = point(de_centroid[0] / de_sum, de_centroid[1] / de_sum);
	return result;
}
/*
 * Mean error-weighted displacement between the two sampled positions —
 * used as an estimate of a useful perturbation size.
 *
 * NOTE(review): mid-port code — accesses `de_centroid_v`/`de_sum` without
 * a run parameter.  Closing `}` restored (lost in extraction).
 */
static ale_pos run_get_error_perturb() {
	ale_pos result = de_centroid_v / de_sum;

	return result;
}
/*
 * Alignment-difference statistics for a transformation type `diff_trans`
 * (single- or multi-element).  Maintains up to two runs — runs[0] is the
 * best transformation so far, runs[1] the one under test — plus a cache
 * (old_runs) of the latest perturbation results and per-perturbation
 * step-size multipliers.
 *
 * NOTE(review): mid-port code.  `run` is declared above as a plain C
 * struct with free functions (run_init/run_add/...), yet this class calls
 * C++ members on it (runs[1].get_error(), runs.back().add(...),
 * run(t) construction).  It also references file-scope configuration
 * (`interpolant`, `alignment_class`, `rot_max`, `perturb_type`,
 * `bda_mult`, `bda_rate`) with no visible declaration.  Preserved as-is.
 * NOTE(review): closing braces throughout (including the final `};`)
 * were lost in extraction and have been restored — verify against
 * upstream.
 */
template<class diff_trans>
class diff_stat_generic {

	/* Bounds of the multi-alignment element being aligned. */
	transformation::elem_bounds_t elem_bounds;

	/*
	 * When non-empty, runs.front() is best, runs.back() is
	 * testing.
	 */

	std::vector<run> runs;

	/*
	 * old_runs stores the latest available perturbation set for
	 * each multi-alignment element.
	 */

	typedef int run_index;
	std::map<run_index, run> old_runs;

	static void *diff_subdomain(void *args);

	/* Arguments handed to each worker thread / subdomain. */
	struct subdomain_args {
		struct scale_cluster c;
		std::vector<run> runs;
		int ax_count;
		int f;
		int i_min, i_max, j_min, j_max;
		int subdomain;
	};

	struct scale_cluster si;	/* current scale cluster */
	int ax_count;			/* exclusion-region count */
	int frame;			/* frame index being aligned */

	std::vector<ale_pos> perturb_multipliers;

public:
	/*
	 * Evaluate transformation `t` over cluster `c`, appending the
	 * resulting statistics as the testing run (runs.back()).  Work is
	 * split across threads by rows when USE_PTHREAD is defined.
	 */
	void diff(struct scale_cluster c, const diff_trans &t,
			int _ax_count, int f) {

		if (runs.size() == 2)
			runs.pop_back();

		/* NOTE(review): run(t) presumes a constructor; `run` as
		 * declared above is a C struct — mid-port inconsistency. */
		runs.push_back(run(t));

		si = c;
		ax_count = _ax_count;
		frame = f;

		ui::get()->d2_align_sample_start();

		if (interpolant != NULL) {

			/*
			 * XXX: This has not been tested, and may be completely
			 * wrong.
			 */

			transformation tt = transformation::eu_identity();
			tt.set_current_element(t);
			interpolant->set_parameters(tt, c.input, c.accum->offset());
		}

		int N;
#ifdef USE_PTHREAD
		N = thread::count();

		pthread_t *threads = (pthread_t *) malloc(sizeof(pthread_t) * N);
		pthread_attr_t *thread_attr = (pthread_attr_t *) malloc(sizeof(pthread_attr_t) * N);

#else
		N = 1;
#endif

		subdomain_args *args = new subdomain_args[N];

		transformation::elem_bounds_int_t b = elem_bounds.scale_to_bounds(c.accum->height(), c.accum->width());

		// fprintf(stdout, "[%d %d] [%d %d]\n",
		// 	global_i_min, global_i_max, global_j_min, global_j_max);

		for (int ti = 0; ti < N; ti++) {
			args[ti].c = c;
			args[ti].runs = runs;
			args[ti].ax_count = ax_count;
			args[ti].f = f;
			/* Partition rows evenly across the N subdomains. */
			args[ti].i_min = b.imin + ((b.imax - b.imin) * ti) / N;
			args[ti].i_max = b.imin + ((b.imax - b.imin) * (ti + 1)) / N;
			args[ti].j_min = b.jmin;
			args[ti].j_max = b.jmax;
			args[ti].subdomain = ti;
#ifdef USE_PTHREAD
			pthread_attr_init(&thread_attr[ti]);
			pthread_attr_setdetachstate(&thread_attr[ti], PTHREAD_CREATE_JOINABLE);
			pthread_create(&threads[ti], &thread_attr[ti], diff_subdomain, &args[ti]);
#else
			diff_subdomain(&args[ti]);
#endif
		}

		for (int ti = 0; ti < N; ti++) {
#ifdef USE_PTHREAD
			pthread_join(threads[ti], NULL);
#endif
			/* Merge each subdomain's statistics into the testing run. */
			runs.back().add(args[ti].runs.back());
		}

#ifdef USE_PTHREAD
		free(threads);
		free(thread_attr);
#endif

		delete[] args;

		ui::get()->d2_align_sample_stop();
	}

private:
	/*
	 * Re-evaluate all stored runs against the current cluster (after a
	 * rescale), preserving their transformations.
	 */
	void rediff() {
		std::vector<diff_trans> t_array;

		for (unsigned int r = 0; r < runs.size(); r++) {
			t_array.push_back(runs[r].offset);
		}

		runs.clear();

		for (unsigned int r = 0; r < t_array.size(); r++)
			diff(si, t_array[r], ax_count, frame);
	}

public:
	/*
	 * Nonzero when the testing run improves on the best run, treating a
	 * finite error as better than a non-finite one.
	 */
	int better() {
		assert(runs.size() >= 2);
		assert(runs[0].offset.scale() == runs[1].offset.scale());

		return (runs[1].get_error() < runs[0].get_error()
		     || (!finite(runs[0].get_error()) && finite(runs[1].get_error())));
	}

	/* Strict comparison only; non-finite errors never win. */
	int better_defined() {
		assert(runs.size() >= 2);
		assert(runs[0].offset.scale() == runs[1].offset.scale());

		return (runs[1].get_error() < runs[0].get_error());
	}

	diff_stat_generic(transformation::elem_bounds_t e)
			: runs(), old_runs(), perturb_multipliers() {
		elem_bounds = e;
	}

	run_index get_run_index(unsigned int perturb_index) {
		return perturb_index;
	}

	run &get_run(unsigned int perturb_index) {
		run_index index = get_run_index(perturb_index);

		assert(old_runs.count(index));
		return old_runs[index];
	}

	/* Rescale the (single) current run and re-evaluate on cluster _si. */
	void rescale(ale_pos scale, scale_cluster _si) {
		assert(runs.size() == 1);

		si = _si;

		runs[0].rescale(scale);

		rediff();
	}

	~diff_stat_generic() {
	}

	diff_stat_generic &operator=(const diff_stat_generic &dst) {
		/*
		 * Copy run information.
		 */
		runs = dst.runs;
		old_runs = dst.old_runs;

		/*
		 * Copy diff variables
		 */
		si = dst.si;
		ax_count = dst.ax_count;
		frame = dst.frame;
		perturb_multipliers = dst.perturb_multipliers;
		elem_bounds = dst.elem_bounds;

		return *this;
	}

	diff_stat_generic(const diff_stat_generic &dst) : runs(), old_runs(),
			perturb_multipliers() {
		operator=(dst);
	}

	void set_elem_bounds(transformation::elem_bounds_t e) {
		elem_bounds = e;
	}

	ale_accum get_result() {
		assert(runs.size() == 1);
		return runs[0].result;
	}

	ale_accum get_divisor() {
		assert(runs.size() == 1);
		return runs[0].divisor;
	}

	diff_trans get_offset() {
		assert(runs.size() == 1);
		return runs[0].offset;
	}

	int operator!=(diff_stat_generic &param) {
		return (get_error() != param.get_error());
	}

	int operator==(diff_stat_generic &param) {
		return !(operator!=(param));
	}

	ale_pos get_error_perturb() {
		assert(runs.size() == 1);
		return runs[0].get_error_perturb();
	}

	ale_accum get_error() const {
		assert(runs.size() == 1);
		return runs[0].get_error();
	}

public:
	/*
	 * Get the set of transformations produced by a given perturbation
	 */
	void get_perturb_set(std::vector<diff_trans> *set,
			ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
			ale_pos *current_bd, ale_pos *modified_bd,
			std::vector<ale_pos> multipliers = std::vector<ale_pos>()) {

		assert(runs.size() == 1);

		diff_trans test_t(diff_trans::eu_identity());

		/*
		 * Translational or euclidean transformation
		 */

		for (unsigned int i = 0; i < 2; i++)
		for (unsigned int s = 0; s < 2; s++) {

			if (!multipliers.size())
				multipliers.push_back(1);

			assert(finite(multipliers[0]));

			test_t = get_offset();

			// test_t.eu_modify(i, (s ? -adj_p : adj_p) * multipliers[0]);
			test_t.translate((i ? point(1, 0) : point(0, 1))
					* (s ? -adj_p : adj_p)
					* multipliers[0]);

			test_t.snap(adj_p / 2);

			set->push_back(test_t);
			multipliers.erase(multipliers.begin());
		}

		if (alignment_class > 0)
		for (unsigned int s = 0; s < 2; s++) {

			if (!multipliers.size())
				multipliers.push_back(1);

			assert(multipliers.size());
			assert(finite(multipliers[0]));

			/* Rotation capped at rot_max. */
			if (!(adj_o * multipliers[0] < rot_max))
				return;

			ale_pos adj_s = (s ? 1 : -1) * adj_o * multipliers[0];

			test_t = get_offset();

			run_index ori = get_run_index(set->size());
			point centroid = point::undefined();

			if (!old_runs.count(ori))
				ori = get_run_index(0);

			/* Prefer the cached error centroid as the rotation
			 * center, rescaled to the current transform scale. */
			if (!centroid.finite() && old_runs.count(ori)) {
				centroid = old_runs[ori].get_error_centroid();

				if (!centroid.finite())
					centroid = old_runs[ori].get_centroid();

				centroid *= test_t.scale()
					  / old_runs[ori].offset.scale();
			}

			if (!centroid.finite() && !test_t.is_projective()) {
				test_t.eu_modify(2, adj_s);
			} else if (!centroid.finite()) {
				/* Fall back to the image center. */
				centroid = point(si.input->height() / 2,
						 si.input->width() / 2);

				test_t.rotate(centroid + si.accum->offset(),
						adj_s);
			} else {
				test_t.rotate(centroid + si.accum->offset(),
						adj_s);
			}

			test_t.snap(adj_p / 2);

			set->push_back(test_t);
			multipliers.erase(multipliers.begin());
		}

		if (alignment_class == 2) {

			/*
			 * Projective transformation
			 */

			for (unsigned int i = 0; i < 4; i++)
			for (unsigned int j = 0; j < 2; j++)
			for (unsigned int s = 0; s < 2; s++) {

				if (!multipliers.size())
					multipliers.push_back(1);

				assert(multipliers.size());
				assert(finite(multipliers[0]));

				ale_pos adj_s = (s ? -1 : 1) * adj_p * multipliers [0];

				test_t = get_offset();

				if (perturb_type == 0)
					test_t.gpt_modify_bounded(j, i, adj_s, elem_bounds.scale_to_bounds(si.accum->height(), si.accum->width()));
				else if (perturb_type == 1)
					test_t.gr_modify(j, i, adj_s);
				else
					assert(0);

				test_t.snap(adj_p / 2);

				set->push_back(test_t);
				multipliers.erase(multipliers.begin());
			}
		}

		/*
		 * Barrel distortion
		 */

		if (bda_mult != 0 && adj_b != 0) {

			for (unsigned int d = 0; d < get_offset().bd_count(); d++)
			for (unsigned int s = 0; s < 2; s++) {

				if (!multipliers.size())
					multipliers.push_back(1);

				assert (multipliers.size());
				assert (finite(multipliers[0]));

				ale_pos adj_s = (s ? -1 : 1) * adj_b * multipliers[0];

				/* Respect the barrel-distortion rate limit. */
				if (bda_rate > 0 && fabs(modified_bd[d] + adj_s - current_bd[d]) > bda_rate)
					continue;

				diff_trans test_t = get_offset();

				test_t.bd_modify(d, adj_s);

				set->push_back(test_t);
			}
		}
	}

	/* Accept the testing run as the new best. */
	void confirm() {
		assert(runs.size() == 2);
		runs[0] = runs[1];
		runs.pop_back();
	}

	/* Reject the testing run. */
	void discard() {
		assert(runs.size() == 2);
		runs.pop_back();
	}

	/*
	 * Test the current perturbation set, updating per-perturbation step
	 * multipliers, caching results in old_runs, and adopting the first
	 * reliably-better transformation found.  `stable` skips entries whose
	 * cached result already matches.
	 */
	void perturb_test(ale_pos perturb, ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
			ale_pos *current_bd, ale_pos *modified_bd, int stable) {

		assert(runs.size() == 1);

		std::vector<diff_trans> t_set;

		if (perturb_multipliers.size() == 0) {
			/* First call: derive initial multipliers from the
			 * error-perturbation estimate of each candidate. */
			get_perturb_set(&t_set, adj_p, adj_o, adj_b,
					current_bd, modified_bd);

			for (unsigned int i = 0; i < t_set.size(); i++) {
				diff_stat_generic test = *this;

				test.diff(si, t_set[i], ax_count, frame);

				test.confirm();

				if (finite(test.get_error_perturb()))
					perturb_multipliers.push_back(adj_p / test.get_error_perturb());
				else
					perturb_multipliers.push_back(1);
			}

			t_set.clear();
		}

		get_perturb_set(&t_set, adj_p, adj_o, adj_b, current_bd, modified_bd,
				perturb_multipliers);

		int found_unreliable = 1;
		std::vector<int> tested(t_set.size(), 0);

		for (unsigned int i = 0; i < t_set.size(); i++) {
			run_index ori = get_run_index(i);

			/*
			 * Check for stability
			 */

			if (stable
			 && old_runs.count(ori)
			 && old_runs[ori].offset == t_set[i])
				tested[i] = 1;
		}

		std::vector<ale_pos> perturb_multipliers_original = perturb_multipliers;

		while (found_unreliable) {

			found_unreliable = 0;

			for (unsigned int i = 0; i < t_set.size(); i++) {

				if (tested[i])
					continue;

				diff(si, t_set[i], ax_count, frame);

				if (!(i < perturb_multipliers.size())
				 || !finite(perturb_multipliers[i])) {

					perturb_multipliers.resize(i + 1);

					if (finite(perturb_multipliers[i])
					 && finite(adj_p)
					 && finite(adj_p / runs[1].get_error_perturb())) {
						perturb_multipliers[i] =
							adj_p / runs[1].get_error_perturb();

						found_unreliable = 1;
					} else
						perturb_multipliers[i] = 1;

					continue;
				}

				run_index ori = get_run_index(i);

				/* Cache this perturbation's result. */
				if (old_runs.count(ori) == 0)
					old_runs.insert(std::pair<run_index, run>(ori, runs[1]));
				else
					old_runs[ori] = runs[1];

				if (finite(perturb_multipliers_original[i])
				 && finite(runs[1].get_error_perturb())
				 && finite(adj_p)
				 && finite(perturb_multipliers_original[i] * adj_p / runs[1].get_error_perturb()))
					perturb_multipliers[i] = perturb_multipliers_original[i]
						* adj_p / runs[1].get_error_perturb();
				else
					perturb_multipliers[i] = 1;

				tested[i] = 1;

				/* Adopt an improvement whose multiplier did
				 * not blow up (factor < 2). */
				if (better()
				 && runs[1].get_error() < runs[0].get_error()
				 && perturb_multipliers[i]
				  / perturb_multipliers_original[i] < 2) {
					runs[0] = runs[1];
					runs.pop_back();
					return;
				}
			}

			if (runs.size() > 1)
				runs.pop_back();

			if (!found_unreliable)
				return;
		}
	}
};
/* Convenience aliases: single-transform and multi-element statistics. */
typedef diff_stat_generic<trans_single> diff_stat_t;
typedef diff_stat_generic<trans_multi> diff_stat_multi;
/*
 * Adjust exposure for an aligned frame B against reference A.
 *
 * Expects full-LOD images.
 *
 * Note: This method does not use any weighting, by certainty or
 * otherwise, in the first exposure registration pass, as any bias of
 * weighting according to color may also bias the exposure registration
 * result; it does use weighting, including weighting by certainty
 * (even if certainty weighting is not specified), in the second pass,
 * under the assumption that weighting by certainty improves handling
 * of out-of-range highlights, and that bias of exposure measurements
 * according to color may generally be less harmful after spatial
 * registration has been performed.
 */
/*
 * Threaded accumulator of weighted pixel sums over the overlap of an
 * aligned input frame and the reference image, used to compute an
 * exposure ratio (asum / bsum).  Built on thread::decompose_domain; each
 * subdomain accumulates into its own slot, merged in finish_subdomains.
 *
 * NOTE(review): references a `c.certainty` / `c.ax_parameters` member
 * that struct scale_cluster (above) does not declare, and file-scope
 * `interpolant` — mid-port inconsistencies preserved as-is.
 * NOTE(review): closing braces (including the final `};`) restored;
 * verify against upstream.
 */
class exposure_ratio_iterate : public thread::decompose_domain {
	pixel_accum *asums;	/* per-thread partial sums (reference) */
	pixel_accum *bsums;	/* per-thread partial sums (input) */
	pixel_accum *asum;	/* caller's output accumulator (reference) */
	pixel_accum *bsum;	/* caller's output accumulator (input) */
	struct scale_cluster c;
	const transformation &t;
	int ax_count;
	int pass_number;	/* 0: unweighted pass; 1: certainty-weighted */
protected:
	void prepare_subdomains(unsigned int N) {
		asums = new pixel_accum[N];
		bsums = new pixel_accum[N];
	}

	void subdomain_algorithm(unsigned int thread,
			int i_min, int i_max, int j_min, int j_max) {

		point offset = c.accum->offset();

		for (mc_iterate m(i_min, i_max, j_min, j_max, thread); !m.done(); m++) {

			unsigned int i = (unsigned int) m.get_i();
			unsigned int j = (unsigned int) m.get_j();

			if (ref_excluded(i, j, offset, c.ax_parameters, ax_count))
				continue;

			/*
			 * Transform
			 */

			struct point q;

			q = (c.input_scale < 1.0 && interpolant == NULL)
			  ? t.scaled_inverse_transform(
				point(i + offset[0], j + offset[1]))
			  : t.unscaled_inverse_transform(
				point(i + offset[0], j + offset[1]));

			/*
			 * Check that the transformed coordinates are within
			 * the boundaries of array c.input, that they are not
			 * subject to exclusion, and that the weight value in
			 * the accumulated array is nonzero.
			 */

			if (input_excluded(q[0], q[1], c.ax_parameters, ax_count))
				continue;

			if (q[0] >= 0
			 && q[0] <= c.input->height() - 1
			 && q[1] >= 0
			 && q[1] <= c.input->width() - 1
			 && ((pixel) c.certainty->get_pixel(i, j)).minabs_norm() != 0) {
				pixel a = c.accum->get_pixel(i, j);
				pixel b;

				b = c.input->get_bl(q);

				/* Pass 1 weights by alignment weights and the
				 * geometric mean of both certainties; pass 0
				 * is unweighted (see comment above class). */
				pixel weight = ((c.aweight && pass_number)
						? (pixel) c.aweight->get_pixel(i, j)
						: pixel(1, 1, 1))
					     * (pass_number
						? psqrt(c.certainty->get_pixel(i, j)
						      * c.input_certainty->get_bl(q, 1))
						: pixel(1, 1, 1));

				asums[thread] += a * weight;
				bsums[thread] += b * weight;
			}
		}
	}

	void finish_subdomains(unsigned int N) {
		for (unsigned int n = 0; n < N; n++) {
			*asum += asums[n];
			*bsum += bsums[n];
		}
		delete[] asums;
		delete[] bsums;
	}
public:
	/* Iterate over the full accumulated image. */
	exposure_ratio_iterate(pixel_accum *_asum,
			       pixel_accum *_bsum,
			       struct scale_cluster _c,
			       const transformation &_t,
			       int _ax_count,
			       int _pass_number) : decompose_domain(0, _c.accum->height(),
								    0, _c.accum->width()),
						   t(_t) {

		asum = _asum;
		bsum = _bsum;
		c = _c;
		ax_count = _ax_count;
		pass_number = _pass_number;
	}

	/* Iterate over an explicit element-bounds subregion. */
	exposure_ratio_iterate(pixel_accum *_asum,
			       pixel_accum *_bsum,
			       struct scale_cluster _c,
			       const transformation &_t,
			       int _ax_count,
			       int _pass_number,
			       transformation::elem_bounds_int_t b) : decompose_domain(b.imin, b.imax,
										       b.jmin, b.jmax),
								      t(_t) {

		asum = _asum;
		bsum = _bsum;
		c = _c;
		ax_count = _ax_count;
		pass_number = _pass_number;
	}
};
/*
 * Compute and apply the exposure multiplier for frame `m`.  With
 * _exp_register == 2 only camera metadata is used; otherwise the ratio of
 * weighted pixel sums (asum / bsum) over the aligned overlap scales the
 * existing multiplier.  Non-finite results are silently discarded.
 *
 * NOTE(review): closing braces restored (lost in extraction).
 */
static void set_exposure_ratio(unsigned int m, struct scale_cluster c,
		const transformation &t, int ax_count, int pass_number) {

	if (_exp_register == 2) {
		/*
		 * Use metadata only.
		 */
		ale_real gain_multiplier = image_rw::exp(m).get_gain_multiplier();
		pixel multiplier = pixel(gain_multiplier, gain_multiplier, gain_multiplier);

		image_rw::exp(m).set_multiplier(multiplier);
		ui::get()->exp_multiplier(multiplier[0],
					  multiplier[1],
					  multiplier[2]);

		return;
	}

	pixel_accum asum(0, 0, 0), bsum(0, 0, 0);

	exposure_ratio_iterate eri(&asum, &bsum, c, t, ax_count, pass_number);
	eri.run();

	// std::cerr << (asum / bsum) << " ";

	pixel_accum new_multiplier;

	new_multiplier = asum / bsum * image_rw::exp(m).get_multiplier();

	/* Only apply a fully-finite multiplier. */
	if (finite(new_multiplier[0])
	 && finite(new_multiplier[1])
	 && finite(new_multiplier[2])) {
		image_rw::exp(m).set_multiplier(new_multiplier);
		ui::get()->exp_multiplier(new_multiplier[0],
					  new_multiplier[1],
					  new_multiplier[2]);
	}
}
/*
 * Refine the alignment of one multi-alignment element: repeatedly
 * perturb-test `here`, halving the perturbation amount after three stable
 * iterations and moving to a finer level of detail (lod--) when the
 * perturbation becomes small enough.  On return, `here` is rescaled back
 * to full detail (lod 0) if needed.
 *
 * NOTE(review): the `adj_o` and `adj_b` parameters are shadowed by locals
 * recomputed each iteration inside the loop, and `astate` is unused in
 * the visible body — confirm against upstream.
 * NOTE(review): references file-scope `bda_mult`, `lod_preferred`, and
 * `scale_factor` with no visible declaration; closing braces restored
 * (lost in extraction).
 */
static diff_stat_t _align_element(ale_pos perturb, ale_pos local_lower,
		scale_cluster *scale_clusters, diff_stat_t here,
		ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
		ale_pos *current_bd, ale_pos *modified_bd,
		astate_t *astate, int lod, scale_cluster si) {

	/*
	 * Run initial tests to get perturbation multipliers and error
	 * centroids.
	 */

	std::vector<d2::trans_single> t_set;

	here.get_perturb_set(&t_set, adj_p, adj_o, adj_b, current_bd, modified_bd);

	int stable_count = 0;

	while (perturb >= local_lower) {

		ui::get()->alignment_dims(scale_clusters[lod].accum->height(), scale_clusters[lod].accum->width(),
				scale_clusters[lod].input->height(), scale_clusters[lod].input->width());

		/*
		 * Orientational adjustment value in degrees.
		 *
		 * Since rotational perturbation is now specified as an
		 * arclength, we have to convert.
		 */

		ale_pos adj_o = 2 * (double) perturb
			      / sqrt(pow(scale_clusters[0].input->height(), 2)
				   + pow(scale_clusters[0].input->width(), 2))
			      * 180
			      / M_PI;

		/*
		 * Barrel distortion adjustment value
		 */

		ale_pos adj_b = perturb * bda_mult;

		trans_single old_offset = here.get_offset();

		here.perturb_test(perturb, adj_p, adj_o, adj_b, current_bd, modified_bd,
				stable_count);

		if (here.get_offset() == old_offset)
			stable_count++;
		else
			stable_count = 0;

		/* Three stable iterations: halve perturbation and possibly
		 * refine the level of detail. */
		if (stable_count == 3) {

			stable_count = 0;

			perturb *= 0.5;

			if (lod > 0
			 && lod > lrint(log(perturb) / log(2)) - lod_preferred) {

				/*
				 * Work with images twice as large
				 */

				lod--;
				si = scale_clusters[lod];

				/*
				 * Rescale the transforms.
				 */

				ale_pos rescale_factor = (double) scale_factor
						       / (double) pow(2, lod)
						       / (double) here.get_offset().scale();

				here.rescale(rescale_factor, si);

			} else {
				adj_p = perturb / pow(2, lod);
			}

			/*
			 * Announce changes
			 */

			ui::get()->alignment_perturbation_level(perturb, lod);
		}

		ui::get()->set_match(here.get_error());
		ui::get()->set_offset(here.get_offset());
	}

	if (lod > 0) {
		ale_pos rescale_factor = (double) scale_factor
				       / (double) here.get_offset().scale();

		here.rescale(rescale_factor, scale_clusters[0]);
	}

	return here;
}
/*
 * Align frame m against the reference.
 *
 * XXX: the transformation class currently combines ordinary
 * transformations with scaling.  This is somewhat convenient for
 * some things, but can also be confusing.  This method, _align(), is
 * one case where special care must be taken to ensure that the scale
 * is always set correctly (by using the 'rescale' method).
 */
1203 static diff_stat_multi _align(int m, int local_gs, astate_t *astate) {
1205 ale_image input_frame = astate->get_input_frame();
1208 * Local upper/lower data, possibly dependent on image
1209 * dimensions.
1212 ale_pos local_lower, local_upper;
1213 ale_accum local_gs_mo;
1216 * Select the minimum dimension as the reference.
1219 ale_pos reference_size = input_frame->height();
1220 if (input_frame->width() < reference_size)
1221 reference_size = input_frame->width();
1222 ale_accum reference_area = input_frame->height()
1223 * input_frame->width();
1225 if (perturb_lower_percent)
1226 local_lower = (double) perturb_lower
1227 * (double) reference_size
1228 * (double) 0.01
1229 * (double) scale_factor;
1230 else
1231 local_lower = perturb_lower;
1233 if (perturb_upper_percent)
1234 local_upper = (double) perturb_upper
1235 * (double) reference_size
1236 * (double) 0.01
1237 * (double) scale_factor;
1238 else
1239 local_upper = perturb_upper;
1241 local_upper = pow(2, floor(log(local_upper) / log(2)));
1243 if (gs_mo_percent)
1244 local_gs_mo = (double) _gs_mo
1245 * (double) reference_area
1246 * (double) 0.01
1247 * (double) scale_factor;
1248 else
1249 local_gs_mo = _gs_mo;
1252 * Logarithms aren't exact, so we divide repeatedly to discover
1253 * how many steps will occur, and pass this information to the
1254 * user interface.
1257 int step_count = 0;
1258 double step_variable = local_upper;
1259 while (step_variable >= local_lower) {
1260 step_variable /= 2;
1261 step_count++;
1264 ale_pos perturb = local_upper;
1266 if (_keep) {
1267 kept_t[latest] = latest_t;
1268 kept_ok[latest] = latest_ok;
1272 * Determine how many levels of detail should be prepared, by
1273 * calculating the initial (largest) value for the
1274 * level-of-detail variable.
1277 int lod = lrint(log(perturb) / log(2)) - lod_preferred;
1279 if (lod < 0)
1280 lod = 0;
1282 while (lod > 0 && (reference_image->width() < pow(2, lod) * min_dimension
1283 || reference_image->height() < pow(2, lod) * min_dimension))
1284 lod--;
1286 unsigned int steps = (unsigned int) lod + 1;
1289 * Prepare multiple levels of detail.
1292 int local_ax_count;
1293 struct scale_cluster *scale_clusters = init_clusters(m,
1294 scale_factor, input_frame, steps,
1295 &local_ax_count);
1297 #error add check for non-NULL return
1300 * Initialize the default initial transform
1303 astate->init_default();
1306 * Set the default transformation.
1309 transformation offset = astate->get_default();
1312 * Establish boundaries
1315 offset.set_current_bounds(reference_image);
1317 ui::get()->alignment_degree_max(offset.get_coordinate(offset.stack_depth() - 1).degree);
1319 if (offset.stack_depth() == 1) {
1320 ui::get()->set_steps(step_count, 0);
1321 } else {
1322 ui::get()->set_steps(offset.get_coordinate(offset.stack_depth() - 1).degree + 1, 1);
1326 * Load any file-specified transformations
1329 for (unsigned int index = 0; index < offset.stack_depth(); index++) {
1330 int is_default = 1;
1331 unsigned int index_2;
1332 offset.set_current_index(index);
1334 offset = tload_next(tload, alignment_class == 2,
1335 offset,
1336 &is_default, offset.get_current_index() == 0);
1338 index_2 = offset.get_current_index();
1340 if (index_2 > index) {
1341 for (unsigned int index_3 = index; index_3 < index_2; index_3++)
1342 astate->set_is_default(index_3, 1);
1344 index = index_2;
1347 astate->set_is_default(index, is_default);
1350 offset.set_current_index(0);
1352 astate->init_frame_alignment_primary(&offset, lod, perturb);
1355 * Control point alignment
1358 if (local_gs == 5) {
1360 transformation o = offset;
1363 * Determine centroid data
1366 point current, previous;
1367 centroids(m, &current, &previous);
1369 if (current.defined() && previous.defined()) {
1370 o = orig_t;
1371 o.set_dimensions(input_frame);
1372 o.translate((previous - current) * o.scale());
1373 current = previous;
1377 * Determine rotation for alignment classes other than translation.
1380 ale_pos lowest_error = cp_rms_error(m, o);
1382 ale_pos rot_lower = 2 * (double) local_lower
1383 / sqrt(pow(scale_clusters[0].input->height(), 2)
1384 + pow(scale_clusters[0].input->width(), 2))
1385 * 180
1386 / M_PI;
1388 if (alignment_class > 0)
1389 for (double rot = 30; rot > rot_lower; rot /= 2)
1390 for (double srot = -rot; srot < rot * 1.5; srot += rot * 2) {
1391 int is_improved = 1;
1392 while (is_improved) {
1393 is_improved = 0;
1394 transformation test_t = o;
1396 * XXX: is this right?
1398 test_t.rotate(current * o.scale(), srot);
1399 ale_pos test_v = cp_rms_error(m, test_t);
1401 if (test_v < lowest_error) {
1402 lowest_error = test_v;
1403 o = test_t;
1404 srot += 3 * rot;
1405 is_improved = 1;
1411 * Determine projective parameters through a local
1412 * minimum search.
1415 if (alignment_class == 2) {
1416 ale_pos adj_p = lowest_error;
1418 if (adj_p < local_lower)
1419 adj_p = local_lower;
1421 while (adj_p >= local_lower) {
1422 transformation test_t = o;
1423 int is_improved = 1;
1424 ale_pos test_v;
1425 ale_pos adj_s;
1427 while (is_improved) {
1428 is_improved = 0;
1430 for (int i = 0; i < 4; i++)
1431 for (int j = 0; j < 2; j++)
1432 for (adj_s = -adj_p; adj_s <= adj_p; adj_s += 2 * adj_p) {
1434 test_t = o;
1436 if (perturb_type == 0)
1437 test_t.gpt_modify(j, i, adj_s);
1438 else if (perturb_type == 1)
1439 test_t.gr_modify(j, i, adj_s);
1440 else
1441 assert(0);
1443 test_v = cp_rms_error(m, test_t);
1445 if (test_v < lowest_error) {
1446 lowest_error = test_v;
1447 o = test_t;
1448 adj_s += 3 * adj_p;
1449 is_improved = 1;
1453 adj_p /= 2;
1459 * Pre-alignment exposure adjustment
1462 if (_exp_register) {
1463 ui::get()->exposure_1();
1464 set_exposure_ratio(m, scale_clusters[0], offset, local_ax_count, 0);
1468 * Scale transform for lod
1471 for (int lod_ = 0; lod_ < lod; lod_++) {
1472 transformation s = offset;
1473 transformation t = offset;
1475 t.rescale(1 / (double) 2);
1477 if (!(t.scaled_height() > 0 && t.scaled_height() < s.scaled_height())
1478 || !(t.scaled_width() > 0 && t.scaled_width() < s.scaled_width())) {
1479 perturb /= pow(2, lod - lod_);
1480 lod = lod_;
1481 break;
1482 } else {
1483 offset = t;
1487 ui::get()->set_offset(offset);
1489 struct scale_cluster si = scale_clusters[lod];
1492 * Projective adjustment value
1495 ale_pos adj_p = perturb / pow(2, lod);
1498 * Orientational adjustment value in degrees.
1500 * Since rotational perturbation is now specified as an
1501 * arclength, we have to convert.
1504 ale_pos adj_o = (double) 2 * (double) perturb
1505 / sqrt(pow((double) scale_clusters[0].input->height(), (double) 2)
1506 + pow((double) scale_clusters[0].input->width(), (double) 2))
1507 * (double) 180
1508 / M_PI;
1511 * Barrel distortion adjustment value
1514 ale_pos adj_b = perturb * bda_mult;
1517 * Global search overlap requirements.
1520 local_gs_mo = (double) local_gs_mo / pow(pow(2, lod), 2);
1523 * Alignment statistics.
1526 diff_stat_t here(offset.elem_bounds());
1529 * Current difference (error) value
1532 ui::get()->prematching();
1533 here.diff(si, offset.get_current_element(), local_ax_count, m);
1534 ui::get()->set_match(here.get_error());
1537 * Current and modified barrel distortion parameters
1540 ale_pos current_bd[BARREL_DEGREE];
1541 ale_pos modified_bd[BARREL_DEGREE];
1542 offset.bd_get(current_bd);
1543 offset.bd_get(modified_bd);
1546 * Translational global search step
1549 if (perturb >= local_lower && local_gs != 0 && local_gs != 5
1550 && (local_gs != 6 || astate->get_is_default(0))) {
1552 ui::get()->global_alignment(perturb, lod);
1553 ui::get()->gs_mo(local_gs_mo);
1555 test_globals(&here, si, offset, local_gs, adj_p,
1556 local_ax_count, m, local_gs_mo, perturb);
1558 ui::get()->set_match(here.get_error());
1559 ui::get()->set_offset(here.get_offset());
1563 * Perturbation adjustment loop.
1566 offset.set_current_element(here.get_offset());
1568 for (unsigned int i = 0; i < offset.stack_depth(); i++) {
1570 ui::get()->aligning_element(i, offset.stack_depth());
1572 offset.set_current_index(i);
1574 ui::get()->start_multi_alignment_element(offset);
1576 ui::get()->set_offset(offset);
1578 if (i > 0) {
1579 astate->init_frame_alignment_nonprimary(&offset, lod, perturb, i);
1581 if (_exp_register == 1) {
1582 ui::get()->exposure_1();
1583 pixel_accum asum(0, 0, 0), bsum(0, 0, 0);
1584 exposure_ratio_iterate eri(&asum, &bsum, scale_clusters[0], offset, local_ax_count, 0,
1585 offset.elem_bounds().scale_to_bounds(scale_clusters[0].accum->height(),
1586 scale_clusters[0].accum->width()));
1588 eri.run();
1589 pixel_accum tr = asum / bsum;
1590 ui::get()->exp_multiplier(tr[0], tr[1], tr[2]);
1591 offset.set_tonal_multiplier(tr);
1595 int e_lod = lod;
1596 int e_div = offset.get_current_coordinate().degree;
1597 ale_pos e_perturb = perturb;
1598 ale_pos e_adj_p = adj_p;
1599 ale_pos e_adj_b = adj_b;
1601 for (int d = 0; d < e_div; d++) {
1602 e_adj_b = 0;
1603 e_perturb *= 0.5;
1604 if (e_lod > 0) {
1605 e_lod--;
1606 } else {
1607 e_adj_p *= 0.5;
1611 if (i > 0) {
1613 d2::trans_multi::elem_bounds_t b = offset.elem_bounds();
1615 for (int dim_satisfied = 0; e_lod > 0 && !dim_satisfied; ) {
1616 int height = scale_clusters[e_lod].accum->height();
1617 int width = scale_clusters[e_lod].accum->width();
1619 d2::trans_multi::elem_bounds_int_t bi = b.scale_to_bounds(height, width);
1621 dim_satisfied = bi.satisfies_min_dim(min_dimension);
1623 if (!dim_satisfied) {
1624 e_lod--;
1625 e_adj_p *= 2;
1630 * Scale transform for lod
1633 for (int lod_ = 0; lod_ < e_lod; lod_++) {
1634 trans_single s = offset.get_element(i);
1635 trans_single t = offset.get_element(i);
1637 t.rescale(1 / (double) 2);
1639 if (!(t.scaled_height() > 0 && t.scaled_height() < s.scaled_height())
1640 || !(t.scaled_width() > 0 && t.scaled_width() < s.scaled_width())) {
1641 e_perturb /= pow(2, e_lod - lod_);
1642 e_lod = lod_;
1643 break;
1644 } else {
1645 offset.set_element(i, t);
1649 ui::get()->set_offset(offset);
1653 * Announce perturbation size
1656 ui::get()->aligning(e_perturb, e_lod);
1658 si = scale_clusters[e_lod];
1660 here.set_elem_bounds(offset.elem_bounds());
1662 here.diff(si, offset.get_current_element(), local_ax_count, m);
1664 here.confirm();
1666 here = check_ancestor_path(offset, si, here, local_ax_count, m);
1668 here = _align_element(e_perturb, local_lower, scale_clusters,
1669 here, e_adj_p, adj_o, e_adj_b, current_bd, modified_bd,
1670 astate, e_lod, si);
1672 offset.rescale(here.get_offset().scale() / offset.scale());
1674 offset.set_current_element(here.get_offset());
1676 if (i > 0 && _exp_register == 1) {
1677 if (ma_cert_satisfied(scale_clusters[0], offset, i)) {
1678 ui::get()->exposure_2();
1679 pixel_accum asum(0, 0, 0), bsum(0, 0, 0);
1680 exposure_ratio_iterate eri(&asum, &bsum, scale_clusters[0], offset, local_ax_count, 1,
1681 offset.elem_bounds().scale_to_bounds(scale_clusters[0].accum->height(),
1682 scale_clusters[0].accum->width()));
1684 eri.run();
1685 pixel_accum tr = asum / bsum;
1686 ui::get()->exp_multiplier(tr[0], tr[1], tr[2]);
1687 offset.set_tonal_multiplier(tr);
1688 } else {
1689 offset.set_tonal_multiplier(offset.get_element(offset.parent_index(i)).get_tonal_multiplier(point(0, 0)));
1691 } else if (_exp_register == 1) {
1692 ui::get()->exposure_2();
1693 set_exposure_ratio(m, scale_clusters[0], offset, local_ax_count, 1);
1696 ui::get()->set_offset(offset);
1698 if (i + 1 == offset.stack_depth())
1699 ui::get()->alignment_degree_complete(offset.get_coordinate(i).degree);
1700 else if (offset.get_coordinate(i).degree != offset.get_coordinate(i + 1).degree)
1701 ui::get()->alignment_degree_complete(offset.get_coordinate(i + 1).degree);
1704 offset.set_current_index(0);
1706 ui::get()->multi();
1707 offset.set_multi(reference_image, input_frame);
1710 * Recalculate error on whole frame.
1713 ui::get()->postmatching();
1714 diff_stat_generic<transformation> multi_here(offset.elem_bounds());
1715 multi_here.diff(scale_clusters[0], offset, local_ax_count, m);
1716 ui::get()->set_match(multi_here.get_error());
1719 * Free the level-of-detail structures
1722 final_clusters(scale_clusters, scale_factor, steps);
1725 * Ensure that the match meets the threshold.
1728 if (threshold_ok(multi_here.get_error())) {
1730 * Update alignment variables
1732 latest_ok = 1;
1733 astate->set_default(offset);
1734 astate->set_final(offset);
1735 ui::get()->alignment_match_ok();
1736 } else if (local_gs == 4) {
1739 * Align with outer starting points.
1743 * XXX: This probably isn't exactly the right thing to do,
1744 * since variables like old_initial_value have been overwritten.
1747 diff_stat_multi nested_result = _align(m, -1, astate);
1749 if (threshold_ok(nested_result.get_error())) {
1750 return nested_result;
1751 } else if (nested_result.get_error() < multi_here.get_error()) {
1752 multi_here = nested_result;
1755 if (is_fail_default)
1756 offset = astate->get_default();
1758 ui::get()->set_match(multi_here.get_error());
1759 ui::get()->alignment_no_match();
1761 } else if (local_gs == -1) {
1763 latest_ok = 0;
1764 latest_t = offset;
1765 return multi_here;
1767 } else {
1768 if (is_fail_default)
1769 offset = astate->get_default();
1770 latest_ok = 0;
1771 ui::get()->alignment_no_match();
1775 * Write the tonal registration multiplier as a comment.
1778 pixel trm = image_rw::exp(m).get_multiplier();
1779 tsave_trm(tsave, trm[0], trm[1], trm[2]);
1782 * Save the transformation information
1785 for (unsigned int index = 0; index < offset.stack_depth(); index++) {
1786 offset.set_current_index(index);
1788 tsave_next(tsave, offset, alignment_class == 2,
1789 offset.get_current_index() == 0);
1792 offset.set_current_index(0);
1795 * Update match statistics.
1798 match_sum += (1 - multi_here.get_error()) * (ale_accum) 100;
1799 match_count++;
1800 latest = m;
1801 latest_t = offset;
1803 return multi_here;
1806 int ale_align(ale_context ac, ale_image a, ale_image b, ale_trans start,
1807 ale_align_properties align_properties, ale_trans result) {
1808 #warning function unfinished.