src/align: Remove run constructors.
[libale.git] / src / align.c
blob2f3f2abc6b40e579350b3bb01c894b8912a0b83e
1 /*
2 * Copyright 2002, 2004, 2005, 2006, 2007, 2008, 2009 David Hilvert <dhilvert@gmail.com>
4 * This file is part of libale.
6 * libale is free software: you can redistribute it and/or modify it under the
7 * terms of the GNU Affero General Public License as published by the Free
8 * Software Foundation, either version 3 of the License, or (at your option)
9 * any later version.
11 * libale is distributed in the hope that it will be useful, but WITHOUT ANY
12 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
13 * FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
14 * more details.
16 * You should have received a copy of the GNU Affero General Public License
17 * along with libale. If not, see <http://www.gnu.org/licenses/>.
20 #include "libale.h"
23 * API Implementation.
27 * Types for scale clusters.
/*
 * Disabled experimental code: a per-LOD pair of min/max images for a
 * non-linear scaling pyramid.  Mirrors struct scale_cluster below but
 * keeps separate max/min variants of each plane.  Not compiled (#if 0);
 * kept for reference.
 * NOTE(review): the closing "};" of this struct appears to have been
 * lost in extraction; code retained verbatim.
 */
30 #if 0
33 * Experimental non-linear scaling approach.
36 struct nl_scale_cluster {
37 ale_image accum_max;
38 ale_image accum_min;
39 ale_image aweight_max;
40 ale_image aweight_min;
42 ale_pos input_scale;
43 ale_image input_certainty_max;
44 ale_image input_certainty_min;
45 ale_image input_max;
46 ale_image input_min;
48 #endif
/*
 * One level of detail for alignment: the accumulated reference image
 * (accum) with its alignment weights (aweight), and the input frame
 * (input) with its per-pixel certainty map (input_certainty).
 * input_scale records the scale factor applied to the input at this
 * level (see init_clusters below, which halves it per level).
 * NOTE(review): the closing "};" appears to have been lost in
 * extraction; code retained verbatim.
 */
50 struct scale_cluster {
51 ale_image accum;
52 ale_image aweight;
54 ale_pos input_scale;
55 ale_image input_certainty;
56 ale_image input;
58 #if 0
59 nl_scale_cluster *nl_scale_clusters;
60 #endif
/*
 * Build the level-of-detail (LOD) pyramid used by alignment.
 *
 * Allocates an array of 'steps' scale_cluster entries.  Level 0 holds
 * the reference image and the input frame (downscaled by scale_factor
 * when scale_factor < 1 and no interpolant is in use), plus an input
 * certainty map computed via ale_eval().  Each subsequent level halves
 * the accumulated image, the alignment weights, and — once the running
 * input_scale drops below 2 — the input and its certainty as well.
 *
 * Returns the malloc'd array, or NULL on allocation failure.  Ownership
 * of the array passes to the caller.
 *
 * NOTE(review): a later call site in this file invokes
 * init_clusters(m, scale_factor, input_frame, steps, &local_ax_count),
 * which does not match this signature — presumably mid-refactor; verify
 * against the intended API.  Several closing braces (end of the else
 * branch / for loop) appear lost in extraction; code retained verbatim.
 */
64 static struct scale_cluster *init_clusters(ale_context ac, int frame, ale_pos scale_factor,
65 ale_image input_frame, ale_trans input_trans, ale_image reference_image,
66 ale_image alignment_weights, unsigned int steps, ale_filter interpolant) {
68 int input_bayer = ale_trans_get_bayer(input_trans);
69 ale_real input_gamma = ale_trans_get_gamma(input_trans);
70 ale_certainty input_certainty = ale_trans_retain_certainty(input_trans);
71 ale_real input_black = ale_trans_get_black(input_trans);
73 unsigned int step;
76 * Allocate memory for the array.
79 struct scale_cluster *scale_clusters =
80 (struct scale_cluster *) malloc(steps * sizeof(struct scale_cluster));
82 if (!scale_clusters)
83 return NULL;
86 * Prepare images and exclusion regions for the highest level
87 * of detail.
90 scale_clusters[0].accum = reference_image;
92 ale_context_signal(ac, "ui_constructing_lod_clusters", 0.0);
94 scale_clusters[0].input_scale = scale_factor;
/* Downscale the input up front only when shrinking without an
 * interpolant; otherwise the original frame is used directly. */
95 if (scale_factor < 1.0 && interpolant == NULL)
96 scale_clusters[0].input = ale_scale_image(input_frame, scale_factor, ale_context_get_color_type(ac), input_gamma, input_bayer);
97 else
98 scale_clusters[0].input = input_frame;
100 scale_clusters[0].aweight = alignment_weights;
103 * Allocate and determine input frame certainty.
106 scale_clusters[0].input_certainty = ale_clone_image(scale_clusters[0].input, ale_context_get_certainty_type(ac));
/* If the input was pre-scaled above, gamma has already been applied,
 * so 1 is passed here instead of input_gamma. */
108 ale_eval("MAP_PIXEL(%0I, p, CERTAINTY(%1t, p, %2f, GET_AFFINE_PIXEL(%3i, p, %4f)))",
109 scale_clusters[0].input_certainty, input_black, input_certainty,
110 scale_clusters[0].input, (scale_factor < 1.0 && interpolant == NULL) ? 1 : input_gamma);
112 #if 0
113 init_nl_cluster(&(scale_clusters[0]));
114 #endif
117 * Prepare reduced-detail images and exclusion
118 * regions.
121 for (step = 1; step < steps; step++) {
122 ale_context_signal(ac, "ui_constructing_lod_clusters", step);
123 scale_clusters[step].accum = ale_scale_image(scale_clusters[step - 1].accum, 0.5, ale_context_get_color_type(ac), 1, ALE_BAYER_NONE);
124 scale_clusters[step].aweight = ale_scale_image_wt(scale_clusters[step - 1].aweight, 0.5, ale_context_get_weight_type(ac), ALE_BAYER_NONE);
126 ale_pos sf = scale_clusters[step - 1].input_scale;
128 scale_clusters[step].input_scale = sf / 2;
/* While the input is still >= 2x the target scale (or an interpolant
 * is in use), share the previous level's input; once it crosses 1.0,
 * scale from the original-gamma input; below that, simple halving. */
130 if (sf >= 2.0 || interpolant != NULL) {
131 scale_clusters[step].input = scale_clusters[step - 1].input;
132 scale_clusters[step].input_certainty = scale_clusters[step - 1].input_certainty;
133 } else if (sf >= 1.0) {
134 scale_clusters[step].input = ale_scale_image(scale_clusters[step - 1].input, sf / 2, ale_context_get_color_type(ac), input_gamma, input_bayer);
135 scale_clusters[step].input_certainty = ale_scale_image_wt(scale_clusters[step - 1].input_certainty, sf / 2, ale_context_get_certainty_type(ac), input_bayer);
136 } else {
137 scale_clusters[step].input = ale_scale_image(scale_clusters[step - 1].input, 0.5, ale_context_get_color_type(ac), 1, ALE_BAYER_NONE);
138 scale_clusters[step].input_certainty = ale_scale_image_wt(scale_clusters[step - 1].input_certainty, 0.5, ale_context_get_certainty_type(ac), ALE_BAYER_NONE);
141 #if 0
142 init_nl_cluster(&(scale_clusters[step]));
143 #endif
/* Balance of ale_trans_retain_certainty() above. */
146 ale_certainty_release(input_certainty);
148 return scale_clusters;
152 * Not-quite-symmetric difference function. Determines the difference in areas
153 * where the arrays overlap. Uses the reference array's notion of pixel positions.
155 * For the purposes of determining the difference, this function divides each
156 * pixel value by the corresponding image's average pixel magnitude, unless we
157 * are:
159 * a) Extending the boundaries of the image, or
161 * b) following the previous frame's transform
163 * If we are doing monte-carlo pixel sampling for alignment, we
164 * typically sample a subset of available pixels; otherwise, we sample
165 * all pixels.
/*
 * Statistics for one alignment evaluation ("run") of a candidate
 * transformation:
 *   offset            - transformation under evaluation
 *   result, divisor   - error numerator/denominator (see run_get_error)
 *   max, min          - bounding box of sampled pixels
 *   centroid[2], centroid_divisor
 *                     - weight-weighted centroid of the sampled area
 *   de_centroid[2], de_centroid_v, de_sum
 *                     - error-difference centroid and displacement sums
 *                       (used to estimate perturbation scale)
 */
169 typedef struct {
171 ale_trans offset;
173 ale_accum result;
174 ale_accum divisor;
176 point max, min;
177 ale_accum centroid[2], centroid_divisor;
178 ale_accum de_centroid[2], de_centroid_v, de_sum;
179 } run;
181 static void run_init(run *r, ale_trans offset) {
183 r->result = 0;
184 r->divisor = 0;
186 r->min = point_posinf(2);
187 r->max = point_neginf(2);
189 r->centroid[0] = 0;
190 r->centroid[1] = 0;
191 r->centroid_divisor = 0;
193 r->de_centroid[0] = 0;
194 r->de_centroid[1] = 0;
196 r->de_centroid_v = 0;
198 r->de_sum = 0;
200 r->offset = offset;
203 static void run_add(const run &_run) {
204 result += _run.result;
205 divisor += _run.divisor;
207 for (int d = 0; d < 2; d++) {
208 if (min[d] > _run.min[d])
209 min[d] = _run.min[d];
210 if (max[d] < _run.max[d])
211 max[d] = _run.max[d];
212 centroid[d] += _run.centroid[d];
213 de_centroid[d] += _run.de_centroid[d];
216 centroid_divisor += _run.centroid_divisor;
217 de_centroid_v += _run.de_centroid_v;
218 de_sum += _run.de_sum;
/*
 * NOTE(review): leftover C++ copy constructor of the removed 'run'
 * class (the commit removes run constructors): it has no return type,
 * uses a member-initializer list and a C++ reference parameter, and
 * calls the old init()/add() members — none of which compiles as C.
 * Pending conversion to the pointer-based convention used by
 * run_init() (re-init from _run's offset, then fold _run in), or
 * removal if no caller remains.  Code retained verbatim.
 */
221 static run_run(const run &_run) : offset(_run.offset) {
224 * Initialize
226 init(_run.offset);
229 * Add
231 add(_run);
/*
 * NOTE(review): leftover C++ copy-assignment operator of the removed
 * 'run' class: returns a reference, takes a reference parameter, and
 * uses init()/add()/*this — not valid C.  Pending conversion to the
 * pointer-based convention used by run_init() (re-init the destination
 * from _run's offset, then fold _run in, returning the destination
 * pointer), or removal if unused.  Code retained verbatim.
 */
234 static run &run_operator=(const run &_run) {
237 * Initialize
239 init(_run.offset);
242 * Add
244 add(_run);
246 return *this;
249 static ale_accum run_get_error() const {
250 return pow(result / divisor, 1/(ale_accum) metric_exponent);
/*
 * run_sample(): accumulate the contribution of one reference pixel
 * (i, j) into the run statistics.  't' is the transformed position in
 * the input frame; 'u' is an optional second position (when defined, a
 * pair is sampled and their error difference feeds the de_* sums used
 * for perturbation estimation).
 *
 * NOTE(review): still mid C++ -> C conversion — it takes a C++
 * reference parameter, calls methods on pixel/point/image objects, and
 * refers to run members (min, max, centroid, result, ...) and globals
 * (interpolant, certainty_weights, channel_alignment_type,
 * metric_exponent, ale_real_weight_floor) with no object.  Several
 * closing braces appear lost in extraction.  Code retained verbatim.
 */
253 static void run_sample(int f, scale_cluster c, int i, int j, point t, point u,
254 const run &comparison) {
256 pixel pa = c.accum->get_pixel(i, j);
258 ale_real this_result[2] = { 0, 0 };
259 ale_real this_divisor[2] = { 0, 0 };
261 pixel p[2];
262 pixel weight[2];
263 weight[0] = pixel(1, 1, 1);
264 weight[1] = pixel(1, 1, 1);
266 pixel tm = offset.get_tonal_multiplier(point(i, j) + c.accum->offset());
/* Sample the input: through the interpolant when one is configured
 * (bailing out if its weight is below the floor), else bilinearly. */
268 if (interpolant != NULL) {
269 interpolant->filtered(i, j, &p[0], &weight[0], 1, f);
271 if (weight[0].min_norm() > ale_real_weight_floor) {
272 p[0] /= weight[0];
273 } else {
274 return;
277 } else {
278 p[0] = c.input->get_bl(t);
281 p[0] *= tm;
283 if (u.defined()) {
284 p[1] = c.input->get_bl(u);
285 p[1] *= tm;
290 * Handle certainty.
293 if (certainty_weights == 1) {
296 * For speed, use arithmetic interpolation (get_bl(.))
297 * instead of geometric (get_bl(., 1))
300 weight[0] *= c.input_certainty->get_bl(t);
301 if (u.defined())
302 weight[1] *= c.input_certainty->get_bl(u);
303 weight[0] *= c.certainty->get_pixel(i, j);
304 weight[1] *= c.certainty->get_pixel(i, j);
307 if (c.aweight != NULL) {
308 weight[0] *= c.aweight->get_pixel(i, j);
309 weight[1] *= c.aweight->get_pixel(i, j);
313 * Update sampling area statistics
316 if (min[0] > i)
317 min[0] = i;
318 if (min[1] > j)
319 min[1] = j;
320 if (max[0] < i)
321 max[0] = i;
322 if (max[1] < j)
323 max[1] = j;
/* Centroid weighted by the summed channel weights of sample 0. */
325 centroid[0] += (weight[0][0] + weight[0][1] + weight[0][2]) * i;
326 centroid[1] += (weight[0][0] + weight[0][1] + weight[0][2]) * j;
327 centroid_divisor += (weight[0][0] + weight[0][1] + weight[0][2]);
330 * Determine alignment type.
/* Error metric per channel_alignment_type: 0 = all channels,
 * 1 = green only, 2 = channel sum.  Numerator uses |a - b|^exp,
 * denominator uses max(a, b)^exp. */
333 for (int m = 0; m < (u.defined() ? 2 : 1); m++)
334 if (channel_alignment_type == 0) {
336 * Align based on all channels.
340 for (int k = 0; k < 3; k++) {
341 ale_real achan = pa[k];
342 ale_real bchan = p[m][k];
344 this_result[m] += weight[m][k] * pow(fabs(achan - bchan), metric_exponent);
345 this_divisor[m] += weight[m][k] * pow(achan > bchan ? achan : bchan, metric_exponent);
347 } else if (channel_alignment_type == 1) {
349 * Align based on the green channel.
352 ale_real achan = pa[1];
353 ale_real bchan = p[m][1];
355 this_result[m] = weight[m][1] * pow(fabs(achan - bchan), metric_exponent);
356 this_divisor[m] = weight[m][1] * pow(achan > bchan ? achan : bchan, metric_exponent);
357 } else if (channel_alignment_type == 2) {
359 * Align based on the sum of all channels.
362 ale_real asum = 0;
363 ale_real bsum = 0;
364 ale_real wsum = 0;
366 for (int k = 0; k < 3; k++) {
367 asum += pa[k];
368 bsum += p[m][k];
369 wsum += weight[m][k] / 3;
372 this_result[m] = wsum * pow(fabs(asum - bsum), metric_exponent);
373 this_divisor[m] = wsum * pow(asum > bsum ? asum : bsum, metric_exponent);
/* Paired sample: feed the error difference into the perturbation
 * (de_*) statistics, scaled by the displacement between t and u. */
376 if (u.defined()) {
377 // ale_real de = fabs(this_result[0] / this_divisor[0]
378 // - this_result[1] / this_divisor[1]);
379 ale_real de = fabs(this_result[0] - this_result[1]);
381 de_centroid[0] += de * (ale_real) i;
382 de_centroid[1] += de * (ale_real) j;
384 de_centroid_v += de * (ale_real) t.lengthto(u);
386 de_sum += de;
/* Only sample 0 contributes to the run's overall error. */
389 result += (this_result[0]);
390 divisor += (this_divisor[0]);
/*
 * run_rescale(): rescale the run's transformation and its
 * scale-dependent error-centroid statistics by 'scale'.
 * NOTE(review): still refers to run members with no object and calls a
 * C++ method (offset.rescale) on an opaque ale_trans; pending the same
 * pointer-based conversion applied to run_init().  Code retained
 * verbatim (closing brace lost in extraction).
 */
393 static void run_rescale(ale_pos scale) {
394 offset.rescale(scale);
396 de_centroid[0] *= scale;
397 de_centroid[1] *= scale;
398 de_centroid_v *= scale;
/*
 * run_get_centroid(): weight-weighted centroid of the sampled area
 * (centroid sums divided by centroid_divisor).  Asserts the sums are
 * finite and that the result is defined unless no weight accumulated.
 * NOTE(review): object-less member access and C++ point construction;
 * pending pointer-based conversion.  Code retained verbatim.
 */
401 static point run_get_centroid() {
402 point result = point(centroid[0] / centroid_divisor, centroid[1] / centroid_divisor);
404 assert (finite(centroid[0])
405 && finite(centroid[1])
406 && (result.defined() || centroid_divisor == 0));
408 return result;
/*
 * run_get_error_centroid(): centroid of the error-difference
 * distribution (de_centroid sums divided by de_sum).
 * NOTE(review): object-less member access and C++ point construction;
 * pending pointer-based conversion.  Code retained verbatim.
 */
411 static point run_get_error_centroid() {
412 point result = point(de_centroid[0] / de_sum, de_centroid[1] / de_sum);
413 return result;
417 static ale_pos run_get_error_perturb() {
418 ale_pos result = de_centroid_v / de_sum;
420 return result;
/*
 * diff_stat_generic<diff_trans>: drives the alignment error evaluation
 * for one transformation type.  Maintains up to two 'run's (front =
 * best so far, back = candidate under test), a map of previous runs per
 * perturbation index, and perturbation multipliers.  diff() evaluates a
 * candidate transformation over the current scale cluster, splitting
 * the domain across pthreads when USE_PTHREAD is defined.
 *
 * NOTE(review): this is unconverted C++ (template, std::vector,
 * std::map, member functions) remaining in a .c file, and several
 * closing braces / delimiters appear lost in extraction; retained
 * verbatim pending conversion.
 */
423 template<class diff_trans>
424 class diff_stat_generic {
426 transformation::elem_bounds_t elem_bounds;
429 * When non-empty, runs.front() is best, runs.back() is
430 * testing.
433 std::vector<run> runs;
436 * old_runs stores the latest available perturbation set for
437 * each multi-alignment element.
440 typedef int run_index;
441 std::map<run_index, run> old_runs;
443 static void *diff_subdomain(void *args);
/* Per-thread work description for diff_subdomain(). */
445 struct subdomain_args {
446 struct scale_cluster c;
447 std::vector<run> runs;
448 int ax_count;
449 int f;
450 int i_min, i_max, j_min, j_max;
451 int subdomain;
454 struct scale_cluster si;
455 int ax_count;
456 int frame;
458 std::vector<ale_pos> perturb_multipliers;
460 public:
/* Evaluate transformation t against scale cluster c, pushing a new
 * candidate run; work is partitioned by rows across N subdomains. */
461 void diff(struct scale_cluster c, const diff_trans &t,
462 int _ax_count, int f) {
464 if (runs.size() == 2)
465 runs.pop_back();
467 runs.push_back(run(t));
469 si = c;
470 ax_count = _ax_count;
471 frame = f;
473 ui::get()->d2_align_sample_start();
475 if (interpolant != NULL) {
478 * XXX: This has not been tested, and may be completely
479 * wrong.
482 transformation tt = transformation::eu_identity();
483 tt.set_current_element(t);
484 interpolant->set_parameters(tt, c.input, c.accum->offset());
487 int N;
488 #ifdef USE_PTHREAD
489 N = thread::count();
491 pthread_t *threads = (pthread_t *) malloc(sizeof(pthread_t) * N);
492 pthread_attr_t *thread_attr = (pthread_attr_t *) malloc(sizeof(pthread_attr_t) * N);
494 #else
495 N = 1;
496 #endif
498 subdomain_args *args = new subdomain_args[N];
500 transformation::elem_bounds_int_t b = elem_bounds.scale_to_bounds(c.accum->height(), c.accum->width());
502 // fprintf(stdout, "[%d %d] [%d %d]\n",
503 // global_i_min, global_i_max, global_j_min, global_j_max);
505 for (int ti = 0; ti < N; ti++) {
506 args[ti].c = c;
507 args[ti].runs = runs;
508 args[ti].ax_count = ax_count;
509 args[ti].f = f;
510 args[ti].i_min = b.imin + ((b.imax - b.imin) * ti) / N;
511 args[ti].i_max = b.imin + ((b.imax - b.imin) * (ti + 1)) / N;
512 args[ti].j_min = b.jmin;
513 args[ti].j_max = b.jmax;
514 args[ti].subdomain = ti;
515 #ifdef USE_PTHREAD
516 pthread_attr_init(&thread_attr[ti]);
517 pthread_attr_setdetachstate(&thread_attr[ti], PTHREAD_CREATE_JOINABLE);
518 pthread_create(&threads[ti], &thread_attr[ti], diff_subdomain, &args[ti]);
519 #else
520 diff_subdomain(&args[ti]);
521 #endif
/* Join workers and merge their per-thread run statistics. */
524 for (int ti = 0; ti < N; ti++) {
525 #ifdef USE_PTHREAD
526 pthread_join(threads[ti], NULL);
527 #endif
528 runs.back().add(args[ti].runs.back());
531 #ifdef USE_PTHREAD
532 free(threads);
533 free(thread_attr);
534 #endif
536 delete[] args;
538 ui::get()->d2_align_sample_stop();
542 private:
/* Re-evaluate every stored run against the current cluster state. */
543 void rediff() {
544 std::vector<diff_trans> t_array;
546 for (unsigned int r = 0; r < runs.size(); r++) {
547 t_array.push_back(runs[r].offset);
550 runs.clear();
552 for (unsigned int r = 0; r < t_array.size(); r++)
553 diff(si, t_array[r], ax_count, frame);
557 public:
/* Candidate beats best (treating non-finite best as worse). */
558 int better() {
559 assert(runs.size() >= 2);
560 assert(runs[0].offset.scale() == runs[1].offset.scale());
562 return (runs[1].get_error() < runs[0].get_error()
563 || (!finite(runs[0].get_error()) && finite(runs[1].get_error())));
/* Candidate strictly beats best by error alone. */
566 int better_defined() {
567 assert(runs.size() >= 2);
568 assert(runs[0].offset.scale() == runs[1].offset.scale());
570 return (runs[1].get_error() < runs[0].get_error());
573 diff_stat_generic(transformation::elem_bounds_t e)
574 : runs(), old_runs(), perturb_multipliers() {
575 elem_bounds = e;
578 run_index get_run_index(unsigned int perturb_index) {
579 return perturb_index;
582 run &get_run(unsigned int perturb_index) {
583 run_index index = get_run_index(perturb_index);
585 assert(old_runs.count(index));
586 return old_runs[index];
/* Rescale the single stored run and re-evaluate against _si. */
589 void rescale(ale_pos scale, scale_cluster _si) {
590 assert(runs.size() == 1);
592 si = _si;
594 runs[0].rescale(scale);
596 rediff();
599 ~diff_stat_generic() {
602 diff_stat_generic &operator=(const diff_stat_generic &dst) {
604 * Copy run information.
606 runs = dst.runs;
607 old_runs = dst.old_runs;
610 * Copy diff variables
612 si = dst.si;
613 ax_count = dst.ax_count;
614 frame = dst.frame;
615 perturb_multipliers = dst.perturb_multipliers;
616 elem_bounds = dst.elem_bounds;
618 return *this;
621 diff_stat_generic(const diff_stat_generic &dst) : runs(), old_runs(),
622 perturb_multipliers() {
623 operator=(dst);
626 void set_elem_bounds(transformation::elem_bounds_t e) {
627 elem_bounds = e;
630 ale_accum get_result() {
631 assert(runs.size() == 1);
632 return runs[0].result;
635 ale_accum get_divisor() {
636 assert(runs.size() == 1);
637 return runs[0].divisor;
640 diff_trans get_offset() {
641 assert(runs.size() == 1);
642 return runs[0].offset;
645 int operator!=(diff_stat_generic &param) {
646 return (get_error() != param.get_error());
649 int operator==(diff_stat_generic &param) {
650 return !(operator!=(param));
653 ale_pos get_error_perturb() {
654 assert(runs.size() == 1);
655 return runs[0].get_error_perturb();
658 ale_accum get_error() const {
659 assert(runs.size() == 1);
660 return runs[0].get_error();
663 public:
665 * Get the set of transformations produced by a given perturbation
/* Generates translational/euclidean candidates, then (by
 * alignment_class) rotational, projective, and barrel-distortion
 * candidates; each candidate consumes one multiplier (defaulting
 * to 1 when the list runs out). */
667 void get_perturb_set(std::vector<diff_trans> *set,
668 ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
669 ale_pos *current_bd, ale_pos *modified_bd,
670 std::vector<ale_pos> multipliers = std::vector<ale_pos>()) {
672 assert(runs.size() == 1);
674 diff_trans test_t(diff_trans::eu_identity());
677 * Translational or euclidean transformation
680 for (unsigned int i = 0; i < 2; i++)
681 for (unsigned int s = 0; s < 2; s++) {
683 if (!multipliers.size())
684 multipliers.push_back(1);
686 assert(finite(multipliers[0]));
688 test_t = get_offset();
690 // test_t.eu_modify(i, (s ? -adj_p : adj_p) * multipliers[0]);
691 test_t.translate((i ? point(1, 0) : point(0, 1))
692 * (s ? -adj_p : adj_p)
693 * multipliers[0]);
695 test_t.snap(adj_p / 2);
697 set->push_back(test_t);
698 multipliers.erase(multipliers.begin());
702 if (alignment_class > 0)
703 for (unsigned int s = 0; s < 2; s++) {
705 if (!multipliers.size())
706 multipliers.push_back(1);
708 assert(multipliers.size());
709 assert(finite(multipliers[0]));
711 if (!(adj_o * multipliers[0] < rot_max))
712 return;
714 ale_pos adj_s = (s ? 1 : -1) * adj_o * multipliers[0];
716 test_t = get_offset();
/* Rotate about the best available centroid estimate: error
 * centroid, then sampling centroid, then image center. */
718 run_index ori = get_run_index(set->size());
719 point centroid = point::undefined();
721 if (!old_runs.count(ori))
722 ori = get_run_index(0);
724 if (!centroid.finite() && old_runs.count(ori)) {
725 centroid = old_runs[ori].get_error_centroid();
727 if (!centroid.finite())
728 centroid = old_runs[ori].get_centroid();
730 centroid *= test_t.scale()
731 / old_runs[ori].offset.scale();
734 if (!centroid.finite() && !test_t.is_projective()) {
735 test_t.eu_modify(2, adj_s);
736 } else if (!centroid.finite()) {
737 centroid = point(si.input->height() / 2,
738 si.input->width() / 2);
740 test_t.rotate(centroid + si.accum->offset(),
741 adj_s);
742 } else {
743 test_t.rotate(centroid + si.accum->offset(),
744 adj_s);
747 test_t.snap(adj_p / 2);
749 set->push_back(test_t);
750 multipliers.erase(multipliers.begin());
753 if (alignment_class == 2) {
756 * Projective transformation
759 for (unsigned int i = 0; i < 4; i++)
760 for (unsigned int j = 0; j < 2; j++)
761 for (unsigned int s = 0; s < 2; s++) {
763 if (!multipliers.size())
764 multipliers.push_back(1);
766 assert(multipliers.size());
767 assert(finite(multipliers[0]));
769 ale_pos adj_s = (s ? -1 : 1) * adj_p * multipliers [0];
771 test_t = get_offset();
773 if (perturb_type == 0)
774 test_t.gpt_modify_bounded(j, i, adj_s, elem_bounds.scale_to_bounds(si.accum->height(), si.accum->width()));
775 else if (perturb_type == 1)
776 test_t.gr_modify(j, i, adj_s);
777 else
778 assert(0);
780 test_t.snap(adj_p / 2);
782 set->push_back(test_t);
783 multipliers.erase(multipliers.begin());
789 * Barrel distortion
792 if (bda_mult != 0 && adj_b != 0) {
794 for (unsigned int d = 0; d < get_offset().bd_count(); d++)
795 for (unsigned int s = 0; s < 2; s++) {
797 if (!multipliers.size())
798 multipliers.push_back(1);
800 assert (multipliers.size());
801 assert (finite(multipliers[0]));
803 ale_pos adj_s = (s ? -1 : 1) * adj_b * multipliers[0];
/* Respect the per-frame barrel-distortion rate limit. */
805 if (bda_rate > 0 && fabs(modified_bd[d] + adj_s - current_bd[d]) > bda_rate)
806 continue;
808 diff_trans test_t = get_offset();
810 test_t.bd_modify(d, adj_s);
812 set->push_back(test_t);
/* Accept the candidate run as the new best. */
817 void confirm() {
818 assert(runs.size() == 2);
819 runs[0] = runs[1];
820 runs.pop_back();
/* Drop the candidate run. */
823 void discard() {
824 assert(runs.size() == 2);
825 runs.pop_back();
/* Test the full perturbation set, updating multipliers from observed
 * error-perturbation estimates; returns early on improvement. */
828 void perturb_test(ale_pos perturb, ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
829 ale_pos *current_bd, ale_pos *modified_bd, int stable) {
831 assert(runs.size() == 1);
833 std::vector<diff_trans> t_set;
/* First call: bootstrap perturbation multipliers. */
835 if (perturb_multipliers.size() == 0) {
836 get_perturb_set(&t_set, adj_p, adj_o, adj_b,
837 current_bd, modified_bd);
839 for (unsigned int i = 0; i < t_set.size(); i++) {
840 diff_stat_generic test = *this;
842 test.diff(si, t_set[i], ax_count, frame);
844 test.confirm();
846 if (finite(test.get_error_perturb()))
847 perturb_multipliers.push_back(adj_p / test.get_error_perturb());
848 else
849 perturb_multipliers.push_back(1);
853 t_set.clear();
856 get_perturb_set(&t_set, adj_p, adj_o, adj_b, current_bd, modified_bd,
857 perturb_multipliers);
859 int found_unreliable = 1;
860 std::vector<int> tested(t_set.size(), 0);
862 for (unsigned int i = 0; i < t_set.size(); i++) {
863 run_index ori = get_run_index(i);
866 * Check for stability
868 if (stable
869 && old_runs.count(ori)
870 && old_runs[ori].offset == t_set[i])
871 tested[i] = 1;
874 std::vector<ale_pos> perturb_multipliers_original = perturb_multipliers;
876 while (found_unreliable) {
878 found_unreliable = 0;
880 for (unsigned int i = 0; i < t_set.size(); i++) {
882 if (tested[i])
883 continue;
885 diff(si, t_set[i], ax_count, frame);
/* Missing/non-finite multiplier: (re)derive it and retry. */
887 if (!(i < perturb_multipliers.size())
888 || !finite(perturb_multipliers[i])) {
890 perturb_multipliers.resize(i + 1);
892 if (finite(perturb_multipliers[i])
893 && finite(adj_p)
894 && finite(adj_p / runs[1].get_error_perturb())) {
895 perturb_multipliers[i] =
896 adj_p / runs[1].get_error_perturb();
898 found_unreliable = 1;
899 } else
900 perturb_multipliers[i] = 1;
902 continue;
905 run_index ori = get_run_index(i);
907 if (old_runs.count(ori) == 0)
908 old_runs.insert(std::pair<run_index, run>(ori, runs[1]));
909 else
910 old_runs[ori] = runs[1];
912 if (finite(perturb_multipliers_original[i])
913 && finite(runs[1].get_error_perturb())
914 && finite(adj_p)
915 && finite(perturb_multipliers_original[i] * adj_p / runs[1].get_error_perturb()))
916 perturb_multipliers[i] = perturb_multipliers_original[i]
917 * adj_p / runs[1].get_error_perturb();
918 else
919 perturb_multipliers[i] = 1;
921 tested[i] = 1;
/* Improvement with a stable multiplier: commit and return. */
923 if (better()
924 && runs[1].get_error() < runs[0].get_error()
925 && perturb_multipliers[i]
926 / perturb_multipliers_original[i] < 2) {
927 runs[0] = runs[1];
928 runs.pop_back();
929 return;
934 if (runs.size() > 1)
935 runs.pop_back();
937 if (!found_unreliable)
938 return;
/* Concrete instantiations: single-element and multi-element alignment
 * statistics.  NOTE(review): C++ template typedefs in a .c file;
 * pending conversion along with diff_stat_generic above. */
944 typedef diff_stat_generic<trans_single> diff_stat_t;
945 typedef diff_stat_generic<trans_multi> diff_stat_multi;
949 * Adjust exposure for an aligned frame B against reference A.
951 * Expects full-LOD images.
953 * Note: This method does not use any weighting, by certainty or
954 * otherwise, in the first exposure registration pass, as any bias of
955 * weighting according to color may also bias the exposure registration
956 * result; it does use weighting, including weighting by certainty
957 * (even if certainty weighting is not specified), in the second pass,
958 * under the assumption that weighting by certainty improves handling
959 * of out-of-range highlights, and that bias of exposure measurements
960 * according to color may generally be less harmful after spatial
961 * registration has been performed.
/*
 * exposure_ratio_iterate: a decompose_domain worker that accumulates
 * weighted sums of corresponding reference (asum) and input (bsum)
 * pixels over the aligned overlap region; the caller divides asum/bsum
 * to obtain an exposure ratio (see set_exposure_ratio below).
 * pass_number selects whether alignment/certainty weighting applies
 * (pass 1) or unit weights are used (pass 0) — see the file comment
 * above on exposure registration.
 *
 * NOTE(review): unconverted C++ class; several closing braces appear
 * lost in extraction.  Retained verbatim.
 */
963 class exposure_ratio_iterate : public thread::decompose_domain {
964 pixel_accum *asums;
965 pixel_accum *bsums;
966 pixel_accum *asum;
967 pixel_accum *bsum;
968 struct scale_cluster c;
969 const transformation &t;
970 int ax_count;
971 int pass_number;
972 protected:
/* Allocate one accumulator pair per subdomain thread. */
973 void prepare_subdomains(unsigned int N) {
974 asums = new pixel_accum[N];
975 bsums = new pixel_accum[N];
977 void subdomain_algorithm(unsigned int thread,
978 int i_min, int i_max, int j_min, int j_max) {
980 point offset = c.accum->offset();
982 for (mc_iterate m(i_min, i_max, j_min, j_max, thread); !m.done(); m++) {
984 unsigned int i = (unsigned int) m.get_i();
985 unsigned int j = (unsigned int) m.get_j();
987 if (ref_excluded(i, j, offset, c.ax_parameters, ax_count))
988 continue;
991 * Transform
994 struct point q;
996 q = (c.input_scale < 1.0 && interpolant == NULL)
997 ? t.scaled_inverse_transform(
998 point(i + offset[0], j + offset[1]))
999 : t.unscaled_inverse_transform(
1000 point(i + offset[0], j + offset[1]));
1003 * Check that the transformed coordinates are within
1004 * the boundaries of array c.input, that they are not
1005 * subject to exclusion, and that the weight value in
1006 * the accumulated array is nonzero.
1009 if (input_excluded(q[0], q[1], c.ax_parameters, ax_count))
1010 continue;
1012 if (q[0] >= 0
1013 && q[0] <= c.input->height() - 1
1014 && q[1] >= 0
1015 && q[1] <= c.input->width() - 1
1016 && ((pixel) c.certainty->get_pixel(i, j)).minabs_norm() != 0) {
1017 pixel a = c.accum->get_pixel(i, j);
1018 pixel b;
1020 b = c.input->get_bl(q);
/* Pass 0: unit weights.  Pass 1: alignment weights times the
 * geometric mean of reference and input certainty. */
1022 pixel weight = ((c.aweight && pass_number)
1023 ? (pixel) c.aweight->get_pixel(i, j)
1024 : pixel(1, 1, 1))
1025 * (pass_number
1026 ? psqrt(c.certainty->get_pixel(i, j)
1027 * c.input_certainty->get_bl(q, 1))
1028 : pixel(1, 1, 1));
1030 asums[thread] += a * weight;
1031 bsums[thread] += b * weight;
/* Reduce per-thread sums into the caller's accumulators. */
1036 void finish_subdomains(unsigned int N) {
1037 for (unsigned int n = 0; n < N; n++) {
1038 *asum += asums[n];
1039 *bsum += bsums[n];
1041 delete[] asums;
1042 delete[] bsums;
1044 public:
/* Full-image iteration. */
1045 exposure_ratio_iterate(pixel_accum *_asum,
1046 pixel_accum *_bsum,
1047 struct scale_cluster _c,
1048 const transformation &_t,
1049 int _ax_count,
1050 int _pass_number) : decompose_domain(0, _c.accum->height(),
1051 0, _c.accum->width()),
1052 t(_t) {
1054 asum = _asum;
1055 bsum = _bsum;
1056 c = _c;
1057 ax_count = _ax_count;
1058 pass_number = _pass_number;
/* Iteration restricted to explicit element bounds b. */
1061 exposure_ratio_iterate(pixel_accum *_asum,
1062 pixel_accum *_bsum,
1063 struct scale_cluster _c,
1064 const transformation &_t,
1065 int _ax_count,
1066 int _pass_number,
1067 transformation::elem_bounds_int_t b) : decompose_domain(b.imin, b.imax,
1068 b.jmin, b.jmax),
1069 t(_t) {
1071 asum = _asum;
1072 bsum = _bsum;
1073 c = _c;
1074 ax_count = _ax_count;
1075 pass_number = _pass_number;
/*
 * set_exposure_ratio(): adjust frame m's exposure multiplier so the
 * aligned input matches the reference.  When _exp_register == 2 only
 * camera metadata (gain) is used; otherwise the weighted pixel sums
 * from exposure_ratio_iterate give the ratio asum/bsum, which scales
 * the current multiplier.  Non-finite results are discarded.
 * NOTE(review): uses C++ facilities (image_rw::, ui::get(), references)
 * pending conversion; closing braces appear lost in extraction.
 */
1079 static void set_exposure_ratio(unsigned int m, struct scale_cluster c,
1080 const transformation &t, int ax_count, int pass_number) {
1082 if (_exp_register == 2) {
1084 * Use metadata only.
1086 ale_real gain_multiplier = image_rw::exp(m).get_gain_multiplier();
1087 pixel multiplier = pixel(gain_multiplier, gain_multiplier, gain_multiplier);
1089 image_rw::exp(m).set_multiplier(multiplier);
1090 ui::get()->exp_multiplier(multiplier[0],
1091 multiplier[1],
1092 multiplier[2]);
1094 return;
1097 pixel_accum asum(0, 0, 0), bsum(0, 0, 0);
1099 exposure_ratio_iterate eri(&asum, &bsum, c, t, ax_count, pass_number);
1100 eri.run();
1102 // std::cerr << (asum / bsum) << " ";
1104 pixel_accum new_multiplier;
1106 new_multiplier = asum / bsum * image_rw::exp(m).get_multiplier();
/* Only commit a finite multiplier in every channel. */
1108 if (finite(new_multiplier[0])
1109 && finite(new_multiplier[1])
1110 && finite(new_multiplier[2])) {
1111 image_rw::exp(m).set_multiplier(new_multiplier);
1112 ui::get()->exp_multiplier(new_multiplier[0],
1113 new_multiplier[1],
1114 new_multiplier[2]);
/*
 * _align_element(): refine one alignment element by iterative
 * perturbation.  Starting at the given perturbation size, it tests the
 * perturbation set; after three consecutive stable iterations the
 * perturbation is halved and, when it drops below the preferred
 * level-of-detail threshold, the working images double in resolution
 * (lod--) and the transforms are rescaled.  Terminates when perturb
 * falls below local_lower, rescaling back to full detail before
 * returning the refined statistics.
 * NOTE(review): inner adj_o/adj_b shadow the same-named parameters
 * (apparently intentional: they are recomputed per iteration); C++
 * constructs remain pending conversion; closing braces appear lost in
 * extraction.
 */
1118 static diff_stat_t _align_element(ale_pos perturb, ale_pos local_lower,
1119 scale_cluster *scale_clusters, diff_stat_t here,
1120 ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
1121 ale_pos *current_bd, ale_pos *modified_bd,
1122 astate_t *astate, int lod, scale_cluster si) {
1125 * Run initial tests to get perturbation multipliers and error
1126 * centroids.
1129 std::vector<d2::trans_single> t_set;
1131 here.get_perturb_set(&t_set, adj_p, adj_o, adj_b, current_bd, modified_bd);
1133 int stable_count = 0;
1135 while (perturb >= local_lower) {
1137 ui::get()->alignment_dims(scale_clusters[lod].accum->height(), scale_clusters[lod].accum->width(),
1138 scale_clusters[lod].input->height(), scale_clusters[lod].input->width());
1141 * Orientational adjustment value in degrees.
1143 * Since rotational perturbation is now specified as an
1144 * arclength, we have to convert.
1147 ale_pos adj_o = 2 * (double) perturb
1148 / sqrt(pow(scale_clusters[0].input->height(), 2)
1149 + pow(scale_clusters[0].input->width(), 2))
1150 * 180
1151 / M_PI;
1154 * Barrel distortion adjustment value
1157 ale_pos adj_b = perturb * bda_mult;
1159 trans_single old_offset = here.get_offset();
1161 here.perturb_test(perturb, adj_p, adj_o, adj_b, current_bd, modified_bd,
1162 stable_count);
1164 if (here.get_offset() == old_offset)
1165 stable_count++;
1166 else
1167 stable_count = 0;
/* Three stable iterations: halve the perturbation and possibly
 * step down a level of detail. */
1169 if (stable_count == 3) {
1171 stable_count = 0;
1173 perturb *= 0.5;
1175 if (lod > 0
1176 && lod > lrint(log(perturb) / log(2)) - lod_preferred) {
1179 * Work with images twice as large
1182 lod--;
1183 si = scale_clusters[lod];
1186 * Rescale the transforms.
1189 ale_pos rescale_factor = (double) scale_factor
1190 / (double) pow(2, lod)
1191 / (double) here.get_offset().scale();
1193 here.rescale(rescale_factor, si);
1195 } else {
1196 adj_p = perturb / pow(2, lod);
1200 * Announce changes
1203 ui::get()->alignment_perturbation_level(perturb, lod);
1206 ui::get()->set_match(here.get_error());
1207 ui::get()->set_offset(here.get_offset());
/* Return to full detail before handing back the result. */
1210 if (lod > 0) {
1211 ale_pos rescale_factor = (double) scale_factor
1212 / (double) here.get_offset().scale();
1214 here.rescale(rescale_factor, scale_clusters[0]);
1217 return here;
1221 * Align frame m against the reference.
1223 * XXX: the transformation class currently combines ordinary
1224 * transformations with scaling. This is somewhat convenient for
1225 * some things, but can also be confusing. This method, _align(), is
1226 * one case where special care must be taken to ensure that the scale
1227 * is always set correctly (by using the 'rescale' method).
1229 static diff_stat_multi _align(int m, int local_gs, astate_t *astate) {
1231 ale_image input_frame = astate->get_input_frame();
1234 * Local upper/lower data, possibly dependent on image
1235 * dimensions.
1238 ale_pos local_lower, local_upper;
1239 ale_accum local_gs_mo;
1242 * Select the minimum dimension as the reference.
1245 ale_pos reference_size = input_frame->height();
1246 if (input_frame->width() < reference_size)
1247 reference_size = input_frame->width();
1248 ale_accum reference_area = input_frame->height()
1249 * input_frame->width();
1251 if (perturb_lower_percent)
1252 local_lower = (double) perturb_lower
1253 * (double) reference_size
1254 * (double) 0.01
1255 * (double) scale_factor;
1256 else
1257 local_lower = perturb_lower;
1259 if (perturb_upper_percent)
1260 local_upper = (double) perturb_upper
1261 * (double) reference_size
1262 * (double) 0.01
1263 * (double) scale_factor;
1264 else
1265 local_upper = perturb_upper;
1267 local_upper = pow(2, floor(log(local_upper) / log(2)));
1269 if (gs_mo_percent)
1270 local_gs_mo = (double) _gs_mo
1271 * (double) reference_area
1272 * (double) 0.01
1273 * (double) scale_factor;
1274 else
1275 local_gs_mo = _gs_mo;
1278 * Logarithms aren't exact, so we divide repeatedly to discover
1279 * how many steps will occur, and pass this information to the
1280 * user interface.
1283 int step_count = 0;
1284 double step_variable = local_upper;
1285 while (step_variable >= local_lower) {
1286 step_variable /= 2;
1287 step_count++;
1290 ale_pos perturb = local_upper;
1292 if (_keep) {
1293 kept_t[latest] = latest_t;
1294 kept_ok[latest] = latest_ok;
1298 * Determine how many levels of detail should be prepared, by
1299 * calculating the initial (largest) value for the
1300 * level-of-detail variable.
1303 int lod = lrint(log(perturb) / log(2)) - lod_preferred;
1305 if (lod < 0)
1306 lod = 0;
1308 while (lod > 0 && (reference_image->width() < pow(2, lod) * min_dimension
1309 || reference_image->height() < pow(2, lod) * min_dimension))
1310 lod--;
1312 unsigned int steps = (unsigned int) lod + 1;
1315 * Prepare multiple levels of detail.
1318 int local_ax_count;
1319 struct scale_cluster *scale_clusters = init_clusters(m,
1320 scale_factor, input_frame, steps,
1321 &local_ax_count);
1323 #error add check for non-NULL return
1326 * Initialize the default initial transform
1329 astate->init_default();
1332 * Set the default transformation.
1335 transformation offset = astate->get_default();
1338 * Establish boundaries
1341 offset.set_current_bounds(reference_image);
1343 ui::get()->alignment_degree_max(offset.get_coordinate(offset.stack_depth() - 1).degree);
1345 if (offset.stack_depth() == 1) {
1346 ui::get()->set_steps(step_count, 0);
1347 } else {
1348 ui::get()->set_steps(offset.get_coordinate(offset.stack_depth() - 1).degree + 1, 1);
1352 * Load any file-specified transformations
1355 for (unsigned int index = 0; index < offset.stack_depth(); index++) {
1356 int is_default = 1;
1357 unsigned int index_2;
1358 offset.set_current_index(index);
1360 offset = tload_next(tload, alignment_class == 2,
1361 offset,
1362 &is_default, offset.get_current_index() == 0);
1364 index_2 = offset.get_current_index();
1366 if (index_2 > index) {
1367 for (unsigned int index_3 = index; index_3 < index_2; index_3++)
1368 astate->set_is_default(index_3, 1);
1370 index = index_2;
1373 astate->set_is_default(index, is_default);
1376 offset.set_current_index(0);
1378 astate->init_frame_alignment_primary(&offset, lod, perturb);
1381 * Control point alignment
1384 if (local_gs == 5) {
1386 transformation o = offset;
1389 * Determine centroid data
1392 point current, previous;
1393 centroids(m, &current, &previous);
1395 if (current.defined() && previous.defined()) {
1396 o = orig_t;
1397 o.set_dimensions(input_frame);
1398 o.translate((previous - current) * o.scale());
1399 current = previous;
1403 * Determine rotation for alignment classes other than translation.
1406 ale_pos lowest_error = cp_rms_error(m, o);
1408 ale_pos rot_lower = 2 * (double) local_lower
1409 / sqrt(pow(scale_clusters[0].input->height(), 2)
1410 + pow(scale_clusters[0].input->width(), 2))
1411 * 180
1412 / M_PI;
1414 if (alignment_class > 0)
1415 for (double rot = 30; rot > rot_lower; rot /= 2)
1416 for (double srot = -rot; srot < rot * 1.5; srot += rot * 2) {
1417 int is_improved = 1;
1418 while (is_improved) {
1419 is_improved = 0;
1420 transformation test_t = o;
1422 * XXX: is this right?
1424 test_t.rotate(current * o.scale(), srot);
1425 ale_pos test_v = cp_rms_error(m, test_t);
1427 if (test_v < lowest_error) {
1428 lowest_error = test_v;
1429 o = test_t;
1430 srot += 3 * rot;
1431 is_improved = 1;
1437 * Determine projective parameters through a local
1438 * minimum search.
1441 if (alignment_class == 2) {
1442 ale_pos adj_p = lowest_error;
1444 if (adj_p < local_lower)
1445 adj_p = local_lower;
1447 while (adj_p >= local_lower) {
1448 transformation test_t = o;
1449 int is_improved = 1;
1450 ale_pos test_v;
1451 ale_pos adj_s;
1453 while (is_improved) {
1454 is_improved = 0;
1456 for (int i = 0; i < 4; i++)
1457 for (int j = 0; j < 2; j++)
1458 for (adj_s = -adj_p; adj_s <= adj_p; adj_s += 2 * adj_p) {
1460 test_t = o;
1462 if (perturb_type == 0)
1463 test_t.gpt_modify(j, i, adj_s);
1464 else if (perturb_type == 1)
1465 test_t.gr_modify(j, i, adj_s);
1466 else
1467 assert(0);
1469 test_v = cp_rms_error(m, test_t);
1471 if (test_v < lowest_error) {
1472 lowest_error = test_v;
1473 o = test_t;
1474 adj_s += 3 * adj_p;
1475 is_improved = 1;
1479 adj_p /= 2;
1485 * Pre-alignment exposure adjustment
1488 if (_exp_register) {
1489 ui::get()->exposure_1();
1490 set_exposure_ratio(m, scale_clusters[0], offset, local_ax_count, 0);
1494 * Scale transform for lod
1497 for (int lod_ = 0; lod_ < lod; lod_++) {
1498 transformation s = offset;
1499 transformation t = offset;
1501 t.rescale(1 / (double) 2);
1503 if (!(t.scaled_height() > 0 && t.scaled_height() < s.scaled_height())
1504 || !(t.scaled_width() > 0 && t.scaled_width() < s.scaled_width())) {
1505 perturb /= pow(2, lod - lod_);
1506 lod = lod_;
1507 break;
1508 } else {
1509 offset = t;
1513 ui::get()->set_offset(offset);
1515 struct scale_cluster si = scale_clusters[lod];
1518 * Projective adjustment value
1521 ale_pos adj_p = perturb / pow(2, lod);
1524 * Orientational adjustment value in degrees.
1526 * Since rotational perturbation is now specified as an
1527 * arclength, we have to convert.
1530 ale_pos adj_o = (double) 2 * (double) perturb
1531 / sqrt(pow((double) scale_clusters[0].input->height(), (double) 2)
1532 + pow((double) scale_clusters[0].input->width(), (double) 2))
1533 * (double) 180
1534 / M_PI;
1537 * Barrel distortion adjustment value
1540 ale_pos adj_b = perturb * bda_mult;
1543 * Global search overlap requirements.
1546 local_gs_mo = (double) local_gs_mo / pow(pow(2, lod), 2);
1549 * Alignment statistics.
1552 diff_stat_t here(offset.elem_bounds());
1555 * Current difference (error) value
1558 ui::get()->prematching();
1559 here.diff(si, offset.get_current_element(), local_ax_count, m);
1560 ui::get()->set_match(here.get_error());
1563 * Current and modified barrel distortion parameters
1566 ale_pos current_bd[BARREL_DEGREE];
1567 ale_pos modified_bd[BARREL_DEGREE];
1568 offset.bd_get(current_bd);
1569 offset.bd_get(modified_bd);
1572 * Translational global search step
1575 if (perturb >= local_lower && local_gs != 0 && local_gs != 5
1576 && (local_gs != 6 || astate->get_is_default(0))) {
1578 ui::get()->global_alignment(perturb, lod);
1579 ui::get()->gs_mo(local_gs_mo);
1581 test_globals(&here, si, offset, local_gs, adj_p,
1582 local_ax_count, m, local_gs_mo, perturb);
1584 ui::get()->set_match(here.get_error());
1585 ui::get()->set_offset(here.get_offset());
1589 * Perturbation adjustment loop.
1592 offset.set_current_element(here.get_offset());
1594 for (unsigned int i = 0; i < offset.stack_depth(); i++) {
1596 ui::get()->aligning_element(i, offset.stack_depth());
1598 offset.set_current_index(i);
1600 ui::get()->start_multi_alignment_element(offset);
1602 ui::get()->set_offset(offset);
1604 if (i > 0) {
1605 astate->init_frame_alignment_nonprimary(&offset, lod, perturb, i);
1607 if (_exp_register == 1) {
1608 ui::get()->exposure_1();
1609 pixel_accum asum(0, 0, 0), bsum(0, 0, 0);
1610 exposure_ratio_iterate eri(&asum, &bsum, scale_clusters[0], offset, local_ax_count, 0,
1611 offset.elem_bounds().scale_to_bounds(scale_clusters[0].accum->height(),
1612 scale_clusters[0].accum->width()));
1614 eri.run();
1615 pixel_accum tr = asum / bsum;
1616 ui::get()->exp_multiplier(tr[0], tr[1], tr[2]);
1617 offset.set_tonal_multiplier(tr);
1621 int e_lod = lod;
1622 int e_div = offset.get_current_coordinate().degree;
1623 ale_pos e_perturb = perturb;
1624 ale_pos e_adj_p = adj_p;
1625 ale_pos e_adj_b = adj_b;
1627 for (int d = 0; d < e_div; d++) {
1628 e_adj_b = 0;
1629 e_perturb *= 0.5;
1630 if (e_lod > 0) {
1631 e_lod--;
1632 } else {
1633 e_adj_p *= 0.5;
1637 if (i > 0) {
1639 d2::trans_multi::elem_bounds_t b = offset.elem_bounds();
1641 for (int dim_satisfied = 0; e_lod > 0 && !dim_satisfied; ) {
1642 int height = scale_clusters[e_lod].accum->height();
1643 int width = scale_clusters[e_lod].accum->width();
1645 d2::trans_multi::elem_bounds_int_t bi = b.scale_to_bounds(height, width);
1647 dim_satisfied = bi.satisfies_min_dim(min_dimension);
1649 if (!dim_satisfied) {
1650 e_lod--;
1651 e_adj_p *= 2;
1656 * Scale transform for lod
1659 for (int lod_ = 0; lod_ < e_lod; lod_++) {
1660 trans_single s = offset.get_element(i);
1661 trans_single t = offset.get_element(i);
1663 t.rescale(1 / (double) 2);
1665 if (!(t.scaled_height() > 0 && t.scaled_height() < s.scaled_height())
1666 || !(t.scaled_width() > 0 && t.scaled_width() < s.scaled_width())) {
1667 e_perturb /= pow(2, e_lod - lod_);
1668 e_lod = lod_;
1669 break;
1670 } else {
1671 offset.set_element(i, t);
1675 ui::get()->set_offset(offset);
1679 * Announce perturbation size
1682 ui::get()->aligning(e_perturb, e_lod);
1684 si = scale_clusters[e_lod];
1686 here.set_elem_bounds(offset.elem_bounds());
1688 here.diff(si, offset.get_current_element(), local_ax_count, m);
1690 here.confirm();
1692 here = check_ancestor_path(offset, si, here, local_ax_count, m);
1694 here = _align_element(e_perturb, local_lower, scale_clusters,
1695 here, e_adj_p, adj_o, e_adj_b, current_bd, modified_bd,
1696 astate, e_lod, si);
1698 offset.rescale(here.get_offset().scale() / offset.scale());
1700 offset.set_current_element(here.get_offset());
1702 if (i > 0 && _exp_register == 1) {
1703 if (ma_cert_satisfied(scale_clusters[0], offset, i)) {
1704 ui::get()->exposure_2();
1705 pixel_accum asum(0, 0, 0), bsum(0, 0, 0);
1706 exposure_ratio_iterate eri(&asum, &bsum, scale_clusters[0], offset, local_ax_count, 1,
1707 offset.elem_bounds().scale_to_bounds(scale_clusters[0].accum->height(),
1708 scale_clusters[0].accum->width()));
1710 eri.run();
1711 pixel_accum tr = asum / bsum;
1712 ui::get()->exp_multiplier(tr[0], tr[1], tr[2]);
1713 offset.set_tonal_multiplier(tr);
1714 } else {
1715 offset.set_tonal_multiplier(offset.get_element(offset.parent_index(i)).get_tonal_multiplier(point(0, 0)));
1717 } else if (_exp_register == 1) {
1718 ui::get()->exposure_2();
1719 set_exposure_ratio(m, scale_clusters[0], offset, local_ax_count, 1);
1722 ui::get()->set_offset(offset);
1724 if (i + 1 == offset.stack_depth())
1725 ui::get()->alignment_degree_complete(offset.get_coordinate(i).degree);
1726 else if (offset.get_coordinate(i).degree != offset.get_coordinate(i + 1).degree)
1727 ui::get()->alignment_degree_complete(offset.get_coordinate(i + 1).degree);
1730 offset.set_current_index(0);
1732 ui::get()->multi();
1733 offset.set_multi(reference_image, input_frame);
1736 * Recalculate error on whole frame.
1739 ui::get()->postmatching();
1740 diff_stat_generic<transformation> multi_here(offset.elem_bounds());
1741 multi_here.diff(scale_clusters[0], offset, local_ax_count, m);
1742 ui::get()->set_match(multi_here.get_error());
1745 * Free the level-of-detail structures
1748 final_clusters(scale_clusters, scale_factor, steps);
1751 * Ensure that the match meets the threshold.
1754 if (threshold_ok(multi_here.get_error())) {
1756 * Update alignment variables
1758 latest_ok = 1;
1759 astate->set_default(offset);
1760 astate->set_final(offset);
1761 ui::get()->alignment_match_ok();
1762 } else if (local_gs == 4) {
1765 * Align with outer starting points.
1769 * XXX: This probably isn't exactly the right thing to do,
1770 * since variables like old_initial_value have been overwritten.
1773 diff_stat_multi nested_result = _align(m, -1, astate);
1775 if (threshold_ok(nested_result.get_error())) {
1776 return nested_result;
1777 } else if (nested_result.get_error() < multi_here.get_error()) {
1778 multi_here = nested_result;
1781 if (is_fail_default)
1782 offset = astate->get_default();
1784 ui::get()->set_match(multi_here.get_error());
1785 ui::get()->alignment_no_match();
1787 } else if (local_gs == -1) {
1789 latest_ok = 0;
1790 latest_t = offset;
1791 return multi_here;
1793 } else {
1794 if (is_fail_default)
1795 offset = astate->get_default();
1796 latest_ok = 0;
1797 ui::get()->alignment_no_match();
1801 * Write the tonal registration multiplier as a comment.
1804 pixel trm = image_rw::exp(m).get_multiplier();
1805 tsave_trm(tsave, trm[0], trm[1], trm[2]);
1808 * Save the transformation information
1811 for (unsigned int index = 0; index < offset.stack_depth(); index++) {
1812 offset.set_current_index(index);
1814 tsave_next(tsave, offset, alignment_class == 2,
1815 offset.get_current_index() == 0);
1818 offset.set_current_index(0);
1821 * Update match statistics.
1824 match_sum += (1 - multi_here.get_error()) * (ale_accum) 100;
1825 match_count++;
1826 latest = m;
1827 latest_t = offset;
1829 return multi_here;
1832 int ale_align(ale_context ac, ale_image a, ale_image b, ale_trans start,
1833 ale_align_properties align_properties, ale_trans result) {
1834 #warning function unfinished.