1 // Copyright 2002, 2004, 2007 David Hilvert <dhilvert@auricle.dyndns.org>,
2 // <dhilvert@ugcs.caltech.edu>
4 /* This file is part of the Anti-Lamenessing Engine.
6 The Anti-Lamenessing Engine is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 The Anti-Lamenessing Engine is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with the Anti-Lamenessing Engine; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * align.h: Handle alignment of frames.
25 #ifndef __d2align_h__
26 #define __d2align_h__
28 #include "filter.h"
29 #include "transformation.h"
30 #include "image.h"
31 #include "point.h"
32 #include "render.h"
33 #include "tfile.h"
34 #include "image_rw.h"
36 class align {
37 private:
40 * Private data members
43 static ale_pos scale_factor;
46 * Original frame transformation
48 static transformation orig_t;
51 * Keep data older than latest
53 static int _keep;
54 static transformation *kept_t;
55 static int *kept_ok;
58 * Transformation file handlers
61 static tload_t *tload;
62 static tsave_t *tsave;
65 * Control point variables
68 static const point **cp_array;
69 static unsigned int cp_count;
72 * Reference rendering to align against
75 static render *reference;
76 static filter::scaled_filter *interpolant;
77 static ale_image reference_image;
80 * Per-pixel alignment weight map
83 static ale_image weight_map;
86 * Frequency-dependent alignment weights
89 static double horiz_freq_cut;
90 static double vert_freq_cut;
91 static double avg_freq_cut;
92 static const char *fw_output;
95 * Algorithmic alignment weighting
98 static const char *wmx_exec;
99 static const char *wmx_file;
100 static const char *wmx_defs;
103 * Non-certainty alignment weights
106 static ale_image alignment_weights;
109 * Latest transformation.
112 static transformation latest_t;
115 * Flag indicating whether the latest transformation
116 * resulted in a match.
119 static int latest_ok;
122 * Frame number most recently aligned.
125 static int latest;
128 * Exposure registration
130 * 0. Preserve the original exposure of images.
132 * 1. Match exposure between images.
134 * 2. Use only image metadata for registering exposure.
137 static int _exp_register;
140 * Alignment class.
142 * 0. Translation only. Only adjust the x and y position of images.
143 * Do not rotate input images or perform projective transformations.
145 * 1. Euclidean transformations only. Adjust the x and y position
146 * of images and the orientation of the image about the image center.
148 * 2. Perform general projective transformations. See the file gpt.h
149 * for more information about general projective transformations.
152 static int alignment_class;
155 * Default initial alignment type.
157 * 0. Identity transformation.
159 * 1. Most recently accepted frame's final transformation.
162 static int default_initial_alignment_type;
165 * Projective group behavior
167 * 0. Perturb in output coordinates.
169 * 1. Perturb in source coordinates.
172 static int perturb_type;
175 * Alignment state
177 * This structure contains alignment state information. The change
178 * between the non-default old initial alignment and old final
179 * alignment is used to adjust the non-default current initial
180 * alignment. If either the old or new initial alignment is a default
181 * alignment, the old --follow semantics are preserved.
184 class astate_t {
185 transformation old_initial_alignment;
186 transformation old_final_alignment;
187 transformation default_initial_alignment;
188 int old_is_default;
189 std::vector<int> is_default;
190 const image *input_frame;
192 public:
193 astate_t() :
194 old_initial_alignment(transformation::eu_identity()),
195 old_final_alignment(transformation::eu_identity()),
196 default_initial_alignment(transformation::eu_identity()),
197 is_default(1) {
199 input_frame = NULL;
200 is_default[0] = 1;
201 old_is_default = 1;
204 const image *get_input_frame() const {
205 return input_frame;
208 void set_is_default(unsigned int index, int value) {
211 * Expand the array, if necessary.
213 if (index == is_default.size())
214 is_default.resize(index + 1);
216 assert (index < is_default.size());
217 is_default[index] = value;
220 int get_is_default(unsigned int index) {
221 assert (index < is_default.size());
222 return is_default[index];
225 transformation get_default() {
226 return default_initial_alignment;
229 void set_default(transformation t) {
230 default_initial_alignment = t;
233 void default_set_original_bounds(const image *i) {
234 default_initial_alignment.set_original_bounds(i);
237 void set_final(transformation t) {
238 old_final_alignment = t;
241 void set_input_frame(const image *i) {
242 input_frame = i;
246 * Implement new delta --follow semantics.
248 * If we have a transformation T such that
250 * prev_final == T(prev_init)
252 * Then we also have
254 * current_init_follow == T(current_init)
256 * We can calculate T as follows:
258 * T == prev_final(prev_init^-1)
260 * Where ^-1 is the inverse operator.
262 static trans_single follow(trans_single a, trans_single b, trans_single c) {
263 trans_single cc = c;
265 if (alignment_class == 0) {
267 * Translational transformations
270 ale_pos t0 = -a.eu_get(0) + b.eu_get(0);
271 ale_pos t1 = -a.eu_get(1) + b.eu_get(1);
273 cc.eu_modify(0, t0);
274 cc.eu_modify(1, t1);
276 } else if (alignment_class == 1) {
278 * Euclidean transformations
281 ale_pos t2 = -a.eu_get(2) + b.eu_get(2);
283 cc.eu_modify(2, t2);
285 point p( c.scaled_height()/2 + c.eu_get(0) - a.eu_get(0),
286 c.scaled_width()/2 + c.eu_get(1) - a.eu_get(1) );
288 p = b.transform_scaled(p);
290 cc.eu_modify(0, p[0] - c.scaled_height()/2 - c.eu_get(0));
291 cc.eu_modify(1, p[1] - c.scaled_width()/2 - c.eu_get(1));
293 } else if (alignment_class == 2) {
295 * Projective transformations
298 point p[4];
300 p[0] = b.transform_scaled(a
301 . scaled_inverse_transform(c.transform_scaled(point( 0 , 0 ))));
302 p[1] = b.transform_scaled(a
303 . scaled_inverse_transform(c.transform_scaled(point(c.scaled_height(), 0 ))));
304 p[2] = b.transform_scaled(a
305 . scaled_inverse_transform(c.transform_scaled(point(c.scaled_height(), c.scaled_width()))));
306 p[3] = b.transform_scaled(a
307 . scaled_inverse_transform(c.transform_scaled(point( 0 , c.scaled_width()))));
309 cc.gpt_set(p);
312 return cc;
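/*
 * Illustrative sketch (not part of the original source): for the
 * translational case above, suppose the previous frame's initial
 * alignment had offsets (10, 20) and its final alignment (13, 18).
 * The delta T then adds (+3, -2), so a current initial alignment of
 * (40, 50) follows to (43, 48):
 *
 *   trans_single a = ...;   // hypothetical prev_init,    eu offsets (10, 20)
 *   trans_single b = ...;   // hypothetical prev_final,   eu offsets (13, 18)
 *   trans_single c = ...;   // hypothetical current_init, eu offsets (40, 50)
 *
 *   trans_single cc = follow(a, b, c);   // eu offsets (43, 48)
 */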
316 * For multi-alignment following, we use the following approach, which
317 * is not guaranteed to work with large changes in scene or perspective,
318 * but should be somewhat flexible:
320 * For
322 * t[][] calculated final alignments
323 * s[][] alignments as loaded from file
324 * previous frame n
325 * current frame n+1
326 * fundamental (primary) 0
327 * non-fundamental (non-primary) m!=0
328 * parent element m'
329 * follow(a, b, c) applying the (a, b) delta T=b(a^-1) to c
331 * following in the case of missing file data might be generated by
333 * t[n+1][0] = t[n][0]
334 * t[n+1][m!=0] = follow(t[n][m'], t[n+1][m'], t[n][m])
336 * cases with all noted file data present might be generated by
338 * t[n+1][0] = follow(s[n][0], t[n][0], s[n+1][0])
339 * t[n+1][m!=0] = follow(s[n+1][m'], t[n+1][m'], s[n+1][m])
341 * For non-following behavior, or where assigning the above is
342 * impossible, we assign the following default
344 * t[n+1][0] = Identity
345 * t[n+1][m!=0] = t[n+1][m']
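 *
 * As an added illustration (a sketch, not from the original comment):
 * with a primary element 0 and one non-primary element 1 whose parent
 * is 0, and with file data present for every frame, frame n+1 would be
 * initialized as
 *
 * t[n+1][0] = follow(s[n][0],   t[n][0],   s[n+1][0])
 * t[n+1][1] = follow(s[n+1][0], t[n+1][0], s[n+1][1])
 *
 * i.e., the primary element applies the previous frame's file-to-final
 * delta to the current frame's file alignment, and element 1 applies,
 * to its own file alignment, the delta between its parent's file
 * alignment and the parent's computed alignment in the current frame.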
348 void init_frame_alignment_primary(transformation *offset, int lod, ale_pos perturb) {
350 if (perturb > 0 && !old_is_default && !get_is_default(0)
351 && default_initial_alignment_type == 1) {
354 * Apply following logic for the primary element.
357 ui::get()->following();
359 trans_single new_offset = follow(old_initial_alignment.get_element(0),
360 old_final_alignment.get_element(0),
361 offset->get_element(0));
363 old_initial_alignment = *offset;
365 offset->set_element(0, new_offset);
367 ui::get()->set_offset(new_offset);
368 } else {
369 old_initial_alignment = *offset;
372 is_default.resize(old_initial_alignment.stack_depth());
375 void init_frame_alignment_nonprimary(transformation *offset,
376 int lod, ale_pos perturb, unsigned int index) {
378 assert (index > 0);
380 unsigned int parent_index = offset->parent_index(index);
382 if (perturb > 0
383 && !get_is_default(parent_index)
384 && !get_is_default(index)
385 && default_initial_alignment_type == 1) {
388 * Apply file-based following logic for the
389 * given element.
392 ui::get()->following();
394 trans_single new_offset = follow(old_initial_alignment.get_element(parent_index),
395 offset->get_element(parent_index),
396 offset->get_element(index));
398 old_initial_alignment.set_element(index, offset->get_element(index));
399 offset->set_element(index, new_offset);
401 ui::get()->set_offset(new_offset);
403 return;
406 offset->get_coordinate(parent_index);
409 if (perturb > 0
410 && old_final_alignment.exists(offset->get_coordinate(parent_index))
411 && old_final_alignment.exists(offset->get_current_coordinate())
412 && default_initial_alignment_type == 1) {
415 * Apply nonfile-based following logic for
416 * the given element.
419 ui::get()->following();
422 * XXX: Although expressed differently, the code
423 * below should be equivalent to the description
424 * in the comment above.
427 trans_single a = old_final_alignment.get_element(offset->get_coordinate(parent_index));
428 trans_single b = old_final_alignment.get_element(offset->get_current_coordinate());
429 trans_single c = offset->get_element(parent_index);
431 trans_single new_offset = follow(a, b, c);
433 offset->set_element(index, new_offset);
434 ui::get()->set_offset(new_offset);
436 return;
440 * Handle other cases.
443 if (get_is_default(index)) {
444 offset->set_element(index, offset->get_element(parent_index));
445 ui::get()->set_offset(offset->get_element(index));
449 void init_default() {
451 if (default_initial_alignment_type == 0) {
454 * Follow the transformation of the original frame,
455 * setting new image dimensions.
458 // astate->default_initial_alignment = orig_t;
459 default_initial_alignment.set_current_element(orig_t.get_element(0));
460 default_initial_alignment.set_dimensions(input_frame);
462 } else if (default_initial_alignment_type == 1)
465 * Follow previous transformation, setting new image
466 * dimensions.
469 default_initial_alignment.set_dimensions(input_frame);
471 else
472 assert(0);
474 old_is_default = get_is_default(0);
479 * Alignment for failed frames -- default or optimal?
481 * A frame that does not meet the match threshold can be assigned the
482 * best alignment found, or can be assigned its alignment default.
485 static int is_fail_default;
488 * Alignment code.
490 * 0. Align images with an error contribution from each color channel.
492 * 1. Align images with an error contribution only from the green channel.
493 * Other color channels do not affect alignment.
495 * 2. Align images using a summation of channels. May be useful when dealing
496 * with images that have high frequency color ripples due to color aliasing.
499 static int channel_alignment_type;
502 * Error metric exponent
505 static ale_real metric_exponent;
508 * Match threshold
511 static float match_threshold;
514 * Perturbation lower and upper bounds.
517 static ale_pos perturb_lower;
518 static int perturb_lower_percent;
519 static ale_pos perturb_upper;
520 static int perturb_upper_percent;
523 * Preferred level-of-detail scale factor is 2^lod_preferred/perturb.
526 static int lod_preferred;
529 * Minimum dimension for reduced LOD.
532 static int min_dimension;
535 * Maximum rotational perturbation
538 static ale_pos rot_max;
541 * Barrel distortion alignment multiplier
544 static ale_pos bda_mult;
547 * Barrel distortion maximum adjustment rate
550 static ale_pos bda_rate;
553 * Alignment match sum
556 static ale_accum match_sum;
559 * Alignment match count.
562 static int match_count;
565 * Monte Carlo parameter
568 static ale_pos _mc;
571 * Certainty weight flag
573 * 0. Don't use certainty weights for alignment.
575 * 1. Use certainty weights for alignment.
578 static int certainty_weights;
581 * Global search parameter
583 * 0. Local: Local search only.
584 * 1. Inner: Alignment reference image inner region
585 * 2. Outer: Alignment reference image outer region
586 * 3. All: Alignment reference image inner and outer regions.
587 * 4. Central: Inner if possible; else, best of inner and outer.
588 * 5. Points: Align by control points.
591 static int _gs;
594 * Minimum overlap for global searches
597 static ale_accum _gs_mo;
598 static int gs_mo_percent;
601 * Minimum certainty for multi-alignment element registration.
604 static ale_real _ma_cert;
607 * Exclusion regions
610 static exclusion *ax_parameters;
611 static int ax_count;
614 * Types for scale clusters.
617 struct nl_scale_cluster {
618 const image *accum_max;
619 const image *accum_min;
620 const image *certainty_max;
621 const image *certainty_min;
622 const image *aweight_max;
623 const image *aweight_min;
624 exclusion *ax_parameters;
626 ale_pos input_scale;
627 const image *input_certainty_max;
628 const image *input_certainty_min;
629 const image *input_max;
630 const image *input_min;
633 struct scale_cluster {
634 const image *accum;
635 const image *certainty;
636 const image *aweight;
637 exclusion *ax_parameters;
639 ale_pos input_scale;
640 const image *input_certainty;
641 const image *input;
643 nl_scale_cluster *nl_scale_clusters;
647 * Check for exclusion region coverage in the reference
648 * array.
650 static int ref_excluded(int i, int j, point offset, exclusion *params, int param_count) {
651 for (int idx = 0; idx < param_count; idx++)
652 if (params[idx].type == exclusion::RENDER
653 && i + offset[0] >= params[idx].x[0]
654 && i + offset[0] <= params[idx].x[1]
655 && j + offset[1] >= params[idx].x[2]
656 && j + offset[1] <= params[idx].x[3])
657 return 1;
659 return 0;
663 * Check for exclusion region coverage in the input
664 * array.
666 static int input_excluded(ale_pos ti, ale_pos tj, exclusion *params, int param_count) {
667 for (int idx = 0; idx < param_count; idx++)
668 if (params[idx].type == exclusion::FRAME
669 && ti >= params[idx].x[0]
670 && ti <= params[idx].x[1]
671 && tj >= params[idx].x[2]
672 && tj <= params[idx].x[3])
673 return 1;
675 return 0;
679 * Overlap function. Determines the number of pixels in areas where
680 * the arrays overlap. Uses the reference array's notion of pixel
681 * positions.
683 static unsigned int overlap(struct scale_cluster c, transformation t, int ax_count) {
684 assert (reference_image);
686 unsigned int result = 0;
688 point offset = c.accum->offset();
690 for (unsigned int i = 0; i < c.accum->height(); i++)
691 for (unsigned int j = 0; j < c.accum->width(); j++) {
693 if (ref_excluded(i, j, offset, c.ax_parameters, ax_count))
694 continue;
697 * Transform
700 struct point q;
702 q = (c.input_scale < 1.0 && interpolant == NULL)
703 ? t.scaled_inverse_transform(
704 point(i + offset[0], j + offset[1]))
705 : t.unscaled_inverse_transform(
706 point(i + offset[0], j + offset[1]));
708 ale_pos ti = q[0];
709 ale_pos tj = q[1];
712 * Check that the transformed coordinates are within
713 * the boundaries of array c.input, and check that the
714 * weight value in the accumulated array is nonzero,
715 * unless we know it is nonzero by virtue of the fact
716 * that it falls within the region of the original
717 * frame (e.g. when we're not increasing image
718 * extents). Also check for frame exclusion.
721 if (input_excluded(ti, tj, c.ax_parameters, ax_count))
722 continue;
724 if (ti >= 0
725 && ti <= c.input->height() - 1
726 && tj >= 0
727 && tj <= c.input->width() - 1
728 && c.certainty->get_pixel(i, j)[0] != 0)
729 result++;
732 return result;
736 * Monte Carlo iteration class.
738 * Monte Carlo alignment has been used for statistical comparisons in
739 * spatial registration, and is now used for tonal registration
740 * and final match calculation.
744 * We use a random process for which the expected number of sampled
745 * pixels is +/- .000003 from the coverage in the range [.005,.995] for
746 * an image with 100,000 pixels. (The actual number may still deviate
747 * from the expected number by more than this amount, however.) The
748 * method is as follows:
750 * We have coverage == USE/ALL, or (expected # pixels to use)/(# total
751 * pixels). We derive from this SKIP/USE.
753 * SKIP/USE == (SKIP/ALL)/(USE/ALL) == (1 - (USE/ALL))/(USE/ALL)
755 * Once we have SKIP/USE, we know the expected number of pixels to skip
756 * in each iteration. We use a random selection process that provides
757 * SKIP/USE close to this calculated value.
759 * If we can draw uniformly to select the number of pixels to skip, we
760 * do. In this case, the maximum number of pixels to skip is twice the
761 * expected number.
763 * If we cannot draw uniformly, we still assign equal probability to
764 * each of the integer values in the interval [0, 2 * (SKIP/USE)], but
765 * assign an unequal amount to the integer value ceil(2 * SKIP/USE) +
766 * 1.
770 * When reseeding the random number generator, we want the same set of
771 * pixels to be used in cases where two alignment options are compared.
772 * If we wanted to avoid bias from repeatedly utilizing the same seed,
773 * we could seed with the number of the frame most recently aligned:
775 * srand(latest);
777 * However, in cursory tests, it seems okay to just use the default
778 * seed of 1, and so we do this, since it is simpler; both of these
779 * approaches to reseeding achieve better results than not reseeding.
780 * (1 is the default seed according to the GNU Manual Page for
781 * rand(3).)
783 * For subdomain calculations, we vary the seed by adding the subdomain
784 * index.
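 *
 * Worked example (added; not from the original comment): for a
 * requested coverage of 25% (_mc == 25),
 *
 *   coverage == USE/ALL == 0.25
 *   SKIP/USE == (1 - 0.25) / 0.25 == 3
 *
 * so on average three pixels are skipped for each pixel sampled, and
 * the per-step skip count is drawn from roughly [0, 2 * SKIP/USE],
 * i.e. [0, 6].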
787 class mc_iterate {
788 ale_pos mc_max;
789 unsigned int index;
790 unsigned int index_max;
791 int i_min;
792 int i_max;
793 int j_min;
794 int j_max;
796 rng_t rng;
798 public:
799 mc_iterate(int _i_min, int _i_max, int _j_min, int _j_max, unsigned int subdomain)
800 : rng() {
802 ale_pos coverage;
804 i_min = _i_min;
805 i_max = _i_max;
806 j_min = _j_min;
807 j_max = _j_max;
809 if (i_max < i_min || j_max < j_min)
810 index_max = 0;
811 else
812 index_max = (i_max - i_min) * (j_max - j_min);
814 if (index_max < 500 || _mc > 100 || _mc <= 0)
815 coverage = 1;
816 else
817 coverage = _mc / 100;
819 double su = (1 - coverage) / coverage;
821 mc_max = (floor(2*su) * (1 + floor(2*su)) + 2*su)
822 / (2 + 2 * floor(2*su) - 2*su);
824 rng.seed(1 + subdomain);
826 #define FIXED16 3
827 #if ALE_COORDINATES == FIXED16
829 * XXX: This calculation might not yield the correct
830 * expected value.
832 index = -1 + (int) ceil(((ale_pos) mc_max+1)
833 / (ale_pos) ( (1 + 0xffffff)
834 / (1 + (rng.get() & 0xffffff))));
835 #else
836 index = -1 + (int) ceil((ale_accum) (mc_max+1)
837 * ( (1 + ((ale_accum) (rng.get())) )
838 / (1 + ((ale_accum) RAND_MAX)) ));
839 #endif
840 #undef FIXED16
843 int get_i() {
844 return index / (j_max - j_min) + i_min;
847 int get_j() {
848 return index % (j_max - j_min) + j_min;
851 void operator++(int whats_this_for) {
852 #define FIXED16 3
853 #if ALE_COORDINATES == FIXED16
854 index += (int) ceil(((ale_pos) mc_max+1)
855 / (ale_pos) ( (1 + 0xffffff)
856 / (1 + (rng.get() & 0xffffff))));
857 #else
858 index += (int) ceil((ale_accum) (mc_max+1)
859 * ( (1 + ((ale_accum) (rng.get())) )
860 / (1 + ((ale_accum) RAND_MAX)) ));
861 #endif
862 #undef FIXED16
865 int done() {
866 return (index >= index_max);
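/*
 * Typical use (a sketch, mirroring the exposure_ratio_iterate code
 * later in this file): iterate over a sampled subset of a rectangular
 * region, skipping pixels according to the Monte Carlo parameters
 * above.
 *
 *   for (mc_iterate m(i_min, i_max, j_min, j_max, subdomain);
 *           !m.done(); m++) {
 *           int i = m.get_i();
 *           int j = m.get_j();
 *           // ... sample pixel (i, j) ...
 *   }
 */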
871 * Not-quite-symmetric difference function. Determines the difference in areas
872 * where the arrays overlap. Uses the reference array's notion of pixel positions.
874 * For the purposes of determining the difference, this function divides each
875 * pixel value by the corresponding image's average pixel magnitude, unless we
876 * are:
878 * a) Extending the boundaries of the image, or
880 * b) following the previous frame's transform
882 * If we are doing monte-carlo pixel sampling for alignment, we
883 * typically sample a subset of available pixels; otherwise, we sample
884 * all pixels.
888 template<class diff_trans>
889 class diff_stat_generic {
891 transformation::elem_bounds_t elem_bounds;
893 struct run {
895 diff_trans offset;
897 ale_accum result;
898 ale_accum divisor;
900 point max, min;
901 ale_accum centroid[2], centroid_divisor;
902 ale_accum de_centroid[2], de_centroid_v, de_sum;
904 void init() {
906 result = 0;
907 divisor = 0;
909 min = point::posinf();
910 max = point::neginf();
912 centroid[0] = 0;
913 centroid[1] = 0;
914 centroid_divisor = 0;
916 de_centroid[0] = 0;
917 de_centroid[1] = 0;
919 de_centroid_v = 0;
921 de_sum = 0;
924 void init(diff_trans _offset) {
925 offset = _offset;
926 init();
930 * Required for STL sanity.
932 run() : offset(diff_trans::eu_identity()) {
933 init();
936 run(diff_trans _offset) : offset(_offset) {
937 init(_offset);
940 void add(const run &_run) {
941 result += _run.result;
942 divisor += _run.divisor;
944 for (int d = 0; d < 2; d++) {
945 if (min[d] > _run.min[d])
946 min[d] = _run.min[d];
947 if (max[d] < _run.max[d])
948 max[d] = _run.max[d];
949 centroid[d] += _run.centroid[d];
950 de_centroid[d] += _run.de_centroid[d];
953 centroid_divisor += _run.centroid_divisor;
954 de_centroid_v += _run.de_centroid_v;
955 de_sum += _run.de_sum;
958 run(const run &_run) : offset(_run.offset) {
961 * Initialize
963 init(_run.offset);
966 * Add
968 add(_run);
971 run &operator=(const run &_run) {
974 * Initialize
976 init(_run.offset);
979 * Add
981 add(_run);
983 return *this;
986 ~run() {
989 ale_accum get_error() const {
990 return pow(result / divisor, 1/(ale_accum) metric_exponent);
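/*
 * Note (added for clarity): together with the accumulation in sample()
 * below, this evaluates
 *
 *   error = ( sum w * |a - b|^p  /  sum w * max(a, b)^p )^(1/p)
 *
 * where p is metric_exponent, so for nonnegative pixel values the
 * result is a normalized, dimensionless error in [0, 1].
 */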
993 void sample(int f, scale_cluster c, int i, int j, point t, point u,
994 const run &comparison) {
996 pixel pa = c.accum->get_pixel(i, j);
998 ale_real this_result[2] = { 0, 0 };
999 ale_real this_divisor[2] = { 0, 0 };
1001 pixel p[2];
1002 pixel weight[2];
1003 weight[0] = pixel(1, 1, 1);
1004 weight[1] = pixel(1, 1, 1);
1006 pixel tm = offset.get_tonal_multiplier(point(i, j) + c.accum->offset());
1008 if (interpolant != NULL) {
1009 interpolant->filtered(i, j, &p[0], &weight[0], 1, f);
1011 if (weight[0].min_norm() > ale_real_weight_floor) {
1012 p[0] /= weight[0];
1013 } else {
1014 return;
1017 } else {
1018 p[0] = c.input->get_bl(t);
1021 p[0] *= tm;
1023 if (u.defined()) {
1024 p[1] = c.input->get_bl(u);
1025 p[1] *= tm;
1030 * Handle certainty.
1033 if (certainty_weights == 1) {
1036 * For speed, use arithmetic interpolation (get_bl(.))
1037 * instead of geometric (get_bl(., 1))
1040 weight[0] *= c.input_certainty->get_bl(t);
1041 if (u.defined())
1042 weight[1] *= c.input_certainty->get_bl(u);
1043 weight[0] *= c.certainty->get_pixel(i, j);
1044 weight[1] *= c.certainty->get_pixel(i, j);
1047 if (c.aweight != NULL) {
1048 weight[0] *= c.aweight->get_pixel(i, j);
1049 weight[1] *= c.aweight->get_pixel(i, j);
1053 * Update sampling area statistics
1056 if (min[0] > i)
1057 min[0] = i;
1058 if (min[1] > j)
1059 min[1] = j;
1060 if (max[0] < i)
1061 max[0] = i;
1062 if (max[1] < j)
1063 max[1] = j;
1065 centroid[0] += (weight[0][0] + weight[0][1] + weight[0][2]) * i;
1066 centroid[1] += (weight[0][0] + weight[0][1] + weight[0][2]) * j;
1067 centroid_divisor += (weight[0][0] + weight[0][1] + weight[0][2]);
1070 * Determine alignment type.
1073 for (int m = 0; m < (u.defined() ? 2 : 1); m++)
1074 if (channel_alignment_type == 0) {
1076 * Align based on all channels.
1080 for (int k = 0; k < 3; k++) {
1081 ale_real achan = pa[k];
1082 ale_real bchan = p[m][k];
1084 this_result[m] += weight[m][k] * pow(fabs(achan - bchan), metric_exponent);
1085 this_divisor[m] += weight[m][k] * pow(achan > bchan ? achan : bchan, metric_exponent);
1087 } else if (channel_alignment_type == 1) {
1089 * Align based on the green channel.
1092 ale_real achan = pa[1];
1093 ale_real bchan = p[m][1];
1095 this_result[m] = weight[m][1] * pow(fabs(achan - bchan), metric_exponent);
1096 this_divisor[m] = weight[m][1] * pow(achan > bchan ? achan : bchan, metric_exponent);
1097 } else if (channel_alignment_type == 2) {
1099 * Align based on the sum of all channels.
1102 ale_real asum = 0;
1103 ale_real bsum = 0;
1104 ale_real wsum = 0;
1106 for (int k = 0; k < 3; k++) {
1107 asum += pa[k];
1108 bsum += p[m][k];
1109 wsum += weight[m][k] / 3;
1112 this_result[m] = wsum * pow(fabs(asum - bsum), metric_exponent);
1113 this_divisor[m] = wsum * pow(asum > bsum ? asum : bsum, metric_exponent);
1116 if (u.defined()) {
1117 // ale_real de = fabs(this_result[0] / this_divisor[0]
1118 // - this_result[1] / this_divisor[1]);
1119 ale_real de = fabs(this_result[0] - this_result[1]);
1121 de_centroid[0] += de * (ale_real) i;
1122 de_centroid[1] += de * (ale_real) j;
1124 de_centroid_v += de * (ale_real) t.lengthto(u);
1126 de_sum += de;
1129 result += (this_result[0]);
1130 divisor += (this_divisor[0]);
1133 void rescale(ale_pos scale) {
1134 offset.rescale(scale);
1136 de_centroid[0] *= scale;
1137 de_centroid[1] *= scale;
1138 de_centroid_v *= scale;
1141 point get_centroid() {
1142 point result = point(centroid[0] / centroid_divisor, centroid[1] / centroid_divisor);
1144 assert (finite(centroid[0])
1145 && finite(centroid[1])
1146 && (result.defined() || centroid_divisor == 0));
1148 return result;
1151 point get_error_centroid() {
1152 point result = point(de_centroid[0] / de_sum, de_centroid[1] / de_sum);
1153 return result;
1157 ale_pos get_error_perturb() {
1158 ale_pos result = de_centroid_v / de_sum;
1160 return result;
1166 * When non-empty, runs.front() is best, runs.back() is
1167 * testing.
1170 std::vector<run> runs;
1173 * old_runs stores the latest available perturbation set for
1174 * each multi-alignment element.
1177 typedef int run_index;
1178 std::map<run_index, run> old_runs;
1180 static void *diff_subdomain(void *args);
1182 struct subdomain_args {
1183 struct scale_cluster c;
1184 std::vector<run> runs;
1185 int ax_count;
1186 int f;
1187 int i_min, i_max, j_min, j_max;
1188 int subdomain;
1191 struct scale_cluster si;
1192 int ax_count;
1193 int frame;
1195 std::vector<ale_pos> perturb_multipliers;
1197 public:
1198 void diff(struct scale_cluster c, const diff_trans &t,
1199 int _ax_count, int f) {
1201 if (runs.size() == 2)
1202 runs.pop_back();
1204 runs.push_back(run(t));
1206 si = c;
1207 ax_count = _ax_count;
1208 frame = f;
1210 ui::get()->d2_align_sample_start();
1212 if (interpolant != NULL) {
1215 * XXX: This has not been tested, and may be completely
1216 * wrong.
1219 transformation tt = transformation::eu_identity();
1220 tt.set_current_element(t);
1221 interpolant->set_parameters(tt, c.input, c.accum->offset());
1224 int N;
1225 #ifdef USE_PTHREAD
1226 N = thread::count();
1228 pthread_t *threads = (pthread_t *) malloc(sizeof(pthread_t) * N);
1229 pthread_attr_t *thread_attr = (pthread_attr_t *) malloc(sizeof(pthread_attr_t) * N);
1231 #else
1232 N = 1;
1233 #endif
1235 subdomain_args *args = new subdomain_args[N];
1237 transformation::elem_bounds_int_t b = elem_bounds.scale_to_bounds(c.accum->height(), c.accum->width());
1239 // fprintf(stdout, "[%d %d] [%d %d]\n",
1240 // global_i_min, global_i_max, global_j_min, global_j_max);
1242 for (int ti = 0; ti < N; ti++) {
1243 args[ti].c = c;
1244 args[ti].runs = runs;
1245 args[ti].ax_count = ax_count;
1246 args[ti].f = f;
1247 args[ti].i_min = b.imin + ((b.imax - b.imin) * ti) / N;
1248 args[ti].i_max = b.imin + ((b.imax - b.imin) * (ti + 1)) / N;
1249 args[ti].j_min = b.jmin;
1250 args[ti].j_max = b.jmax;
1251 args[ti].subdomain = ti;
1252 #ifdef USE_PTHREAD
1253 pthread_attr_init(&thread_attr[ti]);
1254 pthread_attr_setdetachstate(&thread_attr[ti], PTHREAD_CREATE_JOINABLE);
1255 pthread_create(&threads[ti], &thread_attr[ti], diff_subdomain, &args[ti]);
1256 #else
1257 diff_subdomain(&args[ti]);
1258 #endif
1261 for (int ti = 0; ti < N; ti++) {
1262 #ifdef USE_PTHREAD
1263 pthread_join(threads[ti], NULL);
1264 #endif
1265 runs.back().add(args[ti].runs.back());
1268 #ifdef USE_PTHREAD
1269 free(threads);
1270 free(thread_attr);
1271 #endif
1273 delete[] args;
1275 ui::get()->d2_align_sample_stop();
1279 private:
1280 void rediff() {
1281 std::vector<diff_trans> t_array;
1283 for (unsigned int r = 0; r < runs.size(); r++) {
1284 t_array.push_back(runs[r].offset);
1287 runs.clear();
1289 for (unsigned int r = 0; r < t_array.size(); r++)
1290 diff(si, t_array[r], ax_count, frame);
1294 public:
1295 int better() {
1296 assert(runs.size() >= 2);
1297 assert(runs[0].offset.scale() == runs[1].offset.scale());
1299 return (runs[1].get_error() < runs[0].get_error()
1300 || (!finite(runs[0].get_error()) && finite(runs[1].get_error())));
1303 int better_defined() {
1304 assert(runs.size() >= 2);
1305 assert(runs[0].offset.scale() == runs[1].offset.scale());
1307 return (runs[1].get_error() < runs[0].get_error());
1310 diff_stat_generic(transformation::elem_bounds_t e)
1311 : runs(), old_runs(), perturb_multipliers() {
1312 elem_bounds = e;
1315 run_index get_run_index(unsigned int perturb_index) {
1316 return perturb_index;
1319 run &get_run(unsigned int perturb_index) {
1320 run_index index = get_run_index(perturb_index);
1322 assert(old_runs.count(index));
1323 return old_runs[index];
1326 void rescale(ale_pos scale, scale_cluster _si) {
1327 assert(runs.size() == 1);
1329 si = _si;
1331 runs[0].rescale(scale);
1333 rediff();
1336 ~diff_stat_generic() {
1339 diff_stat_generic &operator=(const diff_stat_generic &dst) {
1341 * Copy run information.
1343 runs = dst.runs;
1344 old_runs = dst.old_runs;
1347 * Copy diff variables
1349 si = dst.si;
1350 ax_count = dst.ax_count;
1351 frame = dst.frame;
1352 perturb_multipliers = dst.perturb_multipliers;
1353 elem_bounds = dst.elem_bounds;
1355 return *this;
1358 diff_stat_generic(const diff_stat_generic &dst) : runs(), old_runs(),
1359 perturb_multipliers() {
1360 operator=(dst);
1363 void set_elem_bounds(transformation::elem_bounds_t e) {
1364 elem_bounds = e;
1367 ale_accum get_result() {
1368 assert(runs.size() == 1);
1369 return runs[0].result;
1372 ale_accum get_divisor() {
1373 assert(runs.size() == 1);
1374 return runs[0].divisor;
1377 diff_trans get_offset() {
1378 assert(runs.size() == 1);
1379 return runs[0].offset;
1382 int operator!=(diff_stat_generic &param) {
1383 return (get_error() != param.get_error());
1386 int operator==(diff_stat_generic &param) {
1387 return !(operator!=(param));
1390 ale_pos get_error_perturb() {
1391 assert(runs.size() == 1);
1392 return runs[0].get_error_perturb();
1395 ale_accum get_error() const {
1396 assert(runs.size() == 1);
1397 return runs[0].get_error();
1400 public:
1402 * Get the set of transformations produced by a given perturbation
1404 void get_perturb_set(std::vector<diff_trans> *set,
1405 ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
1406 ale_pos *current_bd, ale_pos *modified_bd,
1407 std::vector<ale_pos> multipliers = std::vector<ale_pos>()) {
1409 assert(runs.size() == 1);
1411 diff_trans test_t(diff_trans::eu_identity());
1414 * Translational or euclidean transformation
1417 for (unsigned int i = 0; i < 2; i++)
1418 for (unsigned int s = 0; s < 2; s++) {
1420 if (!multipliers.size())
1421 multipliers.push_back(1);
1423 assert(finite(multipliers[0]));
1425 test_t = get_offset();
1427 // test_t.eu_modify(i, (s ? -adj_p : adj_p) * multipliers[0]);
1428 test_t.translate((i ? point(1, 0) : point(0, 1))
1429 * (s ? -adj_p : adj_p)
1430 * multipliers[0]);
1432 test_t.snap(adj_p / 2);
1434 set->push_back(test_t);
1435 multipliers.erase(multipliers.begin());
1439 if (alignment_class > 0)
1440 for (unsigned int s = 0; s < 2; s++) {
1442 if (!multipliers.size())
1443 multipliers.push_back(1);
1445 assert(multipliers.size());
1446 assert(finite(multipliers[0]));
1448 if (!(adj_o * multipliers[0] < rot_max))
1449 return;
1451 ale_pos adj_s = (s ? 1 : -1) * adj_o * multipliers[0];
1453 test_t = get_offset();
1455 run_index ori = get_run_index(set->size());
1456 point centroid = point::undefined();
1458 if (!old_runs.count(ori))
1459 ori = get_run_index(0);
1461 if (!centroid.finite() && old_runs.count(ori)) {
1462 centroid = old_runs[ori].get_error_centroid();
1464 if (!centroid.finite())
1465 centroid = old_runs[ori].get_centroid();
1467 centroid *= test_t.scale()
1468 / old_runs[ori].offset.scale();
1471 if (!centroid.finite() && !test_t.is_projective()) {
1472 test_t.eu_modify(2, adj_s);
1473 } else if (!centroid.finite()) {
1474 centroid = point(si.input->height() / 2,
1475 si.input->width() / 2);
1477 test_t.rotate(centroid + si.accum->offset(),
1478 adj_s);
1479 } else {
1480 test_t.rotate(centroid + si.accum->offset(),
1481 adj_s);
1484 test_t.snap(adj_p / 2);
1486 set->push_back(test_t);
1487 multipliers.erase(multipliers.begin());
1490 if (alignment_class == 2) {
1493 * Projective transformation
1496 for (unsigned int i = 0; i < 4; i++)
1497 for (unsigned int j = 0; j < 2; j++)
1498 for (unsigned int s = 0; s < 2; s++) {
1500 if (!multipliers.size())
1501 multipliers.push_back(1);
1503 assert(multipliers.size());
1504 assert(finite(multipliers[0]));
1506 ale_pos adj_s = (s ? -1 : 1) * adj_p * multipliers [0];
1508 test_t = get_offset();
1510 if (perturb_type == 0)
1511 test_t.gpt_modify_bounded(j, i, adj_s, elem_bounds.scale_to_bounds(si.accum->height(), si.accum->width()));
1512 else if (perturb_type == 1)
1513 test_t.gr_modify(j, i, adj_s);
1514 else
1515 assert(0);
1517 test_t.snap(adj_p / 2);
1519 set->push_back(test_t);
1520 multipliers.erase(multipliers.begin());
1526 * Barrel distortion
1529 if (bda_mult != 0 && adj_b != 0) {
1531 for (unsigned int d = 0; d < get_offset().bd_count(); d++)
1532 for (unsigned int s = 0; s < 2; s++) {
1534 if (!multipliers.size())
1535 multipliers.push_back(1);
1537 assert (multipliers.size());
1538 assert (finite(multipliers[0]));
1540 ale_pos adj_s = (s ? -1 : 1) * adj_b * multipliers[0];
1542 if (bda_rate > 0 && fabs(modified_bd[d] + adj_s - current_bd[d]) > bda_rate)
1543 continue;
1545 diff_trans test_t = get_offset();
1547 test_t.bd_modify(d, adj_s);
1549 set->push_back(test_t);
1554 void confirm() {
1555 assert(runs.size() == 2);
1556 runs[0] = runs[1];
1557 runs.pop_back();
1560 void discard() {
1561 assert(runs.size() == 2);
1562 runs.pop_back();
1565 void perturb_test(ale_pos perturb, ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
1566 ale_pos *current_bd, ale_pos *modified_bd, int stable) {
1568 assert(runs.size() == 1);
1570 std::vector<diff_trans> t_set;
1572 if (perturb_multipliers.size() == 0) {
1573 get_perturb_set(&t_set, adj_p, adj_o, adj_b,
1574 current_bd, modified_bd);
1576 for (unsigned int i = 0; i < t_set.size(); i++) {
1577 diff_stat_generic test = *this;
1579 test.diff(si, t_set[i], ax_count, frame);
1581 test.confirm();
1583 if (finite(test.get_error_perturb()))
1584 perturb_multipliers.push_back(adj_p / test.get_error_perturb());
1585 else
1586 perturb_multipliers.push_back(1);
1590 t_set.clear();
1593 get_perturb_set(&t_set, adj_p, adj_o, adj_b, current_bd, modified_bd,
1594 perturb_multipliers);
1596 int found_unreliable = 1;
1597 std::vector<int> tested(t_set.size(), 0);
1599 for (unsigned int i = 0; i < t_set.size(); i++) {
1600 run_index ori = get_run_index(i);
1603 * Check for stability
1605 if (stable
1606 && old_runs.count(ori)
1607 && old_runs[ori].offset == t_set[i])
1608 tested[i] = 1;
1611 std::vector<ale_pos> perturb_multipliers_original = perturb_multipliers;
1613 while (found_unreliable) {
1615 found_unreliable = 0;
1617 for (unsigned int i = 0; i < t_set.size(); i++) {
1619 if (tested[i])
1620 continue;
1622 diff(si, t_set[i], ax_count, frame);
1624 if (!(i < perturb_multipliers.size())
1625 || !finite(perturb_multipliers[i])) {
1627 perturb_multipliers.resize(i + 1);
1629 if (finite(perturb_multipliers[i])
1630 && finite(adj_p)
1631 && finite(adj_p / runs[1].get_error_perturb())) {
1632 perturb_multipliers[i] =
1633 adj_p / runs[1].get_error_perturb();
1635 found_unreliable = 1;
1636 } else
1637 perturb_multipliers[i] = 1;
1639 continue;
1642 run_index ori = get_run_index(i);
1644 if (old_runs.count(ori) == 0)
1645 old_runs.insert(std::pair<run_index, run>(ori, runs[1]));
1646 else
1647 old_runs[ori] = runs[1];
1649 if (finite(perturb_multipliers_original[i])
1650 && finite(runs[1].get_error_perturb())
1651 && finite(adj_p)
1652 && finite(perturb_multipliers_original[i] * adj_p / runs[1].get_error_perturb()))
1653 perturb_multipliers[i] = perturb_multipliers_original[i]
1654 * adj_p / runs[1].get_error_perturb();
1655 else
1656 perturb_multipliers[i] = 1;
1658 tested[i] = 1;
1660 if (better()
1661 && runs[1].get_error() < runs[0].get_error()
1662 && perturb_multipliers[i]
1663 / perturb_multipliers_original[i] < 2) {
1664 runs[0] = runs[1];
1665 runs.pop_back();
1666 return;
1671 if (runs.size() > 1)
1672 runs.pop_back();
1674 if (!found_unreliable)
1675 return;
1681 typedef diff_stat_generic<trans_single> diff_stat_t;
1682 typedef diff_stat_generic<trans_multi> diff_stat_multi;
1686 * Adjust exposure for an aligned frame B against reference A.
1688 * Expects full-LOD images.
1690 * Note: This method does not use any weighting, by certainty or
1691 * otherwise, in the first exposure registration pass, as any bias of
1692 * weighting according to color may also bias the exposure registration
1693 * result; it does use weighting, including weighting by certainty
1694 * (even if certainty weighting is not specified), in the second pass,
1695 * under the assumption that weighting by certainty improves handling
1696 * of out-of-range highlights, and that bias of exposure measurements
1697 * according to color may generally be less harmful after spatial
1698 * registration has been performed.
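 *
 * Sketch of the resulting update (see set_exposure_ratio() below): the
 * weighted channel sums asum (reference A) and bsum (frame B) yield
 *
 *   new_multiplier = asum / bsum * current_multiplier
 *
 * so if, for example, frame B measures at half the reference's
 * brightness, its exposure multiplier is doubled.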
1700 class exposure_ratio_iterate : public thread::decompose_domain {
1701 pixel_accum *asums;
1702 pixel_accum *bsums;
1703 pixel_accum *asum;
1704 pixel_accum *bsum;
1705 struct scale_cluster c;
1706 const transformation &t;
1707 int ax_count;
1708 int pass_number;
1709 protected:
1710 void prepare_subdomains(unsigned int N) {
1711 asums = new pixel_accum[N];
1712 bsums = new pixel_accum[N];
1714 void subdomain_algorithm(unsigned int thread,
1715 int i_min, int i_max, int j_min, int j_max) {
1717 point offset = c.accum->offset();
1719 for (mc_iterate m(i_min, i_max, j_min, j_max, thread); !m.done(); m++) {
1721 unsigned int i = (unsigned int) m.get_i();
1722 unsigned int j = (unsigned int) m.get_j();
1724 if (ref_excluded(i, j, offset, c.ax_parameters, ax_count))
1725 continue;
1728 * Transform
1731 struct point q;
1733 q = (c.input_scale < 1.0 && interpolant == NULL)
1734 ? t.scaled_inverse_transform(
1735 point(i + offset[0], j + offset[1]))
1736 : t.unscaled_inverse_transform(
1737 point(i + offset[0], j + offset[1]));
1740 * Check that the transformed coordinates are within
1741 * the boundaries of array c.input, that they are not
1742 * subject to exclusion, and that the weight value in
1743 * the accumulated array is nonzero.
1746 if (input_excluded(q[0], q[1], c.ax_parameters, ax_count))
1747 continue;
1749 if (q[0] >= 0
1750 && q[0] <= c.input->height() - 1
1751 && q[1] >= 0
1752 && q[1] <= c.input->width() - 1
1753 && ((pixel) c.certainty->get_pixel(i, j)).minabs_norm() != 0) {
1754 pixel a = c.accum->get_pixel(i, j);
1755 pixel b;
1757 b = c.input->get_bl(q);
1759 pixel weight = ((c.aweight && pass_number)
1760 ? (pixel) c.aweight->get_pixel(i, j)
1761 : pixel(1, 1, 1))
1762 * (pass_number
1763 ? psqrt(c.certainty->get_pixel(i, j)
1764 * c.input_certainty->get_bl(q, 1))
1765 : pixel(1, 1, 1));
1767 asums[thread] += a * weight;
1768 bsums[thread] += b * weight;
1773 void finish_subdomains(unsigned int N) {
1774 for (unsigned int n = 0; n < N; n++) {
1775 *asum += asums[n];
1776 *bsum += bsums[n];
1778 delete[] asums;
1779 delete[] bsums;
1781 public:
1782 exposure_ratio_iterate(pixel_accum *_asum,
1783 pixel_accum *_bsum,
1784 struct scale_cluster _c,
1785 const transformation &_t,
1786 int _ax_count,
1787 int _pass_number) : decompose_domain(0, _c.accum->height(),
1788 0, _c.accum->width()),
1789 t(_t) {
1791 asum = _asum;
1792 bsum = _bsum;
1793 c = _c;
1794 ax_count = _ax_count;
1795 pass_number = _pass_number;
1798 exposure_ratio_iterate(pixel_accum *_asum,
1799 pixel_accum *_bsum,
1800 struct scale_cluster _c,
1801 const transformation &_t,
1802 int _ax_count,
1803 int _pass_number,
1804 transformation::elem_bounds_int_t b) : decompose_domain(b.imin, b.imax,
1805 b.jmin, b.jmax),
1806 t(_t) {
1808 asum = _asum;
1809 bsum = _bsum;
1810 c = _c;
1811 ax_count = _ax_count;
1812 pass_number = _pass_number;
1816 static void set_exposure_ratio(unsigned int m, struct scale_cluster c,
1817 const transformation &t, int ax_count, int pass_number) {
1819 if (_exp_register == 2) {
1821 * Use metadata only.
1823 ale_real gain_multiplier = image_rw::exp(m).get_gain_multiplier();
1824 pixel multiplier = pixel(gain_multiplier, gain_multiplier, gain_multiplier);
1826 image_rw::exp(m).set_multiplier(multiplier);
1827 ui::get()->exp_multiplier(multiplier[0],
1828 multiplier[1],
1829 multiplier[2]);
1831 return;
1834 pixel_accum asum(0, 0, 0), bsum(0, 0, 0);
1836 exposure_ratio_iterate eri(&asum, &bsum, c, t, ax_count, pass_number);
1837 eri.run();
1839 // std::cerr << (asum / bsum) << " ";
1841 pixel_accum new_multiplier;
1843 new_multiplier = asum / bsum * image_rw::exp(m).get_multiplier();
1845 if (finite(new_multiplier[0])
1846 && finite(new_multiplier[1])
1847 && finite(new_multiplier[2])) {
1848 image_rw::exp(m).set_multiplier(new_multiplier);
1849 ui::get()->exp_multiplier(new_multiplier[0],
1850 new_multiplier[1],
1851 new_multiplier[2]);
1856 * Copy all ax parameters.
1858 static exclusion *copy_ax_parameters(int local_ax_count, exclusion *source) {
1860 exclusion *dest = (exclusion *) malloc(local_ax_count * sizeof(exclusion));
1862 assert (dest);
1864 if (!dest)
1865 ui::get()->memory_error("exclusion regions");
1867 for (int idx = 0; idx < local_ax_count; idx++)
1868 dest[idx] = source[idx];
1870 return dest;
1874 * Copy ax parameters according to frame.
1876 static exclusion *filter_ax_parameters(int frame, int *local_ax_count) {
1878 exclusion *dest = (exclusion *) malloc(ax_count * sizeof(exclusion));
1880 assert (dest);
1882 if (!dest)
1883 ui::get()->memory_error("exclusion regions");
1885 *local_ax_count = 0;
1887 for (int idx = 0; idx < ax_count; idx++) {
1888 if (ax_parameters[idx].x[4] > frame
1889 || ax_parameters[idx].x[5] < frame)
1890 continue;
1892 dest[*local_ax_count] = ax_parameters[idx];
1894 (*local_ax_count)++;
1897 return dest;
1900 static void scale_ax_parameters(int local_ax_count, exclusion *ax_parameters,
1901 ale_pos ref_scale, ale_pos input_scale) {
1902 for (int i = 0; i < local_ax_count; i++) {
1903 ale_pos scale = (ax_parameters[i].type == exclusion::RENDER)
1904 ? ref_scale
1905 : input_scale;
1907 for (int n = 0; n < 6; n++) {
1908 ax_parameters[i].x[n] = ax_parameters[i].x[n] * scale;
1914 * Prepare the next level of detail for ordinary images.
1916 static const image *prepare_lod(const image *current) {
1917 if (current == NULL)
1918 return NULL;
1920 return current->scale_by_half("prepare_lod");
1924 * Prepare the next level of detail for definition maps.
1926 static const image *prepare_lod_def(const image *current) {
1927 if (current == NULL)
1928 return NULL;
1930 return current->defined_scale_by_half("prepare_lod_def");
1934 * Initialize scale cluster data structures.
1937 static void init_nl_cluster(struct scale_cluster *sc) {
1941 * Destroy the first element in the scale cluster data structure.
1943 static void final_clusters(struct scale_cluster *scale_clusters, ale_pos scale_factor,
1944 unsigned int steps) {
1946 if (scale_clusters[0].input_scale < 1.0) {
1947 delete scale_clusters[0].input;
1950 delete scale_clusters[0].input_certainty;
1952 free((void *)scale_clusters[0].ax_parameters);
1954 for (unsigned int step = 1; step < steps; step++) {
1955 delete scale_clusters[step].accum;
1956 delete scale_clusters[step].certainty;
1957 delete scale_clusters[step].aweight;
1959 if (scale_clusters[step].input_scale < 1.0) {
1960 delete scale_clusters[step].input;
1961 delete scale_clusters[step].input_certainty;
1964 free((void *)scale_clusters[step].ax_parameters);
1967 free(scale_clusters);
1971 * Calculate the centroid of a control point for the set of frames
1972 * having index lower than m. Divide by any scaling of the output.
1974 static point unscaled_centroid(unsigned int m, unsigned int p) {
1975 assert(_keep);
1977 point point_sum(0, 0);
1978 ale_accum divisor = 0;
1980 for(unsigned int j = 0; j < m; j++) {
1981 point pp = cp_array[p][j];
1983 if (pp.defined()) {
1984 point_sum += kept_t[j].transform_unscaled(pp)
1985 / kept_t[j].scale();
1986 divisor += 1;
1990 if (divisor == 0)
1991 return point::undefined();
1993 return point_sum / divisor;
1997 * Calculate centroid of this frame, and of all previous frames,
1998 * from points common to both sets.
2000 static void centroids(unsigned int m, point *current, point *previous) {
2002 * Calculate the translation
2004 point other_centroid(0, 0);
2005 point this_centroid(0, 0);
2006 ale_pos divisor = 0;
2008 for (unsigned int i = 0; i < cp_count; i++) {
2009 point other_c = unscaled_centroid(m, i);
2010 point this_c = cp_array[i][m];
2012 if (!other_c.defined() || !this_c.defined())
2013 continue;
2015 other_centroid += other_c;
2016 this_centroid += this_c;
2017 divisor += 1;
2021 if (divisor == 0) {
2022 *current = point::undefined();
2023 *previous = point::undefined();
2024 return;
2027 *current = this_centroid / divisor;
2028 *previous = other_centroid / divisor;
2032 * Calculate the RMS error of control points for frame m, with
2033 * transformation t, against control points for earlier frames.
2035 static ale_pos cp_rms_error(unsigned int m, transformation t) {
2036 assert (_keep);
2038 ale_accum err = 0;
2039 ale_accum divisor = 0;
2041 for (unsigned int i = 0; i < cp_count; i++)
2042 for (unsigned int j = 0; j < m; j++) {
2043 const point *p = cp_array[i];
2044 point p_ref = kept_t[j].transform_unscaled(p[j]);
2045 point p_cur = t.transform_unscaled(p[m]);
2047 if (!p_ref.defined() || !p_cur.defined())
2048 continue;
2050 err += p_ref.lengthtosq(p_cur);
2051 divisor += 1;
2054 return (ale_pos) sqrt(err / divisor);
2058 static void test_global(diff_stat_t *here, scale_cluster si, transformation t,
2059 int local_ax_count, int m, ale_accum local_gs_mo, ale_pos perturb) {
2061 diff_stat_t test(*here);
2063 test.diff(si, t.get_current_element(), local_ax_count, m);
2065 unsigned int ovl = overlap(si, t, local_ax_count);
2067 if (ovl >= local_gs_mo && test.better()) {
2068 test.confirm();
2069 *here = test;
2070 ui::get()->set_match(here->get_error());
2071 ui::get()->set_offset(here->get_offset());
2072 } else {
2073 test.discard();
2079 * Get the set of global transformations for a given density
2081 static void test_globals(diff_stat_t *here,
2082 scale_cluster si, transformation t, int local_gs, ale_pos adj_p,
2083 int local_ax_count, int m, ale_accum local_gs_mo, ale_pos perturb) {
2085 transformation offset = t;
2087 point min, max;
2089 transformation offset_p = offset;
2091 if (!offset_p.is_projective())
2092 offset_p.eu_to_gpt();
2094 min = max = offset_p.gpt_get(0);
2095 for (int p_index = 1; p_index < 4; p_index++) {
2096 point p = offset_p.gpt_get(p_index);
2097 if (p[0] < min[0])
2098 min[0] = p[0];
2099 if (p[1] < min[1])
2100 min[1] = p[1];
2101 if (p[0] > max[0])
2102 max[0] = p[0];
2103 if (p[1] > max[1])
2104 max[1] = p[1];
2107 point inner_min_t = -min;
2108 point inner_max_t = -max + point(si.accum->height(), si.accum->width());
2109 point outer_min_t = -max + point(adj_p - 1, adj_p - 1);
2110 point outer_max_t = point(si.accum->height(), si.accum->width()) - point(adj_p, adj_p);
2112 if (local_gs == 1 || local_gs == 3 || local_gs == 4 || local_gs == 6) {
2115 * Inner
2118 for (ale_pos i = inner_min_t[0]; i <= inner_max_t[0]; i += adj_p)
2119 for (ale_pos j = inner_min_t[1]; j <= inner_max_t[1]; j += adj_p) {
2120 transformation test_t = offset;
2121 test_t.translate(point(i, j));
2122 test_global(here, si, test_t, local_ax_count, m, local_gs_mo, perturb);
2126 if (local_gs == 2 || local_gs == 3 || local_gs == -1 || local_gs == 6) {
2129 * Outer
2132 for (ale_pos i = outer_min_t[0]; i <= outer_max_t[0]; i += adj_p)
2133 for (ale_pos j = outer_min_t[1]; j < inner_min_t[1]; j += adj_p) {
2134 transformation test_t = offset;
2135 test_t.translate(point(i, j));
2136 test_global(here, si, test_t, local_ax_count, m, local_gs_mo, perturb);
2138 for (ale_pos i = outer_min_t[0]; i <= outer_max_t[0]; i += adj_p)
2139 for (ale_pos j = outer_max_t[1]; j > inner_max_t[1]; j -= adj_p) {
2140 transformation test_t = offset;
2141 test_t.translate(point(i, j));
2142 test_global(here, si, test_t, local_ax_count, m, local_gs_mo, perturb);
2144 for (ale_pos i = outer_min_t[0]; i < inner_min_t[0]; i += adj_p)
2145 for (ale_pos j = outer_min_t[1]; j <= outer_max_t[1]; j += adj_p) {
2146 transformation test_t = offset;
2147 test_t.translate(point(i, j));
2148 test_global(here, si, test_t, local_ax_count, m, local_gs_mo, perturb);
2150 for (ale_pos i = outer_max_t[0]; i > inner_max_t[0]; i -= adj_p)
2151 for (ale_pos j = outer_min_t[1]; j <= outer_max_t[1]; j += adj_p) {
2152 transformation test_t = offset;
2153 test_t.translate(point(i, j));
2154 test_global(here, si, test_t, local_ax_count, m, local_gs_mo, perturb);
2159 static void get_translational_set(std::vector<transformation> *set,
2160 transformation t, ale_pos adj_p) {
2162 ale_pos adj_s;
2164 transformation offset = t;
2165 transformation test_t(transformation::eu_identity());
2167 for (int i = 0; i < 2; i++)
2168 for (adj_s = -adj_p; adj_s <= adj_p; adj_s += 2 * adj_p) {
2170 test_t = offset;
2172 test_t.translate(i ? point(adj_s, 0) : point(0, adj_s));
2174 set->push_back(test_t);
2178 static int threshold_ok(ale_accum error) {
2179 if ((1 - error) * (ale_accum) 100 >= match_threshold)
2180 return 1;
2182 if (!(match_threshold >= 0))
2183 return 1;
2185 return 0;
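/*
 * Example (added): an error of 0.03 corresponds to a 97% match, which
 * satisfies any match threshold of 97 or below; a negative (or NaN)
 * threshold accepts every frame.
 */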
2188 static diff_stat_t _align_element(ale_pos perturb, ale_pos local_lower,
2189 scale_cluster *scale_clusters, diff_stat_t here,
2190 ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
2191 ale_pos *current_bd, ale_pos *modified_bd,
2192 astate_t *astate, int lod, scale_cluster si) {
2195 * Run initial tests to get perturbation multipliers and error
2196 * centroids.
2199 std::vector<d2::trans_single> t_set;
2201 here.get_perturb_set(&t_set, adj_p, adj_o, adj_b, current_bd, modified_bd);
2203 int stable_count = 0;
2205 while (perturb >= local_lower) {
2207 ui::get()->alignment_dims(scale_clusters[lod].accum->height(), scale_clusters[lod].accum->width(),
2208 scale_clusters[lod].input->height(), scale_clusters[lod].input->width());
2211 * Orientational adjustment value in degrees.
2213 * Since rotational perturbation is now specified as an
2214 * arclength, we have to convert.
2217 ale_pos adj_o = 2 * (double) perturb
2218 / sqrt(pow(scale_clusters[0].input->height(), 2)
2219 + pow(scale_clusters[0].input->width(), 2))
2220 * 180
2221 / M_PI;
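/*
 * For example (values hypothetical): a 640x480 input frame has a
 * diagonal of 800, so perturb == 16 gives
 *
 *   adj_o = 2 * 16 / 800 * 180 / M_PI  ~=  2.29 degrees
 */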
2224 * Barrel distortion adjustment value
2227 ale_pos adj_b = perturb * bda_mult;
2229 trans_single old_offset = here.get_offset();
2231 here.perturb_test(perturb, adj_p, adj_o, adj_b, current_bd, modified_bd,
2232 stable_count);
2234 if (here.get_offset() == old_offset)
2235 stable_count++;
2236 else
2237 stable_count = 0;
2239 if (stable_count == 3) {
2241 stable_count = 0;
2243 perturb *= 0.5;
2245 if (lod > 0
2246 && lod > lrint(log(perturb) / log(2)) - lod_preferred) {
2249 * Work with images twice as large
2252 lod--;
2253 si = scale_clusters[lod];
2256 * Rescale the transforms.
2259 ale_pos rescale_factor = (double) scale_factor
2260 / (double) pow(2, lod)
2261 / (double) here.get_offset().scale();
2263 here.rescale(rescale_factor, si);
2265 } else {
2266 adj_p = perturb / pow(2, lod);
2270 * Announce changes
2273 ui::get()->alignment_perturbation_level(perturb, lod);
2276 ui::get()->set_match(here.get_error());
2277 ui::get()->set_offset(here.get_offset());
2280 if (lod > 0) {
2281 ale_pos rescale_factor = (double) scale_factor
2282 / (double) here.get_offset().scale();
2284 here.rescale(rescale_factor, scale_clusters[0]);
2287 return here;
2291 * Check for satisfaction of the certainty threshold.
2293 static int ma_cert_satisfied(const scale_cluster &c, const transformation &t, unsigned int i) {
2294 transformation::elem_bounds_int_t b = t.elem_bounds().scale_to_bounds(c.accum->height(), c.accum->width());
2295 ale_accum sum[3] = {0, 0, 0};
2297 for (unsigned int ii = b.imin; ii < b.imax; ii++)
2298 for (unsigned int jj = b.jmin; jj < b.jmax; jj++) {
2299 pixel p = c.accum->get_pixel(ii, jj);
2300 sum[0] += p[0];
2301 sum[1] += p[1];
2302 sum[2] += p[2];
2305 unsigned int count = (b.jmax - b.jmin) * (b.imax - b.imin);
2307 for (int k = 0; k < 3; k++)
2308 if (sum[k] / count < _ma_cert)
2309 return 0;
2311 return 1;
2314 static diff_stat_t check_ancestor_path(const trans_multi &offset, const scale_cluster &si, diff_stat_t here, int local_ax_count, int frame) {
2316 if (offset.get_current_index() > 0)
2317 for (int i = offset.parent_index(offset.get_current_index()); i >= 0; i = (i > 0) ? (int) offset.parent_index(i) : -1) {
2318 trans_single t = offset.get_element(i);
2319 t.rescale(offset.get_current_element().scale());
2321 here.diff(si, t, local_ax_count, frame);
2323 if (here.better_defined())
2324 here.confirm();
2325 else
2326 here.discard();
2329 return here;
2332 #ifdef USE_FFTW
2334 * High-pass filter for frequency weights
2336 static void hipass(int rows, int cols, fftw_complex *inout) {
2337 for (int i = 0; i < rows * vert_freq_cut; i++)
2338 for (int j = 0; j < cols; j++)
2339 for (int k = 0; k < 2; k++)
2340 inout[i * cols + j][k] = 0;
2341 for (int i = 0; i < rows; i++)
2342 for (int j = 0; j < cols * horiz_freq_cut; j++)
2343 for (int k = 0; k < 2; k++)
2344 inout[i * cols + j][k] = 0;
2345 for (int i = 0; i < rows; i++)
2346 for (int j = 0; j < cols; j++)
2347 for (int k = 0; k < 2; k++)
2348 if (i / (double) rows + j / (double) cols < 2 * avg_freq_cut)
2349 inout[i * cols + j][k] = 0;
2351 #endif
2355 * Reset alignment weights
2357 static void reset_weights() {
2358 if (alignment_weights != NULL)
2359 ale_image_release(alignment_weights);
2361 alignment_weights = NULL;
2365 * Initialize alignment weights
2367 static void init_weights() {
2368 if (alignment_weights != NULL)
2369 return;
2371 alignment_weights = ale_new_image(accel::context(), ALE_IMAGE_RGB, ale_image_get_type(reference_image));
2373 assert (alignment_weights);
2375 assert (!ale_resize_image(alignment_weights, 0, 0, ale_image_get_width(reference_image), ale_image_get_height(reference_image)));
2377 ale_image_map_0(alignment_weights, "SET_PIXEL(p, PIXEL(1, 1, 1))");
2381 * Update alignment weights with weight map
2383 static void map_update() {
2385 if (weight_map == NULL)
2386 return;
2388 init_weights();
2390 #warning migrate to libale (e.g., ale_image_map_2)
2391 #if 0
2392 point map_offset = reference_image->offset() - weight_map->offset();
2394 int rows = reference_image->height();
2395 int cols = reference_image->width();
2397 for (int i = 0; i < rows; i++)
2398 for (int j = 0; j < cols; j++) {
2399 point map_weight_position = map_offset + point(i, j);
2400 if (map_weight_position[0] >= 0
2401 && map_weight_position[1] >= 0
2402 && map_weight_position[0] <= weight_map->height() - 1
2403 && map_weight_position[1] <= weight_map->width() - 1)
2404 alignment_weights->set_pixel(i, j,
2405 alignment_weights->get_pixel(i, j)
2406 * weight_map->get_bl(map_weight_position));
2408 #endif
2410 #warning current filtering might be inappropriate
2412 * XXX: this should perhaps use GET_PIXEL_BI_GEOMETRIC, or GET_PIXEL_BI_MINIMUM
2413 * (as, e.g., implemented in the deprecated image::get_bl() method).
2416 ale_image_map_2(alignment_weights, alignment_weights, weight_map, " \
2417 SET_PIXEL(p, GET_PIXEL(0, p) * GET_PIXEL_BL(1, p))");
2421 * Update alignment weights with algorithmic weights
2423 static void wmx_update() {
2424 #ifdef USE_UNIX
2426 static exposure *exp_def = new exposure_default();
2427 static exposure *exp_bool = new exposure_boolean();
2429 if (wmx_file == NULL || wmx_exec == NULL || wmx_defs == NULL)
2430 return;
2432 unsigned int rows = ale_image_get_height(reference_image);
2433 unsigned int cols = ale_image_get_width(reference_image);
2435 image_rw::write_image(wmx_file, reference_image);
2436 image_rw::write_image(wmx_defs, reference_defined, exp_bool);
2438 /* execute ... */
2439 int exit_status = 1;
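* The child process image is replaced by the external program; if
* execlp() returns, the failure is reported.  The parent waits and
* treats a nonzero exit status as failure.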
2440 if (!fork()) {
2441 execlp(wmx_exec, wmx_exec, wmx_file, wmx_defs, NULL);
2442 ui::get()->exec_failure(wmx_exec, wmx_file, wmx_defs);
2445 wait(&exit_status);
2447 if (exit_status)
2448 ui::get()->fork_failure("d2::align");
2450 ale_image wmx_weights = image_rw::read_image(wmx_file, exp_def);
2452 if (ale_image_get_height(wmx_weights) != rows || ale_image_get_width(wmx_weights) != cols)
2453 ui::get()->error("algorithmic weighting must not change image size");
2455 if (alignment_weights == NULL)
2456 alignment_weights = wmx_weights;
2457 else
2458 for (unsigned int i = 0; i < rows; i++)
2459 for (unsigned int j = 0; j < cols; j++)
2460 alignment_weights->set_pixel(i, j,
2461 (pixel) alignment_weights->get_pixel(i, j)
2462 * (pixel) wmx_weights->get_pixel(i, j));
2463 #endif
2467 * Update alignment weights with frequency weights
2469 static void fw_update() {
2470 #ifdef USE_FFTW
2471 if (horiz_freq_cut == 0
2472 && vert_freq_cut == 0
2473 && avg_freq_cut == 0)
2474 return;
2477 * Required for correct operation of --fwshow
2480 assert (alignment_weights == NULL);
2482 int rows = reference_image->height();
2483 int cols = reference_image->width();
2484 int colors = reference_image->depth();
2486 alignment_weights = new_image_ale_real(rows, cols,
2487 colors, "alignment_weights");
2489 fftw_complex *inout = (fftw_complex *) fftw_malloc(sizeof(fftw_complex) * rows * cols);
2491 assert (inout);
2493 fftw_plan pf = fftw_plan_dft_2d(rows, cols,
2494 inout, inout,
2495 FFTW_FORWARD, FFTW_ESTIMATE);
2497 fftw_plan pb = fftw_plan_dft_2d(rows, cols,
2498 inout, inout,
2499 FFTW_BACKWARD, FFTW_ESTIMATE);
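* For each color channel: forward-transform the reference image,
* discard low-frequency components with hipass(), inverse-transform,
* and store the normalized magnitude of the result as the alignment
* weight.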
2501 for (int k = 0; k < colors; k++) {
2502 for (int i = 0; i < rows * cols; i++) {
2503 inout[i][0] = reference_image->get_pixel(i / cols, i % cols)[k];
2504 inout[i][1] = 0;
2507 fftw_execute(pf);
2508 hipass(rows, cols, inout);
2509 fftw_execute(pb);
2511 for (int i = 0; i < rows * cols; i++) {
2512 #if 0
2513 alignment_weights->pix(i / cols, i % cols)[k] = fabs(inout[i][0] / (rows * cols));
2514 #else
2515 alignment_weights->set_chan(i / cols, i % cols, k,
2516 sqrt(pow(inout[i][0] / (rows * cols), 2)
2517 + pow(inout[i][1] / (rows * cols), 2)));
2518 #endif
2522 fftw_destroy_plan(pf);
2523 fftw_destroy_plan(pb);
2524 fftw_free(inout);
2526 if (fw_output != NULL)
2527 image_rw::write_image(fw_output, alignment_weights);
2528 #endif
2532 * Update alignment to frame N.
2534 static void update_to(int n) {
2536 assert (n <= latest + 1);
2537 assert (n >= 0);
2539 static astate_t astate;
2541 ui::get()->set_frame_num(n);
2543 if (latest < 0) {
2546 * Handle the initial frame
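* The first frame receives an identity transformation at the output
* scale unless a transformation data file supplies one via
* tload_first(), which sets is_default accordingly.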
2549 astate.set_input_frame(image_rw::open(n));
2551 const image *i = astate.get_input_frame();
2552 int is_default;
2553 transformation result = alignment_class == 2
2554 ? transformation::gpt_identity(i, scale_factor)
2555 : transformation::eu_identity(i, scale_factor);
2556 result = tload_first(tload, alignment_class == 2, result, &is_default);
2557 tsave_first(tsave, result, alignment_class == 2);
2559 if (_keep > 0) {
2560 kept_t = transformation::new_eu_identity_array(image_rw::count());
2561 kept_ok = (int *) malloc(image_rw::count()
2562 * sizeof(int));
2563 assert (kept_t);
2564 assert (kept_ok);
2566 if (!kept_t || !kept_ok)
2567 ui::get()->memory_error("alignment");
2569 kept_ok[0] = 1;
2570 kept_t[0] = result;
2573 latest = 0;
2574 latest_ok = 1;
2575 latest_t = result;
2577 astate.set_default(result);
2578 orig_t = result;
2580 image_rw::close(n);
2583 for (int i = latest + 1; i <= n; i++) {
2586 * Handle supplemental frames.
2589 assert (reference != NULL);
2591 ui::get()->set_arender_current();
2592 reference->sync(i - 1);
2593 ui::get()->clear_arender_current();
2594 reference_image = reference->get_image();
2595 reference_defined = reference->get_defined();
2597 if (i == 1)
2598 astate.default_set_original_bounds(reference_image);
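* Rebuild alignment weights for this frame: frequency weights are
* computed first (they require an empty weight image), followed by
* algorithmic weights and the per-pixel weight map.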
2600 reset_weights();
2601 fw_update();
2602 wmx_update();
2603 map_update();
2605 assert (reference_image != NULL);
2606 assert (reference_defined != NULL);
2608 astate.set_input_frame(image_rw::open(i));
2610 _align(i, _gs, &astate);
2612 image_rw::close(n);
2616 public:
2619 * Set the control point count
2621 static void set_cp_count(unsigned int n) {
2622 assert (cp_array == NULL);
2624 cp_count = n;
2625 cp_array = (const point **) malloc(n * sizeof(const point *));
2629 * Set control point i.
2631 static void set_cp(unsigned int i, const point *p) {
2632 cp_array[i] = p;
2636 * Register exposure
2638 static void exp_register() {
2639 _exp_register = 1;
2643 * Register exposure only based on metadata
2645 static void exp_meta_only() {
2646 _exp_register = 2;
2650 * Don't register exposure
2652 static void exp_noregister() {
2653 _exp_register = 0;
2657 * Set alignment class to translation only. Only adjust the x and y
2658 * position of images. Do not rotate input images or perform
2659 * projective transformations.
2661 static void class_translation() {
2662 alignment_class = 0;
2666 * Set alignment class to Euclidean transformations only. Adjust the x
2667 * and y position of images and the orientation of the image about the
2668 * image center.
2670 static void class_euclidean() {
2671 alignment_class = 1;
2675 * Set alignment class to perform general projective transformations.
2676 * See the file gpt.h for more information about general projective
2677 * transformations.
2679 static void class_projective() {
2680 alignment_class = 2;
2684 * Set the default initial alignment to the identity transformation.
2686 static void initial_default_identity() {
2687 default_initial_alignment_type = 0;
2691 * Set the default initial alignment to the most recently matched
2692 * frame's final transformation.
2694 static void initial_default_follow() {
2695 default_initial_alignment_type = 1;
2699 * Perturb output coordinates.
2701 static void perturb_output() {
2702 perturb_type = 0;
2706 * Perturb source coordinates.
2708 static void perturb_source() {
2709 perturb_type = 1;
2713 * Frames under the match threshold are aligned optimally (the best match found is kept).
2715 static void fail_optimal() {
2716 is_fail_default = 0;
2720 * Frames under the match threshold keep their default alignments.
2722 static void fail_default() {
2723 is_fail_default = 1;
2727 * Align images with an error contribution from each color channel.
2729 static void all() {
2730 channel_alignment_type = 0;
2734 * Align images with an error contribution only from the green channel.
2735 * Other color channels do not affect alignment.
2737 static void green() {
2738 channel_alignment_type = 1;
2742 * Align images using a summation of channels. May be useful when
2743 * dealing with images that have high-frequency color ripples due to
2744 * color aliasing.
2746 static void sum() {
2747 channel_alignment_type = 2;
2751 * Error metric exponent
2754 static void set_metric_exponent(float me) {
2755 metric_exponent = me;
2759 * Match threshold
2762 static void set_match_threshold(float mt) {
2763 match_threshold = mt;
2767 * Perturbation lower and upper bounds.
2770 static void set_perturb_lower(ale_pos pl, int plp) {
2771 perturb_lower = pl;
2772 perturb_lower_percent = plp;
2775 static void set_perturb_upper(ale_pos pu, int pup) {
2776 perturb_upper = pu;
2777 perturb_upper_percent = pup;
2781 * Maximum rotational perturbation.
2784 static void set_rot_max(int rm) {
2787 * Obtain the largest power of two not larger than rm.
2790 rot_max = pow(2, floor(log(rm) / log(2)));
2794 * Barrel distortion adjustment multiplier
2797 static void set_bda_mult(ale_pos m) {
2798 bda_mult = m;
2802 * Barrel distortion maximum rate of change
2805 static void set_bda_rate(ale_pos m) {
2806 bda_rate = m;
2810 * Level-of-detail
2813 static void set_lod_preferred(int lm) {
2814 lod_preferred = lm;
2818 * Minimum dimension for reduced level-of-detail.
2821 static void set_min_dimension(int md) {
2822 min_dimension = md;
2826 * Set the scale factor
2828 static void set_scale(ale_pos s) {
2829 scale_factor = s;
2833 * Set reference rendering to align against
2835 static void set_reference(render *r) {
2836 reference = r;
2840 * Set the interpolant
2842 static void set_interpolant(filter::scaled_filter *f) {
2843 interpolant = f;
2847 * Set alignment weights image
2849 static void set_weight_map(const image *i) {
2850 weight_map = i;
2854 * Set frequency cuts
2856 static void set_frequency_cut(double h, double v, double a) {
2857 horiz_freq_cut = h;
2858 vert_freq_cut = v;
2859 avg_freq_cut = a;
2863 * Set algorithmic alignment weighting
2865 static void set_wmx(const char *e, const char *f, const char *d) {
2866 wmx_exec = e;
2867 wmx_file = f;
2868 wmx_defs = d;
2872 * Set the output file for frequency alignment weights (--fwshow)
2874 static void set_fl_show(const char *filename) {
2875 fw_output = filename;
2879 * Set transformation file loader.
2881 static void set_tload(tload_t *tl) {
2882 tload = tl;
2886 * Set transformation file saver.
2888 static void set_tsave(tsave_t *ts) {
2889 tsave = ts;
2893 * Get match statistics for frame N.
2895 static int match(int n) {
2896 update_to(n);
2898 if (n == latest)
2899 return latest_ok;
2900 else if (_keep)
2901 return kept_ok[n];
2902 else {
2903 assert(0);
2904 exit(1);
2909 * Indicate that old alignment data should be kept.
2911 static void keep() {
2912 assert (latest == -1);
2913 _keep = 1;
2917 * Get alignment for frame N.
2919 static transformation of(int n) {
2920 update_to(n);
2921 if (n == latest)
2922 return latest_t;
2923 else if (_keep)
2924 return kept_t[n];
2925 else {
2926 assert(0);
2927 exit(1);
2932 * Use Monte Carlo alignment sampling with argument N.
2934 static void mc(ale_pos n) {
2935 _mc = n;
2939 * Set the certainty-weighted flag.
2941 static void certainty_weighted(int flag) {
2942 certainty_weights = flag;
2946 * Set the global search type.
2948 static void gs(const char *type) {
2949 if (!strcmp(type, "local")) {
2950 _gs = 0;
2951 } else if (!strcmp(type, "inner")) {
2952 _gs = 1;
2953 } else if (!strcmp(type, "outer")) {
2954 _gs = 2;
2955 } else if (!strcmp(type, "all")) {
2956 _gs = 3;
2957 } else if (!strcmp(type, "central")) {
2958 _gs = 4;
2959 } else if (!strcmp(type, "defaults")) {
2960 _gs = 6;
2961 } else if (!strcmp(type, "points")) {
2962 _gs = 5;
2963 keep();
2964 } else {
2965 ui::get()->error("bad global search type");
2970 * Set the minimum overlap for global searching
2972 static void gs_mo(ale_pos value, int _gs_mo_percent) {
2973 _gs_mo = value;
2974 gs_mo_percent = _gs_mo_percent;
2978 * Set the multi-alignment certainty lower bound.
2980 static void set_ma_cert(ale_real value) {
2981 _ma_cert = value;
2985 * Set alignment exclusion regions
2987 static void set_exclusion(exclusion *_ax_parameters, int _ax_count) {
2988 ax_count = _ax_count;
2989 ax_parameters = _ax_parameters;
2993 * Get match summary statistics.
2995 static ale_accum match_summary() {
2996 return match_sum / (ale_accum) match_count;
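* Illustrative only: a hypothetical sequence of calls against the
* public interface above, assuming a caller that has already created
* a reference renderer ('reference_renderer') and selected a frame
* index ('frame_index').  This is a sketch, not part of the class;
* actual option handling lives elsewhere.
#if 0
d2::align::class_euclidean();
d2::align::set_scale(2);
d2::align::set_metric_exponent(2);
d2::align::set_match_threshold(75);
d2::align::set_perturb_upper(32, 0);
d2::align::set_perturb_lower(0.125, 0);
d2::align::gs("central");
d2::align::set_reference(reference_renderer);
int ok = d2::align::match(frame_index);
d2::transformation t = d2::align::of(frame_index);
#endif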
3000 template<class diff_trans>
3001 void *d2::align::diff_stat_generic<diff_trans>::diff_subdomain(void *args) {
3003 subdomain_args *sargs = (subdomain_args *) args;
3005 struct scale_cluster c = sargs->c;
3006 std::vector<run> runs = sargs->runs;
3007 int ax_count = sargs->ax_count;
3008 int f = sargs->f;
3009 int i_min = sargs->i_min;
3010 int i_max = sargs->i_max;
3011 int j_min = sargs->j_min;
3012 int j_max = sargs->j_max;
3013 int subdomain = sargs->subdomain;
3015 assert (reference_image);
3017 point offset = c.accum->offset();
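* Iterate over this subdomain's share of the reference region
* (possibly Monte Carlo subsampled), skipping undefined, uncertain,
* and excluded points, and accumulate error statistics for the rest.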
3019 for (mc_iterate m(i_min, i_max, j_min, j_max, subdomain); !m.done(); m++) {
3021 int i = m.get_i();
3022 int j = m.get_j();
3025 * Check reference frame definition.
3028 if (!((pixel) c.accum->get_pixel(i, j)).finite()
3029 || !(((pixel) c.certainty->get_pixel(i, j)).minabs_norm() > 0))
3030 continue;
3033 * Check for exclusion in render coordinates.
3036 if (ref_excluded(i, j, offset, c.ax_parameters, ax_count))
3037 continue;
3040 * Transform
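* Map render coordinates back into input-frame coordinates.  The
* scaled inverse transform is used when the input has been reduced in
* scale and no interpolant is active; otherwise the unscaled inverse
* transform is used.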
3043 struct point q, r = point::undefined();
3045 q = (c.input_scale < 1.0 && interpolant == NULL)
3046 ? runs.back().offset.scaled_inverse_transform(
3047 point(i + offset[0], j + offset[1]))
3048 : runs.back().offset.unscaled_inverse_transform(
3049 point(i + offset[0], j + offset[1]));
3051 if (runs.size() == 2) {
3052 r = (c.input_scale < 1.0)
3053 ? runs.front().offset.scaled_inverse_transform(
3054 point(i + offset[0], j + offset[1]))
3055 : runs.front().offset.unscaled_inverse_transform(
3056 point(i + offset[0], j + offset[1]));
3059 ale_pos ti = q[0];
3060 ale_pos tj = q[1];
3063 * Check that the transformed coordinates are within
3064 * the boundaries of array c.input and that they
3065 * are not subject to exclusion.
3067 * Also, check that the weight value in the accumulated array
3068 * is nonzero, unless we know it is nonzero by virtue of the
3069 * fact that it falls within the region of the original frame
3070 * (e.g. when we're not increasing image extents).
3073 if (input_excluded(ti, tj, c.ax_parameters, ax_count))
3074 continue;
3076 if (input_excluded(r[0], r[1], c.ax_parameters, ax_count))
3077 r = point::undefined();
3080 * Check the boundaries of the input frame
3083 if (!(ti >= 0
3084 && ti <= c.input->height() - 1
3085 && tj >= 0
3086 && tj <= c.input->width() - 1))
3087 continue;
3089 if (!(r[0] >= 0
3090 && r[0] <= c.input->height() - 1
3091 && r[1] >= 0
3092 && r[1] <= c.input->width() - 1))
3093 r = point::undefined();
3095 sargs->runs.back().sample(f, c, i, j, q, r, runs.front());
3098 return NULL;
3101 #endif