1 // Copyright 2002, 2004, 2007 David Hilvert <dhilvert@auricle.dyndns.org>,
2 // <dhilvert@ugcs.caltech.edu>
4 /* This file is part of the Anti-Lamenessing Engine.
6 The Anti-Lamenessing Engine is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 The Anti-Lamenessing Engine is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with the Anti-Lamenessing Engine; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * align.h: Handle alignment of frames.
25 #ifndef __d2align_h__
26 #define __d2align_h__
28 #include "filter.h"
29 #include "transformation.h"
30 #include "image.h"
31 #include "point.h"
32 #include "render.h"
33 #include "tfile.h"
34 #include "image_rw.h"
36 class align {
37 private:
40 * Private data members
43 static ale_pos scale_factor;
46 * Original frame transformation
48 static transformation orig_t;
51 * Keep data older than latest
53 static int _keep;
54 static transformation *kept_t;
55 static int *kept_ok;
58 * Transformation file handlers
61 static tload_t *tload;
62 static tsave_t *tsave;
65 * Control point variables
68 static const point **cp_array;
69 static unsigned int cp_count;
72 * Reference rendering to align against
75 static render *reference;
76 static filter::scaled_filter *interpolant;
77 static const image *reference_image;
78 static const image *reference_defined;
81 * Per-pixel alignment weight map
84 static const image *weight_map;
87 * Frequency-dependent alignment weights
90 static double horiz_freq_cut;
91 static double vert_freq_cut;
92 static double avg_freq_cut;
93 static const char *fw_output;
96 * Algorithmic alignment weighting
99 static const char *wmx_exec;
100 static const char *wmx_file;
101 static const char *wmx_defs;
104 * Non-certainty alignment weights
107 static image *alignment_weights;
110 * Latest transformation.
113 static transformation latest_t;
116 * Flag indicating whether the latest transformation
117 * resulted in a match.
120 static int latest_ok;
123 * Frame number most recently aligned.
126 static int latest;
129 * Exposure registration
131 * 0. Preserve the original exposure of images.
133 * 1. Match exposure between images.
135 * 2. Use only image metadata for registering exposure.
138 static int _exp_register;
141 * Alignment class.
143 * 0. Translation only. Only adjust the x and y position of images.
144 * Do not rotate input images or perform projective transformations.
146 * 1. Euclidean transformations only. Adjust the x and y position
147 * of images and the orientation of the image about the image center.
149 * 2. Perform general projective transformations. See the file gpt.h
150 * for more information about general projective transformations.
153 static int alignment_class;
156 * Default initial alignment type.
158 * 0. Identity transformation.
160 * 1. Most recently accepted frame's final transformation.
163 static int default_initial_alignment_type;
166 * Projective group behavior
168 * 0. Perturb in output coordinates.
170 * 1. Perturb in source coordinates.
173 static int perturb_type;
176 * Alignment state
178 * This structure contains alignment state information. The change
179 * between the non-default old initial alignment and old final
180 * alignment is used to adjust the non-default current initial
181 * alignment. If either the old or new initial alignment is a default
182 * alignment, the old --follow semantics are preserved.
185 class astate_t {
186 transformation old_initial_alignment;
187 transformation old_final_alignment;
188 transformation default_initial_alignment;
189 int old_is_default;
190 std::vector<int> is_default;
191 const image *input_frame;
193 public:
194 astate_t() :
195 old_initial_alignment(transformation::eu_identity()),
196 old_final_alignment(transformation::eu_identity()),
197 default_initial_alignment(transformation::eu_identity()),
198 is_default(1) {
200 input_frame = NULL;
201 is_default[0] = 1;
202 old_is_default = 1;
205 const image *get_input_frame() const {
206 return input_frame;
209 void set_is_default(unsigned int index, int value) {
212 * Expand the array, if necessary.
214 if (index >= is_default.size())
215 is_default.resize(index + 1);
217 assert (index < is_default.size());
218 is_default[index] = value;
221 int get_is_default(unsigned int index) {
222 assert (index < is_default.size());
223 return is_default[index];
226 transformation get_default() {
227 return default_initial_alignment;
230 void set_default(transformation t) {
231 default_initial_alignment = t;
234 void default_set_original_bounds(const image *i) {
235 default_initial_alignment.set_original_bounds(i);
238 void set_final(transformation t) {
239 old_final_alignment = t;
242 void set_input_frame(const image *i) {
243 input_frame = i;
247 * Implement new delta --follow semantics.
249 * If we have a transformation T such that
251 * prev_final == T(prev_init)
253 * Then we also have
255 * current_init_follow == T(current_init)
257 * We can calculate T as follows:
259 * T == prev_final(prev_init^-1)
261 * Where ^-1 is the inverse operator.
263 static trans_single follow(trans_single a, trans_single b, trans_single c) {
264 trans_single cc = c;
266 if (alignment_class == 0) {
268 * Translational transformations
271 ale_pos t0 = -a.eu_get(0) + b.eu_get(0);
272 ale_pos t1 = -a.eu_get(1) + b.eu_get(1);
274 cc.eu_modify(0, t0);
275 cc.eu_modify(1, t1);
277 } else if (alignment_class == 1) {
279 * Euclidean transformations
282 ale_pos t2 = -a.eu_get(2) + b.eu_get(2);
284 cc.eu_modify(2, t2);
286 point p( c.scaled_height()/2 + c.eu_get(0) - a.eu_get(0),
287 c.scaled_width()/2 + c.eu_get(1) - a.eu_get(1) );
289 p = b.transform_scaled(p);
291 cc.eu_modify(0, p[0] - c.scaled_height()/2 - c.eu_get(0));
292 cc.eu_modify(1, p[1] - c.scaled_width()/2 - c.eu_get(1));
294 } else if (alignment_class == 2) {
296 * Projective transformations
299 point p[4];
301 p[0] = b.transform_scaled(a
302 . scaled_inverse_transform(c.transform_scaled(point( 0 , 0 ))));
303 p[1] = b.transform_scaled(a
304 . scaled_inverse_transform(c.transform_scaled(point(c.scaled_height(), 0 ))));
305 p[2] = b.transform_scaled(a
306 . scaled_inverse_transform(c.transform_scaled(point(c.scaled_height(), c.scaled_width()))));
307 p[3] = b.transform_scaled(a
308 . scaled_inverse_transform(c.transform_scaled(point( 0 , c.scaled_width()))));
310 cc.gpt_set(p);
313 return cc;
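/*
 * Illustrative example (not from the original source), for the
 * translational case above, assuming eu_modify(i, x) adds x to
 * parameter i: if prev_init (a) translates by (2, 3) and prev_final
 * (b) translates by (5, 4), then the delta T translates by (3, 1),
 * so a current_init (c) translating by (10, 10) is adjusted to
 * translate by (13, 11).
 */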
317 * For multi-alignment following, we use the following approach, not
318 * guaranteed to work with large changes in scene or perspective, but
319 * which should be somewhat flexible:
321 * For
323 * t[][] calculated final alignments
324 * s[][] alignments as loaded from file
325 * previous frame n
326 * current frame n+1
327 * fundamental (primary) 0
328 * non-fundamental (non-primary) m!=0
329 * parent element m'
330 * follow(a, b, c) applying the (a, b) delta T=b(a^-1) to c
332 * following in the case of missing file data might be generated by
334 * t[n+1][0] = t[n][0]
335 * t[n+1][m!=0] = follow(t[n][m'], t[n+1][m'], t[n][m])
337 * cases with all noted file data present might be generated by
339 * t[n+1][0] = follow(s[n][0], t[n][0], s[n+1][0])
340 * t[n+1][m!=0] = follow(s[n+1][m'], t[n+1][m'], s[n+1][m])
342 * For non-following behavior, or where assigning the above is
343 * impossible, we assign the following default
345 * t[n+1][0] = Identity
346 * t[n+1][m!=0] = t[n+1][m']
349 void init_frame_alignment_primary(transformation *offset, int lod, ale_pos perturb) {
351 if (perturb > 0 && !old_is_default && !get_is_default(0)
352 && default_initial_alignment_type == 1) {
355 * Apply following logic for the primary element.
358 ui::get()->following();
360 trans_single new_offset = follow(old_initial_alignment.get_element(0),
361 old_final_alignment.get_element(0),
362 offset->get_element(0));
364 old_initial_alignment = *offset;
366 offset->set_element(0, new_offset);
368 ui::get()->set_offset(new_offset);
369 } else {
370 old_initial_alignment = *offset;
373 is_default.resize(old_initial_alignment.stack_depth());
376 void init_frame_alignment_nonprimary(transformation *offset,
377 int lod, ale_pos perturb, unsigned int index) {
379 assert (index > 0);
381 unsigned int parent_index = offset->parent_index(index);
383 if (perturb > 0
384 && !get_is_default(parent_index)
385 && !get_is_default(index)
386 && default_initial_alignment_type == 1) {
389 * Apply file-based following logic for the
390 * given element.
393 ui::get()->following();
395 trans_single new_offset = follow(old_initial_alignment.get_element(parent_index),
396 offset->get_element(parent_index),
397 offset->get_element(index));
399 old_initial_alignment.set_element(index, offset->get_element(index));
400 offset->set_element(index, new_offset);
402 ui::get()->set_offset(new_offset);
404 return;
407 offset->get_coordinate(parent_index);
410 if (perturb > 0
411 && old_final_alignment.exists(offset->get_coordinate(parent_index))
412 && old_final_alignment.exists(offset->get_current_coordinate())
413 && default_initial_alignment_type == 1) {
416 * Apply nonfile-based following logic for
417 * the given element.
420 ui::get()->following();
423 * XXX: Although it is different, the below
424 * should be equivalent to the comment
425 * description.
428 trans_single a = old_final_alignment.get_element(offset->get_coordinate(parent_index));
429 trans_single b = old_final_alignment.get_element(offset->get_current_coordinate());
430 trans_single c = offset->get_element(parent_index);
432 trans_single new_offset = follow(a, b, c);
434 offset->set_element(index, new_offset);
435 ui::get()->set_offset(new_offset);
437 return;
441 * Handle other cases.
444 if (get_is_default(index)) {
445 offset->set_element(index, offset->get_element(parent_index));
446 ui::get()->set_offset(offset->get_element(index));
450 void init_default() {
452 if (default_initial_alignment_type == 0) {
455 * Follow the transformation of the original frame,
456 * setting new image dimensions.
459 // astate->default_initial_alignment = orig_t;
460 default_initial_alignment.set_current_element(orig_t.get_element(0));
461 default_initial_alignment.set_dimensions(input_frame);
463 } else if (default_initial_alignment_type == 1)
466 * Follow previous transformation, setting new image
467 * dimensions.
470 default_initial_alignment.set_dimensions(input_frame);
472 else
473 assert(0);
475 old_is_default = get_is_default(0);
480 * Alignment for failed frames -- default or optimal?
482 * A frame that does not meet the match threshold can be assigned the
483 * best alignment found, or can be assigned its alignment default.
486 static int is_fail_default;
489 * Alignment code.
491 * 0. Align images with an error contribution from each color channel.
493 * 1. Align images with an error contribution only from the green channel.
494 * Other color channels do not affect alignment.
496 * 2. Align images using a summation of channels. May be useful when dealing
497 * with images that have high frequency color ripples due to color aliasing.
500 static int channel_alignment_type;
503 * Error metric exponent
506 static ale_real metric_exponent;
509 * Match threshold
512 static float match_threshold;
515 * Perturbation lower and upper bounds.
518 static ale_pos perturb_lower;
519 static int perturb_lower_percent;
520 static ale_pos perturb_upper;
521 static int perturb_upper_percent;
524 * Preferred level-of-detail scale factor is 2^lod_preferred/perturb.
527 static int lod_preferred;
530 * Minimum dimension for reduced LOD.
533 static int min_dimension;
536 * Maximum rotational perturbation
539 static ale_pos rot_max;
542 * Barrel distortion alignment multiplier
545 static ale_pos bda_mult;
548 * Barrel distortion maximum adjustment rate
551 static ale_pos bda_rate;
554 * Alignment match sum
557 static ale_accum match_sum;
560 * Alignment match count.
563 static int match_count;
566 * Monte Carlo parameter
569 static ale_pos _mc;
572 * Certainty weight flag
574 * 0. Don't use certainty weights for alignment.
576 * 1. Use certainty weights for alignment.
579 static int certainty_weights;
582 * Global search parameter
584 * 0. Local: Local search only.
585 * 1. Inner: Alignment reference image inner region
586 * 2. Outer: Alignment reference image outer region
587 * 3. All: Alignment reference image inner and outer regions.
588 * 4. Central: Inner if possible; else, best of inner and outer.
589 * 5. Points: Align by control points.
592 static int _gs;
595 * Minimum overlap for global searches
598 static ale_accum _gs_mo;
599 static int gs_mo_percent;
602 * Minimum certainty for multi-alignment element registration.
605 static ale_real _ma_cert;
608 * Exclusion regions
611 static exclusion *ax_parameters;
612 static int ax_count;
615 * Types for scale clusters.
618 struct nl_scale_cluster {
619 const image *accum_max;
620 const image *accum_min;
621 const image *certainty_max;
622 const image *certainty_min;
623 const image *aweight_max;
624 const image *aweight_min;
625 exclusion *ax_parameters;
627 ale_pos input_scale;
628 const image *input_certainty_max;
629 const image *input_certainty_min;
630 const image *input_max;
631 const image *input_min;
634 struct scale_cluster {
635 const image *accum;
636 const image *certainty;
637 const image *aweight;
638 exclusion *ax_parameters;
640 ale_pos input_scale;
641 const image *input_certainty;
642 const image *input;
644 nl_scale_cluster *nl_scale_clusters;
648 * Check for exclusion region coverage in the reference
649 * array.
651 static int ref_excluded(int i, int j, point offset, exclusion *params, int param_count) {
652 for (int idx = 0; idx < param_count; idx++)
653 if (params[idx].type == exclusion::RENDER
654 && i + offset[0] >= params[idx].x[0]
655 && i + offset[0] <= params[idx].x[1]
656 && j + offset[1] >= params[idx].x[2]
657 && j + offset[1] <= params[idx].x[3])
658 return 1;
660 return 0;
664 * Check for exclusion region coverage in the input
665 * array.
667 static int input_excluded(ale_pos ti, ale_pos tj, exclusion *params, int param_count) {
668 for (int idx = 0; idx < param_count; idx++)
669 if (params[idx].type == exclusion::FRAME
670 && ti >= params[idx].x[0]
671 && ti <= params[idx].x[1]
672 && tj >= params[idx].x[2]
673 && tj <= params[idx].x[3])
674 return 1;
676 return 0;
680 * Overlap function. Determines the number of pixels in areas where
681 * the arrays overlap. Uses the reference array's notion of pixel
682 * positions.
684 static unsigned int overlap(struct scale_cluster c, transformation t, int ax_count) {
685 assert (reference_image);
687 unsigned int result = 0;
689 point offset = c.accum->offset();
691 for (unsigned int i = 0; i < c.accum->height(); i++)
692 for (unsigned int j = 0; j < c.accum->width(); j++) {
694 if (ref_excluded(i, j, offset, c.ax_parameters, ax_count))
695 continue;
698 * Transform
701 struct point q;
703 q = (c.input_scale < 1.0 && interpolant == NULL)
704 ? t.scaled_inverse_transform(
705 point(i + offset[0], j + offset[1]))
706 : t.unscaled_inverse_transform(
707 point(i + offset[0], j + offset[1]));
709 ale_pos ti = q[0];
710 ale_pos tj = q[1];
713 * Check that the transformed coordinates are within
714 * the boundaries of array c.input, and check that the
715 * weight value in the accumulated array is nonzero,
716 * unless we know it is nonzero by virtue of the fact
717 * that it falls within the region of the original
718 * frame (e.g. when we're not increasing image
719 * extents). Also check for frame exclusion.
722 if (input_excluded(ti, tj, c.ax_parameters, ax_count))
723 continue;
725 if (ti >= 0
726 && ti <= c.input->height() - 1
727 && tj >= 0
728 && tj <= c.input->width() - 1
729 && c.certainty->get_pixel(i, j)[0] != 0)
730 result++;
733 return result;
737 * Monte Carlo iteration class.
739 * Monte Carlo alignment has been used for statistical comparisons in
740 * spatial registration, and is now used for tonal registration
741 * and final match calculation.
745 * We use a random process for which the expected number of sampled
746 * pixels is +/- .000003 from the coverage in the range [.005,.995] for
747 * an image with 100,000 pixels. (The actual number may still deviate
748 * from the expected number by more than this amount, however.) The
749 * method is as follows:
751 * We have coverage == USE/ALL, or (expected # pixels to use)/(# total
752 * pixels). We derive from this SKIP/USE.
754 * SKIP/USE == (SKIP/ALL)/(USE/ALL) == (1 - (USE/ALL))/(USE/ALL)
756 * Once we have SKIP/USE, we know the expected number of pixels to skip
757 * in each iteration. We use a random selection process that provides
758 * SKIP/USE close to this calculated value.
760 * If we can draw uniformly to select the number of pixels to skip, we
761 * do. In this case, the maximum number of pixels to skip is twice the
762 * expected number.
764 * If we cannot draw uniformly, we still assign equal probability to
765 * each of the integer values in the interval [0, 2 * (SKIP/USE)], but
766 * assign an unequal amount to the integer value ceil(2 * SKIP/USE) +
767 * 1.
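/*
 * Worked numbers (illustrative only): for _mc == 25, coverage is 0.25,
 * so SKIP/USE == (1 - 0.25) / 0.25 == 3.  Since 2 * SKIP/USE is an
 * integer here, the mc_max expression in the constructor below reduces
 * to (6 * 7 + 6) / (2 + 12 - 6) == 6, and each step advances the pixel
 * index by a value drawn roughly uniformly from {1, ..., mc_max + 1},
 * with mean 4 -- i.e., about one of every four pixels is sampled.
 */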
771 * When reseeding the random number generator, we want the same set of
772 * pixels to be used in cases where two alignment options are compared.
773 * If we wanted to avoid bias from repeatedly utilizing the same seed,
774 * we could seed with the number of the frame most recently aligned:
776 * srand(latest);
778 * However, in cursory tests, it seems okay to just use the default
779 * seed of 1, and so we do this, since it is simpler; both of these
780 * approaches to reseeding achieve better results than not reseeding.
781 * (1 is the default seed according to the GNU Manual Page for
782 * rand(3).)
784 * For subdomain calculations, we vary the seed by adding the subdomain
785 * index.
788 class mc_iterate {
789 ale_pos mc_max;
790 unsigned int index;
791 unsigned int index_max;
792 int i_min;
793 int i_max;
794 int j_min;
795 int j_max;
797 rng_t rng;
799 public:
800 mc_iterate(int _i_min, int _i_max, int _j_min, int _j_max, unsigned int subdomain)
801 : rng() {
803 ale_pos coverage;
805 i_min = _i_min;
806 i_max = _i_max;
807 j_min = _j_min;
808 j_max = _j_max;
810 if (i_max < i_min || j_max < j_min)
811 index_max = 0;
812 else
813 index_max = (i_max - i_min) * (j_max - j_min);
815 if (index_max < 500 || _mc > 100 || _mc <= 0)
816 coverage = 1;
817 else
818 coverage = _mc / 100;
820 double su = (1 - coverage) / coverage;
822 mc_max = (floor(2*su) * (1 + floor(2*su)) + 2*su)
823 / (2 + 2 * floor(2*su) - 2*su);
825 rng.seed(1 + subdomain);
827 #define FIXED16 3
828 #if ALE_COORDINATES == FIXED16
830 * XXX: This calculation might not yield the correct
831 * expected value.
833 index = -1 + (int) ceil(((ale_pos) mc_max+1)
834 / (ale_pos) ( (1 + 0xffffff)
835 / (1 + (rng.get() & 0xffffff))));
836 #else
837 index = -1 + (int) ceil((ale_accum) (mc_max+1)
838 * ( (1 + ((ale_accum) (rng.get())) )
839 / (1 + ((ale_accum) RAND_MAX)) ));
840 #endif
841 #undef FIXED16
844 int get_i() {
845 return index / (j_max - j_min) + i_min;
848 int get_j() {
849 return index % (j_max - j_min) + j_min;
852 void operator++(int whats_this_for) {
853 #define FIXED16 3
854 #if ALE_COORDINATES == FIXED16
855 index += (int) ceil(((ale_pos) mc_max+1)
856 / (ale_pos) ( (1 + 0xffffff)
857 / (1 + (rng.get() & 0xffffff))));
858 #else
859 index += (int) ceil((ale_accum) (mc_max+1)
860 * ( (1 + ((ale_accum) (rng.get())) )
861 / (1 + ((ale_accum) RAND_MAX)) ));
862 #endif
863 #undef FIXED16
866 int done() {
867 return (index >= index_max);
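/*
 * Usage sketch (mirrors the call in
 * exposure_ratio_iterate::subdomain_algorithm() further below):
 *
 *     for (mc_iterate m(i_min, i_max, j_min, j_max, thread); !m.done(); m++) {
 *             unsigned int i = (unsigned int) m.get_i();
 *             unsigned int j = (unsigned int) m.get_j();
 *             ... sample pixel (i, j) ...
 *     }
 */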
872 * Not-quite-symmetric difference function. Determines the difference in areas
873 * where the arrays overlap. Uses the reference array's notion of pixel positions.
875 * For the purposes of determining the difference, this function divides each
876 * pixel value by the corresponding image's average pixel magnitude, unless we
877 * are:
879 * a) Extending the boundaries of the image, or
881 * b) following the previous frame's transform
883 * If we are doing monte-carlo pixel sampling for alignment, we
884 * typically sample a subset of available pixels; otherwise, we sample
885 * all pixels.
889 template<class diff_trans>
890 class diff_stat_generic {
892 transformation::elem_bounds_t elem_bounds;
894 struct run {
896 diff_trans offset;
898 ale_accum result;
899 ale_accum divisor;
901 point max, min;
902 ale_accum centroid[2], centroid_divisor;
903 ale_accum de_centroid[2], de_centroid_v, de_sum;
905 void init() {
907 result = 0;
908 divisor = 0;
910 min = point::posinf();
911 max = point::neginf();
913 centroid[0] = 0;
914 centroid[1] = 0;
915 centroid_divisor = 0;
917 de_centroid[0] = 0;
918 de_centroid[1] = 0;
920 de_centroid_v = 0;
922 de_sum = 0;
925 void init(diff_trans _offset) {
926 offset = _offset;
927 init();
931 * Required for STL sanity.
933 run() : offset(diff_trans::eu_identity()) {
934 init();
937 run(diff_trans _offset) : offset(_offset) {
938 init(_offset);
941 void add(const run &_run) {
942 result += _run.result;
943 divisor += _run.divisor;
945 for (int d = 0; d < 2; d++) {
946 if (min[d] > _run.min[d])
947 min[d] = _run.min[d];
948 if (max[d] < _run.max[d])
949 max[d] = _run.max[d];
950 centroid[d] += _run.centroid[d];
951 de_centroid[d] += _run.de_centroid[d];
954 centroid_divisor += _run.centroid_divisor;
955 de_centroid_v += _run.de_centroid_v;
956 de_sum += _run.de_sum;
959 run(const run &_run) : offset(_run.offset) {
962 * Initialize
964 init(_run.offset);
967 * Add
969 add(_run);
972 run &operator=(const run &_run) {
975 * Initialize
977 init(_run.offset);
980 * Add
982 add(_run);
984 return *this;
987 ~run() {
990 ale_accum get_error() const {
991 return pow(result / divisor, 1/(ale_accum) metric_exponent);
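/*
 * Note (added): with metric_exponent p, get_error() above evaluates to
 *
 *     ( sum_k w_k * |a_k - b_k|^p  /  sum_k w_k * max(a_k, b_k)^p )^(1/p)
 *
 * where the numerator and denominator are accumulated, according to the
 * channel alignment type, as 'result' and 'divisor' in sample() below.
 */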
994 void sample(int f, scale_cluster c, int i, int j, point t, point u,
995 const run &comparison) {
997 pixel pa = c.accum->get_pixel(i, j);
999 ale_real this_result[2] = { 0, 0 };
1000 ale_real this_divisor[2] = { 0, 0 };
1002 pixel p[2];
1003 pixel weight[2];
1004 weight[0] = pixel(1, 1, 1);
1005 weight[1] = pixel(1, 1, 1);
1007 pixel tm = offset.get_tonal_multiplier(point(i, j) + c.accum->offset());
1009 if (interpolant != NULL) {
1010 interpolant->filtered(i, j, &p[0], &weight[0], 1, f);
1012 if (weight[0].min_norm() > ale_real_weight_floor) {
1013 p[0] /= weight[0];
1014 } else {
1015 return;
1018 } else {
1019 p[0] = c.input->get_bl(t);
1022 p[0] *= tm;
1024 if (u.defined()) {
1025 p[1] = c.input->get_bl(u);
1026 p[1] *= tm;
1031 * Handle certainty.
1034 if (certainty_weights == 1) {
1037 * For speed, use arithmetic interpolation (get_bl(.))
1038 * instead of geometric (get_bl(., 1))
1041 weight[0] *= c.input_certainty->get_bl(t);
1042 if (u.defined())
1043 weight[1] *= c.input_certainty->get_bl(u);
1044 weight[0] *= c.certainty->get_pixel(i, j);
1045 weight[1] *= c.certainty->get_pixel(i, j);
1048 if (c.aweight != NULL) {
1049 weight[0] *= c.aweight->get_pixel(i, j);
1050 weight[1] *= c.aweight->get_pixel(i, j);
1054 * Update sampling area statistics
1057 if (min[0] > i)
1058 min[0] = i;
1059 if (min[1] > j)
1060 min[1] = j;
1061 if (max[0] < i)
1062 max[0] = i;
1063 if (max[1] < j)
1064 max[1] = j;
1066 centroid[0] += (weight[0][0] + weight[0][1] + weight[0][2]) * i;
1067 centroid[1] += (weight[0][0] + weight[0][1] + weight[0][2]) * j;
1068 centroid_divisor += (weight[0][0] + weight[0][1] + weight[0][2]);
1071 * Determine alignment type.
1074 for (int m = 0; m < (u.defined() ? 2 : 1); m++)
1075 if (channel_alignment_type == 0) {
1077 * Align based on all channels.
1081 for (int k = 0; k < 3; k++) {
1082 ale_real achan = pa[k];
1083 ale_real bchan = p[m][k];
1085 this_result[m] += weight[m][k] * pow(fabs(achan - bchan), metric_exponent);
1086 this_divisor[m] += weight[m][k] * pow(achan > bchan ? achan : bchan, metric_exponent);
1088 } else if (channel_alignment_type == 1) {
1090 * Align based on the green channel.
1093 ale_real achan = pa[1];
1094 ale_real bchan = p[m][1];
1096 this_result[m] = weight[m][1] * pow(fabs(achan - bchan), metric_exponent);
1097 this_divisor[m] = weight[m][1] * pow(achan > bchan ? achan : bchan, metric_exponent);
1098 } else if (channel_alignment_type == 2) {
1100 * Align based on the sum of all channels.
1103 ale_real asum = 0;
1104 ale_real bsum = 0;
1105 ale_real wsum = 0;
1107 for (int k = 0; k < 3; k++) {
1108 asum += pa[k];
1109 bsum += p[m][k];
1110 wsum += weight[m][k] / 3;
1113 this_result[m] = wsum * pow(fabs(asum - bsum), metric_exponent);
1114 this_divisor[m] = wsum * pow(asum > bsum ? asum : bsum, metric_exponent);
1117 if (u.defined()) {
1118 // ale_real de = fabs(this_result[0] / this_divisor[0]
1119 // - this_result[1] / this_divisor[1]);
1120 ale_real de = fabs(this_result[0] - this_result[1]);
1122 de_centroid[0] += de * (ale_real) i;
1123 de_centroid[1] += de * (ale_real) j;
1125 de_centroid_v += de * (ale_real) t.lengthto(u);
1127 de_sum += de;
1130 result += (this_result[0]);
1131 divisor += (this_divisor[0]);
1134 void rescale(ale_pos scale) {
1135 offset.rescale(scale);
1137 de_centroid[0] *= scale;
1138 de_centroid[1] *= scale;
1139 de_centroid_v *= scale;
1142 point get_centroid() {
1143 point result = point(centroid[0] / centroid_divisor, centroid[1] / centroid_divisor);
1145 assert (finite(centroid[0])
1146 && finite(centroid[1])
1147 && (result.defined() || centroid_divisor == 0));
1149 return result;
1152 point get_error_centroid() {
1153 point result = point(de_centroid[0] / de_sum, de_centroid[1] / de_sum);
1154 return result;
1158 ale_pos get_error_perturb() {
1159 ale_pos result = de_centroid_v / de_sum;
1161 return result;
1167 * When non-empty, runs.front() is best, runs.back() is
1168 * testing.
1171 std::vector<run> runs;
1174 * old_runs stores the latest available perturbation set for
1175 * each multi-alignment element.
1178 typedef int run_index;
1179 std::map<run_index, run> old_runs;
1181 static void *diff_subdomain(void *args);
1183 struct subdomain_args {
1184 struct scale_cluster c;
1185 std::vector<run> runs;
1186 int ax_count;
1187 int f;
1188 int i_min, i_max, j_min, j_max;
1189 int subdomain;
1192 struct scale_cluster si;
1193 int ax_count;
1194 int frame;
1196 std::vector<ale_pos> perturb_multipliers;
1198 public:
1199 void diff(struct scale_cluster c, const diff_trans &t,
1200 int _ax_count, int f) {
1202 if (runs.size() == 2)
1203 runs.pop_back();
1205 runs.push_back(run(t));
1207 si = c;
1208 ax_count = _ax_count;
1209 frame = f;
1211 ui::get()->d2_align_sample_start();
1213 if (interpolant != NULL) {
1216 * XXX: This has not been tested, and may be completely
1217 * wrong.
1220 transformation tt = transformation::eu_identity();
1221 tt.set_current_element(t);
1222 interpolant->set_parameters(tt, c.input, c.accum->offset());
1225 int N;
1226 #ifdef USE_PTHREAD
1227 N = thread::count();
1229 pthread_t *threads = (pthread_t *) malloc(sizeof(pthread_t) * N);
1230 pthread_attr_t *thread_attr = (pthread_attr_t *) malloc(sizeof(pthread_attr_t) * N);
1232 #else
1233 N = 1;
1234 #endif
1236 subdomain_args *args = new subdomain_args[N];
1238 transformation::elem_bounds_int_t b = elem_bounds.scale_to_bounds(c.accum->height(), c.accum->width());
1240 // fprintf(stdout, "[%d %d] [%d %d]\n",
1241 // global_i_min, global_i_max, global_j_min, global_j_max);
1243 for (int ti = 0; ti < N; ti++) {
1244 args[ti].c = c;
1245 args[ti].runs = runs;
1246 args[ti].ax_count = ax_count;
1247 args[ti].f = f;
1248 args[ti].i_min = b.imin + ((b.imax - b.imin) * ti) / N;
1249 args[ti].i_max = b.imin + ((b.imax - b.imin) * (ti + 1)) / N;
1250 args[ti].j_min = b.jmin;
1251 args[ti].j_max = b.jmax;
1252 args[ti].subdomain = ti;
1253 #ifdef USE_PTHREAD
1254 pthread_attr_init(&thread_attr[ti]);
1255 pthread_attr_setdetachstate(&thread_attr[ti], PTHREAD_CREATE_JOINABLE);
1256 pthread_create(&threads[ti], &thread_attr[ti], diff_subdomain, &args[ti]);
1257 #else
1258 diff_subdomain(&args[ti]);
1259 #endif
1262 for (int ti = 0; ti < N; ti++) {
1263 #ifdef USE_PTHREAD
1264 pthread_join(threads[ti], NULL);
1265 #endif
1266 runs.back().add(args[ti].runs.back());
1269 #ifdef USE_PTHREAD
1270 free(threads);
1271 free(thread_attr);
1272 #endif
1274 delete[] args;
1276 ui::get()->d2_align_sample_stop();
1280 private:
1281 void rediff() {
1282 std::vector<diff_trans> t_array;
1284 for (unsigned int r = 0; r < runs.size(); r++) {
1285 t_array.push_back(runs[r].offset);
1288 runs.clear();
1290 for (unsigned int r = 0; r < t_array.size(); r++)
1291 diff(si, t_array[r], ax_count, frame);
1295 public:
1296 int better() {
1297 assert(runs.size() >= 2);
1298 assert(runs[0].offset.scale() == runs[1].offset.scale());
1300 return (runs[1].get_error() < runs[0].get_error()
1301 || (!finite(runs[0].get_error()) && finite(runs[1].get_error())));
1304 int better_defined() {
1305 assert(runs.size() >= 2);
1306 assert(runs[0].offset.scale() == runs[1].offset.scale());
1308 return (runs[1].get_error() < runs[0].get_error());
1311 diff_stat_generic(transformation::elem_bounds_t e)
1312 : runs(), old_runs(), perturb_multipliers() {
1313 elem_bounds = e;
1316 run_index get_run_index(unsigned int perturb_index) {
1317 return perturb_index;
1320 run &get_run(unsigned int perturb_index) {
1321 run_index index = get_run_index(perturb_index);
1323 assert(old_runs.count(index));
1324 return old_runs[index];
1327 void rescale(ale_pos scale, scale_cluster _si) {
1328 assert(runs.size() == 1);
1330 si = _si;
1332 runs[0].rescale(scale);
1334 rediff();
1337 ~diff_stat_generic() {
1340 diff_stat_generic &operator=(const diff_stat_generic &dst) {
1342 * Copy run information.
1344 runs = dst.runs;
1345 old_runs = dst.old_runs;
1348 * Copy diff variables
1350 si = dst.si;
1351 ax_count = dst.ax_count;
1352 frame = dst.frame;
1353 perturb_multipliers = dst.perturb_multipliers;
1354 elem_bounds = dst.elem_bounds;
1356 return *this;
1359 diff_stat_generic(const diff_stat_generic &dst) : runs(), old_runs(),
1360 perturb_multipliers() {
1361 operator=(dst);
1364 void set_elem_bounds(transformation::elem_bounds_t e) {
1365 elem_bounds = e;
1368 ale_accum get_result() {
1369 assert(runs.size() == 1);
1370 return runs[0].result;
1373 ale_accum get_divisor() {
1374 assert(runs.size() == 1);
1375 return runs[0].divisor;
1378 diff_trans get_offset() {
1379 assert(runs.size() == 1);
1380 return runs[0].offset;
1383 int operator!=(diff_stat_generic &param) {
1384 return (get_error() != param.get_error());
1387 int operator==(diff_stat_generic &param) {
1388 return !(operator!=(param));
1391 ale_pos get_error_perturb() {
1392 assert(runs.size() == 1);
1393 return runs[0].get_error_perturb();
1396 ale_accum get_error() const {
1397 assert(runs.size() == 1);
1398 return runs[0].get_error();
1401 public:
1403 * Get the set of transformations produced by a given perturbation
1405 void get_perturb_set(std::vector<diff_trans> *set,
1406 ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
1407 ale_pos *current_bd, ale_pos *modified_bd,
1408 std::vector<ale_pos> multipliers = std::vector<ale_pos>()) {
1410 assert(runs.size() == 1);
1412 diff_trans test_t(diff_trans::eu_identity());
1415 * Translational or euclidean transformation
1418 for (unsigned int i = 0; i < 2; i++)
1419 for (unsigned int s = 0; s < 2; s++) {
1421 if (!multipliers.size())
1422 multipliers.push_back(1);
1424 assert(finite(multipliers[0]));
1426 test_t = get_offset();
1428 // test_t.eu_modify(i, (s ? -adj_p : adj_p) * multipliers[0]);
1429 test_t.translate((i ? point(1, 0) : point(0, 1))
1430 * (s ? -adj_p : adj_p)
1431 * multipliers[0]);
1433 test_t.snap(adj_p / 2);
1435 set->push_back(test_t);
1436 multipliers.erase(multipliers.begin());
1440 if (alignment_class > 0)
1441 for (unsigned int s = 0; s < 2; s++) {
1443 if (!multipliers.size())
1444 multipliers.push_back(1);
1446 assert(multipliers.size());
1447 assert(finite(multipliers[0]));
1449 if (!(adj_o * multipliers[0] < rot_max))
1450 return;
1452 ale_pos adj_s = (s ? 1 : -1) * adj_o * multipliers[0];
1454 test_t = get_offset();
1456 run_index ori = get_run_index(set->size());
1457 point centroid = point::undefined();
1459 if (!old_runs.count(ori))
1460 ori = get_run_index(0);
1462 if (!centroid.finite() && old_runs.count(ori)) {
1463 centroid = old_runs[ori].get_error_centroid();
1465 if (!centroid.finite())
1466 centroid = old_runs[ori].get_centroid();
1468 centroid *= test_t.scale()
1469 / old_runs[ori].offset.scale();
1472 if (!centroid.finite() && !test_t.is_projective()) {
1473 test_t.eu_modify(2, adj_s);
1474 } else if (!centroid.finite()) {
1475 centroid = point(si.input->height() / 2,
1476 si.input->width() / 2);
1478 test_t.rotate(centroid + si.accum->offset(),
1479 adj_s);
1480 } else {
1481 test_t.rotate(centroid + si.accum->offset(),
1482 adj_s);
1485 test_t.snap(adj_p / 2);
1487 set->push_back(test_t);
1488 multipliers.erase(multipliers.begin());
1491 if (alignment_class == 2) {
1494 * Projective transformation
1497 for (unsigned int i = 0; i < 4; i++)
1498 for (unsigned int j = 0; j < 2; j++)
1499 for (unsigned int s = 0; s < 2; s++) {
1501 if (!multipliers.size())
1502 multipliers.push_back(1);
1504 assert(multipliers.size());
1505 assert(finite(multipliers[0]));
1507 ale_pos adj_s = (s ? -1 : 1) * adj_p * multipliers[0];
1509 test_t = get_offset();
1511 if (perturb_type == 0)
1512 test_t.gpt_modify_bounded(j, i, adj_s, elem_bounds.scale_to_bounds(si.accum->height(), si.accum->width()));
1513 else if (perturb_type == 1)
1514 test_t.gr_modify(j, i, adj_s);
1515 else
1516 assert(0);
1518 test_t.snap(adj_p / 2);
1520 set->push_back(test_t);
1521 multipliers.erase(multipliers.begin());
1527 * Barrel distortion
1530 if (bda_mult != 0 && adj_b != 0) {
1532 for (unsigned int d = 0; d < get_offset().bd_count(); d++)
1533 for (unsigned int s = 0; s < 2; s++) {
1535 if (!multipliers.size())
1536 multipliers.push_back(1);
1538 assert (multipliers.size());
1539 assert (finite(multipliers[0]));
1541 ale_pos adj_s = (s ? -1 : 1) * adj_b * multipliers[0];
1543 if (bda_rate > 0 && fabs(modified_bd[d] + adj_s - current_bd[d]) > bda_rate)
1544 continue;
1546 diff_trans test_t = get_offset();
1548 test_t.bd_modify(d, adj_s);
1550 set->push_back(test_t);
1555 void confirm() {
1556 assert(runs.size() == 2);
1557 runs[0] = runs[1];
1558 runs.pop_back();
1561 void discard() {
1562 assert(runs.size() == 2);
1563 runs.pop_back();
1566 void perturb_test(ale_pos perturb, ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
1567 ale_pos *current_bd, ale_pos *modified_bd, int stable) {
1569 assert(runs.size() == 1);
1571 std::vector<diff_trans> t_set;
1573 if (perturb_multipliers.size() == 0) {
1574 get_perturb_set(&t_set, adj_p, adj_o, adj_b,
1575 current_bd, modified_bd);
1577 for (unsigned int i = 0; i < t_set.size(); i++) {
1578 diff_stat_generic test = *this;
1580 test.diff(si, t_set[i], ax_count, frame);
1582 test.confirm();
1584 if (finite(test.get_error_perturb()))
1585 perturb_multipliers.push_back(adj_p / test.get_error_perturb());
1586 else
1587 perturb_multipliers.push_back(1);
1591 t_set.clear();
1594 get_perturb_set(&t_set, adj_p, adj_o, adj_b, current_bd, modified_bd,
1595 perturb_multipliers);
1597 int found_unreliable = 1;
1598 std::vector<int> tested(t_set.size(), 0);
1600 for (unsigned int i = 0; i < t_set.size(); i++) {
1601 run_index ori = get_run_index(i);
1604 * Check for stability
1606 if (stable
1607 && old_runs.count(ori)
1608 && old_runs[ori].offset == t_set[i])
1609 tested[i] = 1;
1612 std::vector<ale_pos> perturb_multipliers_original = perturb_multipliers;
1614 while (found_unreliable) {
1616 found_unreliable = 0;
1618 for (unsigned int i = 0; i < t_set.size(); i++) {
1620 if (tested[i])
1621 continue;
1623 diff(si, t_set[i], ax_count, frame);
1625 if (!(i < perturb_multipliers.size())
1626 || !finite(perturb_multipliers[i])) {
1628 perturb_multipliers.resize(i + 1);
1630 if (finite(perturb_multipliers[i])
1631 && finite(adj_p)
1632 && finite(adj_p / runs[1].get_error_perturb())) {
1633 perturb_multipliers[i] =
1634 adj_p / runs[1].get_error_perturb();
1636 found_unreliable = 1;
1637 } else
1638 perturb_multipliers[i] = 1;
1640 continue;
1643 run_index ori = get_run_index(i);
1645 if (old_runs.count(ori) == 0)
1646 old_runs.insert(std::pair<run_index, run>(ori, runs[1]));
1647 else
1648 old_runs[ori] = runs[1];
1650 if (finite(perturb_multipliers_original[i])
1651 && finite(runs[1].get_error_perturb())
1652 && finite(adj_p)
1653 && finite(perturb_multipliers_original[i] * adj_p / runs[1].get_error_perturb()))
1654 perturb_multipliers[i] = perturb_multipliers_original[i]
1655 * adj_p / runs[1].get_error_perturb();
1656 else
1657 perturb_multipliers[i] = 1;
1659 tested[i] = 1;
1661 if (better()
1662 && runs[1].get_error() < runs[0].get_error()
1663 && perturb_multipliers[i]
1664 / perturb_multipliers_original[i] < 2) {
1665 runs[0] = runs[1];
1666 runs.pop_back();
1667 return;
1672 if (runs.size() > 1)
1673 runs.pop_back();
1675 if (!found_unreliable)
1676 return;
1682 typedef diff_stat_generic<trans_single> diff_stat_t;
1683 typedef diff_stat_generic<trans_multi> diff_stat_multi;
1687 * Adjust exposure for an aligned frame B against reference A.
1689 * Expects full-LOD images.
1691 * Note: This method does not use any weighting, by certainty or
1692 * otherwise, in the first exposure registration pass, as any bias of
1693 * weighting according to color may also bias the exposure registration
1694 * result; it does use weighting, including weighting by certainty
1695 * (even if certainty weighting is not specified), in the second pass,
1696 * under the assumption that weighting by certainty improves handling
1697 * of out-of-range highlights, and that bias of exposure measurements
1698 * according to color may generally be less harmful after spatial
1699 * registration has been performed.
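/*
 * In outline (see set_exposure_ratio() further below), the weighted sums
 *
 *     asum = sum_i w_i * a_i          (reference pixels)
 *     bsum = sum_i w_i * b_i          (transformed input pixels)
 *
 * produce the updated gain estimate
 *
 *     new_multiplier = (asum / bsum) * current_multiplier
 *
 * which is applied only if all three channels are finite.
 */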
1701 class exposure_ratio_iterate : public thread::decompose_domain {
1702 pixel_accum *asums;
1703 pixel_accum *bsums;
1704 pixel_accum *asum;
1705 pixel_accum *bsum;
1706 struct scale_cluster c;
1707 const transformation &t;
1708 int ax_count;
1709 int pass_number;
1710 protected:
1711 void prepare_subdomains(unsigned int N) {
1712 asums = new pixel_accum[N];
1713 bsums = new pixel_accum[N];
1715 void subdomain_algorithm(unsigned int thread,
1716 int i_min, int i_max, int j_min, int j_max) {
1718 point offset = c.accum->offset();
1720 for (mc_iterate m(i_min, i_max, j_min, j_max, thread); !m.done(); m++) {
1722 unsigned int i = (unsigned int) m.get_i();
1723 unsigned int j = (unsigned int) m.get_j();
1725 if (ref_excluded(i, j, offset, c.ax_parameters, ax_count))
1726 continue;
1729 * Transform
1732 struct point q;
1734 q = (c.input_scale < 1.0 && interpolant == NULL)
1735 ? t.scaled_inverse_transform(
1736 point(i + offset[0], j + offset[1]))
1737 : t.unscaled_inverse_transform(
1738 point(i + offset[0], j + offset[1]));
1741 * Check that the transformed coordinates are within
1742 * the boundaries of array c.input, that they are not
1743 * subject to exclusion, and that the weight value in
1744 * the accumulated array is nonzero.
1747 if (input_excluded(q[0], q[1], c.ax_parameters, ax_count))
1748 continue;
1750 if (q[0] >= 0
1751 && q[0] <= c.input->height() - 1
1752 && q[1] >= 0
1753 && q[1] <= c.input->width() - 1
1754 && ((pixel) c.certainty->get_pixel(i, j)).minabs_norm() != 0) {
1755 pixel a = c.accum->get_pixel(i, j);
1756 pixel b;
1758 b = c.input->get_bl(q);
1760 pixel weight = ((c.aweight && pass_number)
1761 ? (pixel) c.aweight->get_pixel(i, j)
1762 : pixel(1, 1, 1))
1763 * (pass_number
1764 ? psqrt(c.certainty->get_pixel(i, j)
1765 * c.input_certainty->get_bl(q, 1))
1766 : pixel(1, 1, 1));
1768 asums[thread] += a * weight;
1769 bsums[thread] += b * weight;
1774 void finish_subdomains(unsigned int N) {
1775 for (unsigned int n = 0; n < N; n++) {
1776 *asum += asums[n];
1777 *bsum += bsums[n];
1779 delete[] asums;
1780 delete[] bsums;
1782 public:
1783 exposure_ratio_iterate(pixel_accum *_asum,
1784 pixel_accum *_bsum,
1785 struct scale_cluster _c,
1786 const transformation &_t,
1787 int _ax_count,
1788 int _pass_number) : decompose_domain(0, _c.accum->height(),
1789 0, _c.accum->width()),
1790 t(_t) {
1792 asum = _asum;
1793 bsum = _bsum;
1794 c = _c;
1795 ax_count = _ax_count;
1796 pass_number = _pass_number;
1799 exposure_ratio_iterate(pixel_accum *_asum,
1800 pixel_accum *_bsum,
1801 struct scale_cluster _c,
1802 const transformation &_t,
1803 int _ax_count,
1804 int _pass_number,
1805 transformation::elem_bounds_int_t b) : decompose_domain(b.imin, b.imax,
1806 b.jmin, b.jmax),
1807 t(_t) {
1809 asum = _asum;
1810 bsum = _bsum;
1811 c = _c;
1812 ax_count = _ax_count;
1813 pass_number = _pass_number;
1817 static void set_exposure_ratio(unsigned int m, struct scale_cluster c,
1818 const transformation &t, int ax_count, int pass_number) {
1820 if (_exp_register == 2) {
1822 * Use metadata only.
1824 ale_real gain_multiplier = image_rw::exp(m).get_gain_multiplier();
1825 pixel multiplier = pixel(gain_multiplier, gain_multiplier, gain_multiplier);
1827 image_rw::exp(m).set_multiplier(multiplier);
1828 ui::get()->exp_multiplier(multiplier[0],
1829 multiplier[1],
1830 multiplier[2]);
1832 return;
1835 pixel_accum asum(0, 0, 0), bsum(0, 0, 0);
1837 exposure_ratio_iterate eri(&asum, &bsum, c, t, ax_count, pass_number);
1838 eri.run();
1840 // std::cerr << (asum / bsum) << " ";
1842 pixel_accum new_multiplier;
1844 new_multiplier = asum / bsum * image_rw::exp(m).get_multiplier();
1846 if (finite(new_multiplier[0])
1847 && finite(new_multiplier[1])
1848 && finite(new_multiplier[2])) {
1849 image_rw::exp(m).set_multiplier(new_multiplier);
1850 ui::get()->exp_multiplier(new_multiplier[0],
1851 new_multiplier[1],
1852 new_multiplier[2]);
1857 * Copy all ax parameters.
1859 static exclusion *copy_ax_parameters(int local_ax_count, exclusion *source) {
1861 exclusion *dest = (exclusion *) malloc(local_ax_count * sizeof(exclusion));
1863 assert (dest);
1865 if (!dest)
1866 ui::get()->memory_error("exclusion regions");
1868 for (int idx = 0; idx < local_ax_count; idx++)
1869 dest[idx] = source[idx];
1871 return dest;
1875 * Copy ax parameters according to frame.
1877 static exclusion *filter_ax_parameters(int frame, int *local_ax_count) {
1879 exclusion *dest = (exclusion *) malloc(ax_count * sizeof(exclusion));
1881 assert (dest);
1883 if (!dest)
1884 ui::get()->memory_error("exclusion regions");
1886 *local_ax_count = 0;
1888 for (int idx = 0; idx < ax_count; idx++) {
1889 if (ax_parameters[idx].x[4] > frame
1890 || ax_parameters[idx].x[5] < frame)
1891 continue;
1893 dest[*local_ax_count] = ax_parameters[idx];
1895 (*local_ax_count)++;
1898 return dest;
1901 static void scale_ax_parameters(int local_ax_count, exclusion *ax_parameters,
1902 ale_pos ref_scale, ale_pos input_scale) {
1903 for (int i = 0; i < local_ax_count; i++) {
1904 ale_pos scale = (ax_parameters[i].type == exclusion::RENDER)
1905 ? ref_scale
1906 : input_scale;
1908 for (int n = 0; n < 6; n++) {
1909 ax_parameters[i].x[n] = ax_parameters[i].x[n] * scale;
1915 * Prepare the next level of detail for ordinary images.
1917 static const image *prepare_lod(const image *current) {
1918 if (current == NULL)
1919 return NULL;
1921 return current->scale_by_half("prepare_lod");
1925 * Prepare the next level of detail for definition maps.
1927 static const image *prepare_lod_def(const image *current) {
1928 if (current == NULL)
1929 return NULL;
1931 return current->defined_scale_by_half("prepare_lod_def");
1935 * Initialize scale cluster data structures.
1938 static void init_nl_cluster(struct scale_cluster *sc) {
1941 static struct scale_cluster *init_clusters(int frame, ale_pos scale_factor,
1942 const image *input_frame, unsigned int steps,
1943 int *local_ax_count) {
1946 * Allocate memory for the array.
1949 struct scale_cluster *scale_clusters =
1950 (struct scale_cluster *) malloc(steps * sizeof(struct scale_cluster));
1952 assert (scale_clusters);
1954 if (!scale_clusters)
1955 ui::get()->memory_error("alignment");
1958 * Prepare images and exclusion regions for the highest level
1959 * of detail.
1962 scale_clusters[0].accum = reference_image;
1964 ui::get()->constructing_lod_clusters(0.0);
1965 scale_clusters[0].input_scale = scale_factor;
1966 if (scale_factor < 1.0 && interpolant == NULL)
1967 scale_clusters[0].input = input_frame->scale(scale_factor, "alignment");
1968 else
1969 scale_clusters[0].input = input_frame;
1971 scale_clusters[0].certainty = reference_defined;
1972 scale_clusters[0].aweight = alignment_weights;
1973 scale_clusters[0].ax_parameters = filter_ax_parameters(frame, local_ax_count);
1976 * Allocate and determine input frame certainty.
1979 if (scale_clusters[0].input->get_bayer() != IMAGE_BAYER_NONE) {
1980 scale_clusters[0].input_certainty = new_image_bayer_ale_real(
1981 scale_clusters[0].input->height(),
1982 scale_clusters[0].input->width(),
1983 scale_clusters[0].input->depth(),
1984 scale_clusters[0].input->get_bayer());
1985 } else {
1986 scale_clusters[0].input_certainty = scale_clusters[0].input->clone("certainty");
1989 for (unsigned int i = 0; i < scale_clusters[0].input_certainty->height(); i++)
1990 for (unsigned int j = 0; j < scale_clusters[0].input_certainty->width(); j++)
1991 for (unsigned int k = 0; k < 3; k++)
1992 if (scale_clusters[0].input->get_channels(i, j) & (1 << k))
1993 ((image *) scale_clusters[0].input_certainty)->set_chan(i, j, k,
1994 scale_clusters[0].input->
1995 exp().confidence(scale_clusters[0].input->get_pixel(i, j))[k]);
1997 scale_ax_parameters(*local_ax_count, scale_clusters[0].ax_parameters, scale_factor,
1998 (scale_factor < 1.0 && interpolant == NULL) ? scale_factor : (ale_pos) 1);
2000 init_nl_cluster(&(scale_clusters[0]));
2003 * Prepare reduced-detail images and exclusion
2004 * regions.
2007 for (unsigned int step = 1; step < steps; step++) {
2008 ui::get()->constructing_lod_clusters(step);
2009 scale_clusters[step].accum = prepare_lod(scale_clusters[step - 1].accum);
2010 scale_clusters[step].certainty = prepare_lod_def(scale_clusters[step - 1].certainty);
2011 scale_clusters[step].aweight = prepare_lod_def(scale_clusters[step - 1].aweight);
2012 scale_clusters[step].ax_parameters
2013 = copy_ax_parameters(*local_ax_count, scale_clusters[step - 1].ax_parameters);
2015 double sf = scale_clusters[step - 1].input_scale / 2;
2016 scale_clusters[step].input_scale = sf;
2018 if (sf >= 1.0 || interpolant != NULL) {
2019 scale_clusters[step].input = scale_clusters[step - 1].input;
2020 scale_clusters[step].input_certainty = scale_clusters[step - 1].input_certainty;
2021 scale_ax_parameters(*local_ax_count, scale_clusters[step].ax_parameters, 0.5, 1);
2022 } else if (sf > 0.5) {
2023 scale_clusters[step].input = scale_clusters[step - 1].input->scale(sf, "alignment");
2024 scale_clusters[step].input_certainty = scale_clusters[step - 1].input->scale(sf, "alignment", 1);
2025 scale_ax_parameters(*local_ax_count, scale_clusters[step].ax_parameters, 0.5, sf);
2026 } else {
2027 scale_clusters[step].input = scale_clusters[step - 1].input->scale(0.5, "alignment");
2028 scale_clusters[step].input_certainty = scale_clusters[step - 1].input_certainty->scale(0.5, "alignment", 1);
2029 scale_ax_parameters(*local_ax_count, scale_clusters[step].ax_parameters, 0.5, 0.5);
2032 init_nl_cluster(&(scale_clusters[step]));
2035 return scale_clusters;
2039 * Destroy the first element in the scale cluster data structure.
2041 static void final_clusters(struct scale_cluster *scale_clusters, ale_pos scale_factor,
2042 unsigned int steps) {
2044 if (scale_clusters[0].input_scale < 1.0) {
2045 delete scale_clusters[0].input;
2048 delete scale_clusters[0].input_certainty;
2050 free((void *)scale_clusters[0].ax_parameters);
2052 for (unsigned int step = 1; step < steps; step++) {
2053 delete scale_clusters[step].accum;
2054 delete scale_clusters[step].certainty;
2055 delete scale_clusters[step].aweight;
2057 if (scale_clusters[step].input_scale < 1.0) {
2058 delete scale_clusters[step].input;
2059 delete scale_clusters[step].input_certainty;
2062 free((void *)scale_clusters[step].ax_parameters);
2065 free(scale_clusters);
2069 * Calculate the centroid of a control point for the set of frames
2070 * having index lower than m. Divide by any scaling of the output.
2072 static point unscaled_centroid(unsigned int m, unsigned int p) {
2073 assert(_keep);
2075 point point_sum(0, 0);
2076 ale_accum divisor = 0;
2078 for(unsigned int j = 0; j < m; j++) {
2079 point pp = cp_array[p][j];
2081 if (pp.defined()) {
2082 point_sum += kept_t[j].transform_unscaled(pp)
2083 / kept_t[j].scale();
2084 divisor += 1;
2088 if (divisor == 0)
2089 return point::undefined();
2091 return point_sum / divisor;
2095 * Calculate centroid of this frame, and of all previous frames,
2096 * from points common to both sets.
2098 static void centroids(unsigned int m, point *current, point *previous) {
2100 * Calculate the translation
2102 point other_centroid(0, 0);
2103 point this_centroid(0, 0);
2104 ale_pos divisor = 0;
2106 for (unsigned int i = 0; i < cp_count; i++) {
2107 point other_c = unscaled_centroid(m, i);
2108 point this_c = cp_array[i][m];
2110 if (!other_c.defined() || !this_c.defined())
2111 continue;
2113 other_centroid += other_c;
2114 this_centroid += this_c;
2115 divisor += 1;
2119 if (divisor == 0) {
2120 *current = point::undefined();
2121 *previous = point::undefined();
2122 return;
2125 *current = this_centroid / divisor;
2126 *previous = other_centroid / divisor;
2130 * Calculate the RMS error of control points for frame m, with
2131 * transformation t, against control points for earlier frames.
2133 static ale_pos cp_rms_error(unsigned int m, transformation t) {
2134 assert (_keep);
2136 ale_accum err = 0;
2137 ale_accum divisor = 0;
2139 for (unsigned int i = 0; i < cp_count; i++)
2140 for (unsigned int j = 0; j < m; j++) {
2141 const point *p = cp_array[i];
2142 point p_ref = kept_t[j].transform_unscaled(p[j]);
2143 point p_cur = t.transform_unscaled(p[m]);
2145 if (!p_ref.defined() || !p_cur.defined())
2146 continue;
2148 err += p_ref.lengthtosq(p_cur);
2149 divisor += 1;
2152 return (ale_pos) sqrt(err / divisor);
2156 static void test_global(diff_stat_t *here, scale_cluster si, transformation t,
2157 int local_ax_count, int m, ale_accum local_gs_mo, ale_pos perturb) {
2159 diff_stat_t test(*here);
2161 test.diff(si, t.get_current_element(), local_ax_count, m);
2163 unsigned int ovl = overlap(si, t, local_ax_count);
2165 if (ovl >= local_gs_mo && test.better()) {
2166 test.confirm();
2167 *here = test;
2168 ui::get()->set_match(here->get_error());
2169 ui::get()->set_offset(here->get_offset());
2170 } else {
2171 test.discard();
2177 * Get the set of global transformations for a given density
2179 static void test_globals(diff_stat_t *here,
2180 scale_cluster si, transformation t, int local_gs, ale_pos adj_p,
2181 int local_ax_count, int m, ale_accum local_gs_mo, ale_pos perturb) {
2183 transformation offset = t;
2185 point min, max;
2187 transformation offset_p = offset;
2189 if (!offset_p.is_projective())
2190 offset_p.eu_to_gpt();
2192 min = max = offset_p.gpt_get(0);
2193 for (int p_index = 1; p_index < 4; p_index++) {
2194 point p = offset_p.gpt_get(p_index);
2195 if (p[0] < min[0])
2196 min[0] = p[0];
2197 if (p[1] < min[1])
2198 min[1] = p[1];
2199 if (p[0] > max[0])
2200 max[0] = p[0];
2201 if (p[1] > max[1])
2202 max[1] = p[1];
2205 point inner_min_t = -min;
2206 point inner_max_t = -max + point(si.accum->height(), si.accum->width());
2207 point outer_min_t = -max + point(adj_p - 1, adj_p - 1);
2208 point outer_max_t = point(si.accum->height(), si.accum->width()) - point(adj_p, adj_p);
2210 if (local_gs == 1 || local_gs == 3 || local_gs == 4 || local_gs == 6) {
2213 * Inner
2216 for (ale_pos i = inner_min_t[0]; i <= inner_max_t[0]; i += adj_p)
2217 for (ale_pos j = inner_min_t[1]; j <= inner_max_t[1]; j += adj_p) {
2218 transformation test_t = offset;
2219 test_t.translate(point(i, j));
2220 test_global(here, si, test_t, local_ax_count, m, local_gs_mo, perturb);
2224 if (local_gs == 2 || local_gs == 3 || local_gs == -1 || local_gs == 6) {
2227 * Outer
2230 for (ale_pos i = outer_min_t[0]; i <= outer_max_t[0]; i += adj_p)
2231 for (ale_pos j = outer_min_t[1]; j < inner_min_t[1]; j += adj_p) {
2232 transformation test_t = offset;
2233 test_t.translate(point(i, j));
2234 test_global(here, si, test_t, local_ax_count, m, local_gs_mo, perturb);
2236 for (ale_pos i = outer_min_t[0]; i <= outer_max_t[0]; i += adj_p)
2237 for (ale_pos j = outer_max_t[1]; j > inner_max_t[1]; j -= adj_p) {
2238 transformation test_t = offset;
2239 test_t.translate(point(i, j));
2240 test_global(here, si, test_t, local_ax_count, m, local_gs_mo, perturb);
2242 for (ale_pos i = outer_min_t[0]; i < inner_min_t[0]; i += adj_p)
2243 for (ale_pos j = outer_min_t[1]; j <= outer_max_t[1]; j += adj_p) {
2244 transformation test_t = offset;
2245 test_t.translate(point(i, j));
2246 test_global(here, si, test_t, local_ax_count, m, local_gs_mo, perturb);
2248 for (ale_pos i = outer_max_t[0]; i > inner_max_t[0]; i -= adj_p)
2249 for (ale_pos j = outer_min_t[1]; j <= outer_max_t[1]; j += adj_p) {
2250 transformation test_t = offset;
2251 test_t.translate(point(i, j));
2252 test_global(here, si, test_t, local_ax_count, m, local_gs_mo, perturb);
2257 static void get_translational_set(std::vector<transformation> *set,
2258 transformation t, ale_pos adj_p) {
2260 ale_pos adj_s;
2262 transformation offset = t;
2263 transformation test_t(transformation::eu_identity());
2265 for (int i = 0; i < 2; i++)
2266 for (adj_s = -adj_p; adj_s <= adj_p; adj_s += 2 * adj_p) {
2268 test_t = offset;
2270 test_t.translate(i ? point(adj_s, 0) : point(0, adj_s));
2272 set->push_back(test_t);
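/*
 * Note (added): threshold_ok() below treats an error e as a match of
 * (1 - e) * 100 percent; e.g., e == 0.04 is a 96% match, which satisfies
 * a match threshold of 95 but not 97.  A negative (or NaN) threshold
 * accepts every frame.
 */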
2276 static int threshold_ok(ale_accum error) {
2277 if ((1 - error) * (ale_accum) 100 >= match_threshold)
2278 return 1;
2280 if (!(match_threshold >= 0))
2281 return 1;
2283 return 0;
2286 static diff_stat_t _align_element(ale_pos perturb, ale_pos local_lower,
2287 scale_cluster *scale_clusters, diff_stat_t here,
2288 ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
2289 ale_pos *current_bd, ale_pos *modified_bd,
2290 astate_t *astate, int lod, scale_cluster si) {
2293 * Run initial tests to get perturbation multipliers and error
2294 * centroids.
2297 std::vector<d2::trans_single> t_set;
2299 here.get_perturb_set(&t_set, adj_p, adj_o, adj_b, current_bd, modified_bd);
2301 int stable_count = 0;
2303 while (perturb >= local_lower) {
2305 ui::get()->alignment_dims(scale_clusters[lod].accum->height(), scale_clusters[lod].accum->width(),
2306 scale_clusters[lod].input->height(), scale_clusters[lod].input->width());
2309 * Orientational adjustment value in degrees.
2311 * Since rotational perturbation is now specified as an
2312 * arclength, we have to convert.
2315 ale_pos adj_o = 2 * (double) perturb
2316 / sqrt(pow(scale_clusters[0].input->height(), 2)
2317 + pow(scale_clusters[0].input->width(), 2))
2318 * 180
2319 / M_PI;
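/*
 * Illustrative numbers only: for a 640x480 input frame (diagonal 800)
 * and perturb == 16, adj_o == 2 * 16 / 800 * 180 / M_PI, or roughly
 * 2.3 degrees.
 */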
2322 * Barrel distortion adjustment value
2325 ale_pos adj_b = perturb * bda_mult;
2327 trans_single old_offset = here.get_offset();
2329 here.perturb_test(perturb, adj_p, adj_o, adj_b, current_bd, modified_bd,
2330 stable_count);
2332 if (here.get_offset() == old_offset)
2333 stable_count++;
2334 else
2335 stable_count = 0;
2337 if (stable_count == 3) {
2339 stable_count = 0;
2341 perturb *= 0.5;
2343 if (lod > 0
2344 && lod > lrint(log(perturb) / log(2)) - lod_preferred) {
2347 * Work with images twice as large
2350 lod--;
2351 si = scale_clusters[lod];
2354 * Rescale the transforms.
2357 ale_pos rescale_factor = (double) scale_factor
2358 / (double) pow(2, lod)
2359 / (double) here.get_offset().scale();
2361 here.rescale(rescale_factor, si);
2363 } else {
2364 adj_p = perturb / pow(2, lod);
2368 * Announce changes
2371 ui::get()->alignment_perturbation_level(perturb, lod);
2374 ui::get()->set_match(here.get_error());
2375 ui::get()->set_offset(here.get_offset());
2378 if (lod > 0) {
2379 ale_pos rescale_factor = (double) scale_factor
2380 / (double) here.get_offset().scale();
2382 here.rescale(rescale_factor, scale_clusters[0]);
2385 return here;
2389 * Check for satisfaction of the certainty threshold.
2391 static int ma_cert_satisfied(const scale_cluster &c, const transformation &t, unsigned int i) {
2392 transformation::elem_bounds_int_t b = t.elem_bounds().scale_to_bounds(c.accum->height(), c.accum->width());
2393 ale_accum sum[3] = {0, 0, 0};
2395 for (unsigned int ii = b.imin; ii < b.imax; ii++)
2396 for (unsigned int jj = b.jmin; jj < b.jmax; jj++) {
2397 pixel p = c.accum->get_pixel(ii, jj);
2398 sum[0] += p[0];
2399 sum[1] += p[1];
2400 sum[2] += p[2];
2403 unsigned int count = (b.jmax - b.jmin) * (b.imax - b.imin);
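* Require the mean accumulated value of each channel, taken over the
* element's bounds, to reach the certainty floor _ma_cert.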
2405 for (int k = 0; k < 3; k++)
2406 if (sum[k] / count < _ma_cert)
2407 return 0;
2409 return 1;
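* Walk the chain of ancestors of the current multi-alignment element,
* trying each ancestor's transformation (rescaled to the current
* element's scale) and keeping it only when it yields a
* better-defined difference statistic.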
2412 static diff_stat_t check_ancestor_path(const trans_multi &offset, const scale_cluster &si, diff_stat_t here, int local_ax_count, int frame) {
2414 if (offset.get_current_index() > 0)
2415 for (int i = offset.parent_index(offset.get_current_index()); i >= 0; i = (i > 0) ? (int) offset.parent_index(i) : -1) {
2416 trans_single t = offset.get_element(i);
2417 t.rescale(offset.get_current_element().scale());
2419 here.diff(si, t, local_ax_count, frame);
2421 if (here.better_defined())
2422 here.confirm();
2423 else
2424 here.discard();
2427 return here;
2431 * Align frame m against the reference.
2433 * XXX: the transformation class currently combines ordinary
2434 * transformations with scaling. This is somewhat convenient for
2435 * some things, but can also be confusing. This method, _align(), is
2436 * one case where special care must be taken to ensure that the scale
2437 * is always set correctly (by using the 'rescale' method).
2439 static diff_stat_multi _align(int m, int local_gs, astate_t *astate) {
2441 const image *input_frame = astate->get_input_frame();
2444 * Local upper/lower data, possibly dependent on image
2445 * dimensions.
2448 ale_pos local_lower, local_upper;
2449 ale_accum local_gs_mo;
2452 * Select the minimum dimension as the reference.
2455 ale_pos reference_size = input_frame->height();
2456 if (input_frame->width() < reference_size)
2457 reference_size = input_frame->width();
2458 ale_accum reference_area = input_frame->height()
2459 * input_frame->width();
2461 if (perturb_lower_percent)
2462 local_lower = (double) perturb_lower
2463 * (double) reference_size
2464 * (double) 0.01
2465 * (double) scale_factor;
2466 else
2467 local_lower = perturb_lower;
2469 if (perturb_upper_percent)
2470 local_upper = (double) perturb_upper
2471 * (double) reference_size
2472 * (double) 0.01
2473 * (double) scale_factor;
2474 else
2475 local_upper = perturb_upper;
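* Round the upper perturbation bound down to a power of two.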
2477 local_upper = pow(2, floor(log(local_upper) / log(2)));
2479 if (gs_mo_percent)
2480 local_gs_mo = (double) _gs_mo
2481 * (double) reference_area
2482 * (double) 0.01
2483 * (double) scale_factor;
2484 else
2485 local_gs_mo = _gs_mo;
2488 * Logarithms aren't exact, so we divide repeatedly to discover
2489 * how many steps will occur, and pass this information to the
2490 * user interface.
2493 int step_count = 0;
2494 double step_variable = local_upper;
2495 while (step_variable >= local_lower) {
2496 step_variable /= 2;
2497 step_count++;
2500 ale_pos perturb = local_upper;
2502 if (_keep) {
2503 kept_t[latest] = latest_t;
2504 kept_ok[latest] = latest_ok;
2508 * Determine how many levels of detail should be prepared, by
2509 * calculating the initial (largest) value for the
2510 * level-of-detail variable.
2513 int lod = lrint(log(perturb) / log(2)) - lod_preferred;
2515 if (lod < 0)
2516 lod = 0;
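* Don't reduce the level of detail so far that the reference image at
* the coarsest level falls below the minimum dimension.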
2518 while (lod > 0 && (reference_image->width() < pow(2, lod) * min_dimension
2519 || reference_image->height() < pow(2, lod) * min_dimension))
2520 lod--;
2522 unsigned int steps = (unsigned int) lod + 1;
2525 * Prepare multiple levels of detail.
2528 int local_ax_count;
2529 struct scale_cluster *scale_clusters = init_clusters(m,
2530 scale_factor, input_frame, steps,
2531 &local_ax_count);
2534 * Initialize the default initial transform
2537 astate->init_default();
2540 * Set the default transformation.
2543 transformation offset = astate->get_default();
2546 * Establish boundaries
2549 offset.set_current_bounds(reference_image);
2551 ui::get()->alignment_degree_max(offset.get_coordinate(offset.stack_depth() - 1).degree);
2553 if (offset.stack_depth() == 1) {
2554 ui::get()->set_steps(step_count, 0);
2555 } else {
2556 ui::get()->set_steps(offset.get_coordinate(offset.stack_depth() - 1).degree + 1, 1);
2560 * Load any file-specified transformations
2563 for (unsigned int index = 0; index < offset.stack_depth(); index++) {
2564 int is_default = 1;
2565 unsigned int index_2;
2566 offset.set_current_index(index);
2568 offset = tload_next(tload, alignment_class == 2,
2569 offset,
2570 &is_default, offset.get_current_index() == 0);
2572 index_2 = offset.get_current_index();
2574 if (index_2 > index) {
2575 for (unsigned int index_3 = index; index_3 < index_2; index_3++)
2576 astate->set_is_default(index_3, 1);
2578 index = index_2;
2581 astate->set_is_default(index, is_default);
2584 offset.set_current_index(0);
2586 astate->init_frame_alignment_primary(&offset, lod, perturb);
2589 * Control point alignment
2592 if (local_gs == 5) {
2594 transformation o = offset;
2597 * Determine centroid data
2600 point current, previous;
2601 centroids(m, &current, &previous);
2603 if (current.defined() && previous.defined()) {
2604 o = orig_t;
2605 o.set_dimensions(input_frame);
2606 o.translate((previous - current) * o.scale());
2607 current = previous;
2611 * Determine rotation for alignment classes other than translation.
2614 ale_pos lowest_error = cp_rms_error(m, o);
2616 ale_pos rot_lower = 2 * (double) local_lower
2617 / sqrt(pow(scale_clusters[0].input->height(), 2)
2618 + pow(scale_clusters[0].input->width(), 2))
2619 * 180
2620 / M_PI;
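* Coordinate-descent search over rotations about the centroid: the
* rotation step is halved each pass, and a candidate rotation is kept
* whenever it reduces the control-point RMS error.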
2622 if (alignment_class > 0)
2623 for (double rot = 30; rot > rot_lower; rot /= 2)
2624 for (double srot = -rot; srot < rot * 1.5; srot += rot * 2) {
2625 int is_improved = 1;
2626 while (is_improved) {
2627 is_improved = 0;
2628 transformation test_t = o;
2630 * XXX: is this right?
2632 test_t.rotate(current * o.scale(), srot);
2633 ale_pos test_v = cp_rms_error(m, test_t);
2635 if (test_v < lowest_error) {
2636 lowest_error = test_v;
2637 o = test_t;
2638 srot += 3 * rot;
2639 is_improved = 1;
2645 * Determine projective parameters through a local
2646 * minimum search.
2649 if (alignment_class == 2) {
2650 ale_pos adj_p = lowest_error;
2652 if (adj_p < local_lower)
2653 adj_p = local_lower;
2655 while (adj_p >= local_lower) {
2656 transformation test_t = o;
2657 int is_improved = 1;
2658 ale_pos test_v;
2659 ale_pos adj_s;
2661 while (is_improved) {
2662 is_improved = 0;
2664 for (int i = 0; i < 4; i++)
2665 for (int j = 0; j < 2; j++)
2666 for (adj_s = -adj_p; adj_s <= adj_p; adj_s += 2 * adj_p) {
2668 test_t = o;
2670 if (perturb_type == 0)
2671 test_t.gpt_modify(j, i, adj_s);
2672 else if (perturb_type == 1)
2673 test_t.gr_modify(j, i, adj_s);
2674 else
2675 assert(0);
2677 test_v = cp_rms_error(m, test_t);
2679 if (test_v < lowest_error) {
2680 lowest_error = test_v;
2681 o = test_t;
2682 adj_s += 3 * adj_p;
2683 is_improved = 1;
2687 adj_p /= 2;
2693 * Pre-alignment exposure adjustment
2696 if (_exp_register) {
2697 ui::get()->exposure_1();
2698 set_exposure_ratio(m, scale_clusters[0], offset, local_ax_count, 0);
2702 * Scale transform for lod
2705 for (int lod_ = 0; lod_ < lod; lod_++) {
2706 transformation s = offset;
2707 transformation t = offset;
2709 t.rescale(1 / (double) 2);
2711 if (!(t.scaled_height() > 0 && t.scaled_height() < s.scaled_height())
2712 || !(t.scaled_width() > 0 && t.scaled_width() < s.scaled_width())) {
2713 perturb /= pow(2, lod - lod_);
2714 lod = lod_;
2715 break;
2716 } else {
2717 offset = t;
2721 ui::get()->set_offset(offset);
2723 struct scale_cluster si = scale_clusters[lod];
2726 * Projective adjustment value
2729 ale_pos adj_p = perturb / pow(2, lod);
2732 * Orientational adjustment value in degrees.
2734 * Since rotational perturbation is now specified as an
2735 * arclength, we have to convert.
2738 ale_pos adj_o = (double) 2 * (double) perturb
2739 / sqrt(pow((double) scale_clusters[0].input->height(), (double) 2)
2740 + pow((double) scale_clusters[0].input->width(), (double) 2))
2741 * (double) 180
2742 / M_PI;
2745 * Barrel distortion adjustment value
2748 ale_pos adj_b = perturb * bda_mult;
2751 * Global search overlap requirements.
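* The minimum-overlap area is scaled to the current level of detail
* below, since the accumulated area shrinks by a factor of four per
* level.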
2754 local_gs_mo = (double) local_gs_mo / pow(pow(2, lod), 2);
2757 * Alignment statistics.
2760 diff_stat_t here(offset.elem_bounds());
2763 * Current difference (error) value
2766 ui::get()->prematching();
2767 here.diff(si, offset.get_current_element(), local_ax_count, m);
2768 ui::get()->set_match(here.get_error());
2771 * Current and modified barrel distortion parameters
2774 ale_pos current_bd[BARREL_DEGREE];
2775 ale_pos modified_bd[BARREL_DEGREE];
2776 offset.bd_get(current_bd);
2777 offset.bd_get(modified_bd);
2780 * Translational global search step
2783 if (perturb >= local_lower && local_gs != 0 && local_gs != 5
2784 && (local_gs != 6 || astate->get_is_default(0))) {
2786 ui::get()->global_alignment(perturb, lod);
2787 ui::get()->gs_mo(local_gs_mo);
2789 test_globals(&here, si, offset, local_gs, adj_p,
2790 local_ax_count, m, local_gs_mo, perturb);
2792 ui::get()->set_match(here.get_error());
2793 ui::get()->set_offset(here.get_offset());
2797 * Perturbation adjustment loop.
2800 offset.set_current_element(here.get_offset());
2802 for (unsigned int i = 0; i < offset.stack_depth(); i++) {
2804 ui::get()->aligning_element(i, offset.stack_depth());
2806 offset.set_current_index(i);
2808 ui::get()->start_multi_alignment_element(offset);
2810 ui::get()->set_offset(offset);
2812 if (i > 0) {
2813 astate->init_frame_alignment_nonprimary(&offset, lod, perturb, i);
2815 if (_exp_register == 1) {
2816 ui::get()->exposure_1();
2817 pixel_accum asum(0, 0, 0), bsum(0, 0, 0);
2818 exposure_ratio_iterate eri(&asum, &bsum, scale_clusters[0], offset, local_ax_count, 0,
2819 offset.elem_bounds().scale_to_bounds(scale_clusters[0].accum->height(),
2820 scale_clusters[0].accum->width()));
2822 eri.run();
2823 pixel_accum tr = asum / bsum;
2824 ui::get()->exp_multiplier(tr[0], tr[1], tr[2]);
2825 offset.set_tonal_multiplier(tr);
2829 int e_lod = lod;
2830 int e_div = offset.get_current_coordinate().degree;
2831 ale_pos e_perturb = perturb;
2832 ale_pos e_adj_p = adj_p;
2833 ale_pos e_adj_b = adj_b;
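* Elements whose coordinates have higher degree start from a
* proportionally finer perturbation (and, when possible, a finer
* level of detail); barrel-distortion adjustment is disabled for
* them.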
2835 for (int d = 0; d < e_div; d++) {
2836 e_adj_b = 0;
2837 e_perturb *= 0.5;
2838 if (e_lod > 0) {
2839 e_lod--;
2840 } else {
2841 e_adj_p *= 0.5;
2845 if (i > 0) {
2847 d2::trans_multi::elem_bounds_t b = offset.elem_bounds();
2849 for (int dim_satisfied = 0; e_lod > 0 && !dim_satisfied; ) {
2850 int height = scale_clusters[e_lod].accum->height();
2851 int width = scale_clusters[e_lod].accum->width();
2853 d2::trans_multi::elem_bounds_int_t bi = b.scale_to_bounds(height, width);
2855 dim_satisfied = bi.satisfies_min_dim(min_dimension);
2857 if (!dim_satisfied) {
2858 e_lod--;
2859 e_adj_p *= 2;
2864 * Scale transform for lod
2867 for (int lod_ = 0; lod_ < e_lod; lod_++) {
2868 trans_single s = offset.get_element(i);
2869 trans_single t = offset.get_element(i);
2871 t.rescale(1 / (double) 2);
2873 if (!(t.scaled_height() > 0 && t.scaled_height() < s.scaled_height())
2874 || !(t.scaled_width() > 0 && t.scaled_width() < s.scaled_width())) {
2875 e_perturb /= pow(2, e_lod - lod_);
2876 e_lod = lod_;
2877 break;
2878 } else {
2879 offset.set_element(i, t);
2883 ui::get()->set_offset(offset);
2887 * Announce perturbation size
2890 ui::get()->aligning(e_perturb, e_lod);
2892 si = scale_clusters[e_lod];
2894 here.set_elem_bounds(offset.elem_bounds());
2896 here.diff(si, offset.get_current_element(), local_ax_count, m);
2898 here.confirm();
2900 here = check_ancestor_path(offset, si, here, local_ax_count, m);
2902 here = _align_element(e_perturb, local_lower, scale_clusters,
2903 here, e_adj_p, adj_o, e_adj_b, current_bd, modified_bd,
2904 astate, e_lod, si);
2906 offset.rescale(here.get_offset().scale() / offset.scale());
2908 offset.set_current_element(here.get_offset());
2910 if (i > 0 && _exp_register == 1) {
2911 if (ma_cert_satisfied(scale_clusters[0], offset, i)) {
2912 ui::get()->exposure_2();
2913 pixel_accum asum(0, 0, 0), bsum(0, 0, 0);
2914 exposure_ratio_iterate eri(&asum, &bsum, scale_clusters[0], offset, local_ax_count, 1,
2915 offset.elem_bounds().scale_to_bounds(scale_clusters[0].accum->height(),
2916 scale_clusters[0].accum->width()));
2918 eri.run();
2919 pixel_accum tr = asum / bsum;
2920 ui::get()->exp_multiplier(tr[0], tr[1], tr[2]);
2921 offset.set_tonal_multiplier(tr);
2922 } else {
2923 offset.set_tonal_multiplier(offset.get_element(offset.parent_index(i)).get_tonal_multiplier(point(0, 0)));
2925 } else if (_exp_register == 1) {
2926 ui::get()->exposure_2();
2927 set_exposure_ratio(m, scale_clusters[0], offset, local_ax_count, 1);
2930 ui::get()->set_offset(offset);
2932 if (i + 1 == offset.stack_depth())
2933 ui::get()->alignment_degree_complete(offset.get_coordinate(i).degree);
2934 else if (offset.get_coordinate(i).degree != offset.get_coordinate(i + 1).degree)
2935 ui::get()->alignment_degree_complete(offset.get_coordinate(i + 1).degree);
2938 offset.set_current_index(0);
2940 ui::get()->multi();
2941 offset.set_multi(reference_image, input_frame);
2944 * Recalculate error on whole frame.
2947 ui::get()->postmatching();
2948 diff_stat_generic<transformation> multi_here(offset.elem_bounds());
2949 multi_here.diff(scale_clusters[0], offset, local_ax_count, m);
2950 ui::get()->set_match(multi_here.get_error());
2953 * Free the level-of-detail structures
2956 final_clusters(scale_clusters, scale_factor, steps);
2959 * Ensure that the match meets the threshold.
2962 if (threshold_ok(multi_here.get_error())) {
2964 * Update alignment variables
2966 latest_ok = 1;
2967 astate->set_default(offset);
2968 astate->set_final(offset);
2969 ui::get()->alignment_match_ok();
2970 } else if (local_gs == 4) {
2973 * Align with outer starting points.
2977 * XXX: This probably isn't exactly the right thing to do,
2978 * since variables like old_initial_value have been overwritten.
2981 diff_stat_multi nested_result = _align(m, -1, astate);
2983 if (threshold_ok(nested_result.get_error())) {
2984 return nested_result;
2985 } else if (nested_result.get_error() < multi_here.get_error()) {
2986 multi_here = nested_result;
2989 if (is_fail_default)
2990 offset = astate->get_default();
2992 ui::get()->set_match(multi_here.get_error());
2993 ui::get()->alignment_no_match();
2995 } else if (local_gs == -1) {
2997 latest_ok = 0;
2998 latest_t = offset;
2999 return multi_here;
3001 } else {
3002 if (is_fail_default)
3003 offset = astate->get_default();
3004 latest_ok = 0;
3005 ui::get()->alignment_no_match();
3009 * Write the tonal registration multiplier as a comment.
3012 pixel trm = image_rw::exp(m).get_multiplier();
3013 tsave_trm(tsave, trm[0], trm[1], trm[2]);
3016 * Save the transformation information
3019 for (unsigned int index = 0; index < offset.stack_depth(); index++) {
3020 offset.set_current_index(index);
3022 tsave_next(tsave, offset, alignment_class == 2,
3023 offset.get_current_index() == 0);
3026 offset.set_current_index(0);
3029 * Update match statistics.
3032 match_sum += (1 - multi_here.get_error()) * (ale_accum) 100;
3033 match_count++;
3034 latest = m;
3035 latest_t = offset;
3037 return multi_here;
3040 #ifdef USE_FFTW
3042 * High-pass filter for frequency weights
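* Zeroes the low-frequency bins: the first rows * vert_freq_cut rows,
* the first cols * horiz_freq_cut columns, and any bin whose
* normalized row-plus-column frequency falls below 2 * avg_freq_cut.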
3044 static void hipass(int rows, int cols, fftw_complex *inout) {
3045 for (int i = 0; i < rows * vert_freq_cut; i++)
3046 for (int j = 0; j < cols; j++)
3047 for (int k = 0; k < 2; k++)
3048 inout[i * cols + j][k] = 0;
3049 for (int i = 0; i < rows; i++)
3050 for (int j = 0; j < cols * horiz_freq_cut; j++)
3051 for (int k = 0; k < 2; k++)
3052 inout[i * cols + j][k] = 0;
3053 for (int i = 0; i < rows; i++)
3054 for (int j = 0; j < cols; j++)
3055 for (int k = 0; k < 2; k++)
3056 if (i / (double) rows + j / (double) cols < 2 * avg_freq_cut)
3057 inout[i * cols + j][k] = 0;
3059 #endif
3063 * Reset alignment weights
3065 static void reset_weights() {
3066 if (alignment_weights != NULL)
3067 delete alignment_weights;
3069 alignment_weights = NULL;
3073 * Initialize alignment weights
3075 static void init_weights() {
3076 if (alignment_weights != NULL)
3077 return;
3079 int rows = reference_image->height();
3080 int cols = reference_image->width();
3081 int colors = reference_image->depth();
3083 alignment_weights = new_image_ale_real(rows, cols,
3084 colors, "alignment_weights");
3086 assert (alignment_weights);
3088 for (int i = 0; i < rows; i++)
3089 for (int j = 0; j < cols; j++)
3090 alignment_weights->set_pixel(i, j, pixel(1, 1, 1));
3094 * Update alignment weights with weight map
3096 static void map_update() {
3098 if (weight_map == NULL)
3099 return;
3101 init_weights();
3103 point map_offset = reference_image->offset() - weight_map->offset();
3105 int rows = reference_image->height();
3106 int cols = reference_image->width();
3108 for (int i = 0; i < rows; i++)
3109 for (int j = 0; j < cols; j++) {
3110 point map_weight_position = map_offset + point(i, j);
3111 if (map_weight_position[0] >= 0
3112 && map_weight_position[1] >= 0
3113 && map_weight_position[0] <= weight_map->height() - 1
3114 && map_weight_position[1] <= weight_map->width() - 1)
3115 alignment_weights->set_pixel(i, j,
3116 alignment_weights->get_pixel(i, j)
3117 * weight_map->get_bl(map_weight_position));
3122 * Update alignment weights with algorithmic weights
3124 static void wmx_update() {
3125 #ifdef USE_UNIX
3127 static exposure *exp_def = new exposure_default();
3128 static exposure *exp_bool = new exposure_boolean();
3130 if (wmx_file == NULL || wmx_exec == NULL || wmx_defs == NULL)
3131 return;
3133 unsigned int rows = reference_image->height();
3134 unsigned int cols = reference_image->width();
3136 image_rw::write_image(wmx_file, reference_image);
3137 image_rw::write_image(wmx_defs, reference_defined, exp_bool);
3139 /* execute ... */
3140 int exit_status = 1;
3141 if (!fork()) {
3142 execlp(wmx_exec, wmx_exec, wmx_file, wmx_defs, NULL);
3143 ui::get()->exec_failure(wmx_exec, wmx_file, wmx_defs);
3146 wait(&exit_status);
3148 if (exit_status)
3149 ui::get()->fork_failure("d2::align");
3151 image *wmx_weights = image_rw::read_image(wmx_file, exp_def);
3153 if (wmx_weights->height() != rows || wmx_weights->width() != cols)
3154 ui::get()->error("algorithmic weighting must not change image size");
3156 if (alignment_weights == NULL)
3157 alignment_weights = wmx_weights;
3158 else {
3159 for (unsigned int i = 0; i < rows; i++)
3160 for (unsigned int j = 0; j < cols; j++)
3161 alignment_weights->set_pixel(i, j,
3162 (pixel) alignment_weights->get_pixel(i, j)
3163 * (pixel) wmx_weights->get_pixel(i, j));
delete wmx_weights; /* free the temporary weight image rather than leaking it */
3164 #endif
3168 * Update alignment weights with frequency weights
3170 static void fw_update() {
3171 #ifdef USE_FFTW
3172 if (horiz_freq_cut == 0
3173 && vert_freq_cut == 0
3174 && avg_freq_cut == 0)
3175 return;
3178 * Required for correct operation of --fwshow
3181 assert (alignment_weights == NULL);
3183 int rows = reference_image->height();
3184 int cols = reference_image->width();
3185 int colors = reference_image->depth();
3187 alignment_weights = new_image_ale_real(rows, cols,
3188 colors, "alignment_weights");
3190 fftw_complex *inout = (fftw_complex *) fftw_malloc(sizeof(fftw_complex) * rows * cols);
3192 assert (inout);
3194 fftw_plan pf = fftw_plan_dft_2d(rows, cols,
3195 inout, inout,
3196 FFTW_FORWARD, FFTW_ESTIMATE);
3198 fftw_plan pb = fftw_plan_dft_2d(rows, cols,
3199 inout, inout,
3200 FFTW_BACKWARD, FFTW_ESTIMATE);
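* For each color channel: forward-transform, remove low frequencies,
* inverse-transform, and store the normalized magnitude of the result
* as the alignment weight.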
3202 for (int k = 0; k < colors; k++) {
3203 for (int i = 0; i < rows * cols; i++) {
3204 inout[i][0] = reference_image->get_pixel(i / cols, i % cols)[k];
3205 inout[i][1] = 0;
3208 fftw_execute(pf);
3209 hipass(rows, cols, inout);
3210 fftw_execute(pb);
3212 for (int i = 0; i < rows * cols; i++) {
3213 #if 0
3214 alignment_weights->pix(i / cols, i % cols)[k] = fabs(inout[i][0] / (rows * cols));
3215 #else
3216 alignment_weights->set_chan(i / cols, i % cols, k,
3217 sqrt(pow(inout[i][0] / (rows * cols), 2)
3218 + pow(inout[i][1] / (rows * cols), 2)));
3219 #endif
3223 fftw_destroy_plan(pf);
3224 fftw_destroy_plan(pb);
3225 fftw_free(inout);
3227 if (fw_output != NULL)
3228 image_rw::write_image(fw_output, alignment_weights);
3229 #endif
3233 * Update alignment to frame N.
3235 static void update_to(int n) {
3237 assert (n <= latest + 1);
3238 assert (n >= 0);
3240 static astate_t astate;
3242 ui::get()->set_frame_num(n);
3244 if (latest < 0) {
3247 * Handle the initial frame
3250 astate.set_input_frame(image_rw::open(n));
3252 const image *i = astate.get_input_frame();
3253 int is_default;
3254 transformation result = alignment_class == 2
3255 ? transformation::gpt_identity(i, scale_factor)
3256 : transformation::eu_identity(i, scale_factor);
3257 result = tload_first(tload, alignment_class == 2, result, &is_default);
3258 tsave_first(tsave, result, alignment_class == 2);
3260 if (_keep > 0) {
3261 kept_t = transformation::new_eu_identity_array(image_rw::count());
3262 kept_ok = (int *) malloc(image_rw::count()
3263 * sizeof(int));
3264 assert (kept_t);
3265 assert (kept_ok);
3267 if (!kept_t || !kept_ok)
3268 ui::get()->memory_error("alignment");
3270 kept_ok[0] = 1;
3271 kept_t[0] = result;
3274 latest = 0;
3275 latest_ok = 1;
3276 latest_t = result;
3278 astate.set_default(result);
3279 orig_t = result;
3281 image_rw::close(n);
3284 for (int i = latest + 1; i <= n; i++) {
3287 * Handle supplemental frames.
3290 assert (reference != NULL);
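* Bring the reference rendering up to date through frame i - 1, then
* fetch the image and definition map that frame i will be aligned
* against.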
3292 ui::get()->set_arender_current();
3293 reference->sync(i - 1);
3294 ui::get()->clear_arender_current();
3295 reference_image = reference->get_image();
3296 reference_defined = reference->get_defined();
3298 if (i == 1)
3299 astate.default_set_original_bounds(reference_image);
3301 reset_weights();
3302 fw_update();
3303 wmx_update();
3304 map_update();
3306 assert (reference_image != NULL);
3307 assert (reference_defined != NULL);
3309 astate.set_input_frame(image_rw::open(i));
3311 _align(i, _gs, &astate);
3313 image_rw::close(i); /* close the frame opened above */
3317 public:
3320 * Set the control point count
3322 static void set_cp_count(unsigned int n) {
3323 assert (cp_array == NULL);
3325 cp_count = n;
3326 cp_array = (const point **) malloc(n * sizeof(const point *));
3330 * Set control points.
3332 static void set_cp(unsigned int i, const point *p) {
3333 cp_array[i] = p;
3337 * Register exposure
3339 static void exp_register() {
3340 _exp_register = 1;
3344 * Register exposure based only on image metadata
3346 static void exp_meta_only() {
3347 _exp_register = 2;
3351 * Don't register exposure
3353 static void exp_noregister() {
3354 _exp_register = 0;
3358 * Set alignment class to translation only. Only adjust the x and y
3359 * position of images. Do not rotate input images or perform
3360 * projective transformations.
3362 static void class_translation() {
3363 alignment_class = 0;
3367 * Set alignment class to Euclidean transformations only. Adjust the x
3368 * and y position of images and the orientation of the image about the
3369 * image center.
3371 static void class_euclidean() {
3372 alignment_class = 1;
3376 * Set alignment class to perform general projective transformations.
3377 * See the file gpt.h for more information about general projective
3378 * transformations.
3380 static void class_projective() {
3381 alignment_class = 2;
3385 * Set the default initial alignment to the identity transformation.
3387 static void initial_default_identity() {
3388 default_initial_alignment_type = 0;
3392 * Set the default initial alignment to the most recently matched
3393 * frame's final transformation.
3395 static void initial_default_follow() {
3396 default_initial_alignment_type = 1;
3400 * Perturb output coordinates.
3402 static void perturb_output() {
3403 perturb_type = 0;
3407 * Perturb source coordinates.
3409 static void perturb_source() {
3410 perturb_type = 1;
3414 * Frames under the match threshold keep their best (optimal) alignments.
3416 static void fail_optimal() {
3417 is_fail_default = 0;
3421 * Frames under threshold keep their default alignments.
3423 static void fail_default() {
3424 is_fail_default = 1;
3428 * Align images with an error contribution from each color channel.
3430 static void all() {
3431 channel_alignment_type = 0;
3435 * Align images with an error contribution only from the green channel.
3436 * Other color channels do not affect alignment.
3438 static void green() {
3439 channel_alignment_type = 1;
3443 * Align images using a summation of channels. May be useful when
3444 * dealing with images that have high frequency color ripples due to
3445 * color aliasing.
3447 static void sum() {
3448 channel_alignment_type = 2;
3452 * Error metric exponent
3455 static void set_metric_exponent(float me) {
3456 metric_exponent = me;
3460 * Match threshold
3463 static void set_match_threshold(float mt) {
3464 match_threshold = mt;
3468 * Perturbation lower and upper bounds.
3471 static void set_perturb_lower(ale_pos pl, int plp) {
3472 perturb_lower = pl;
3473 perturb_lower_percent = plp;
3476 static void set_perturb_upper(ale_pos pu, int pup) {
3477 perturb_upper = pu;
3478 perturb_upper_percent = pup;
3482 * Maximum rotational perturbation.
3485 static void set_rot_max(int rm) {
3488 * Obtain the largest power of two not larger than rm.
3491 rot_max = pow(2, floor(log(rm) / log(2)));
3495 * Barrel distortion adjustment multiplier
3498 static void set_bda_mult(ale_pos m) {
3499 bda_mult = m;
3503 * Barrel distortion maximum rate of change
3506 static void set_bda_rate(ale_pos m) {
3507 bda_rate = m;
3511 * Preferred level of detail.
3514 static void set_lod_preferred(int lm) {
3515 lod_preferred = lm;
3519 * Minimum dimension for reduced level-of-detail.
3522 static void set_min_dimension(int md) {
3523 min_dimension = md;
3527 * Set the scale factor
3529 static void set_scale(ale_pos s) {
3530 scale_factor = s;
3534 * Set reference rendering to align against
3536 static void set_reference(render *r) {
3537 reference = r;
3541 * Set the interpolant
3543 static void set_interpolant(filter::scaled_filter *f) {
3544 interpolant = f;
3548 * Set alignment weights image
3550 static void set_weight_map(const image *i) {
3551 weight_map = i;
3555 * Set frequency cuts
3557 static void set_frequency_cut(double h, double v, double a) {
3558 horiz_freq_cut = h;
3559 vert_freq_cut = v;
3560 avg_freq_cut = a;
3564 * Set algorithmic alignment weighting
3566 static void set_wmx(const char *e, const char *f, const char *d) {
3567 wmx_exec = e;
3568 wmx_file = f;
3569 wmx_defs = d;
3573 * Show frequency weights
3575 static void set_fl_show(const char *filename) {
3576 fw_output = filename;
3580 * Set transformation file loader.
3582 static void set_tload(tload_t *tl) {
3583 tload = tl;
3587 * Set transformation file saver.
3589 static void set_tsave(tsave_t *ts) {
3590 tsave = ts;
3594 * Get match statistics for frame N.
3596 static int match(int n) {
3597 update_to(n);
3599 if (n == latest)
3600 return latest_ok;
3601 else if (_keep)
3602 return kept_ok[n];
3603 else {
3604 assert(0);
3605 exit(1);
3610 * Indicate that old alignment data should be kept.
3612 static void keep() {
3613 assert (latest == -1);
3614 _keep = 1;
3618 * Get alignment for frame N.
3620 static transformation of(int n) {
3621 update_to(n);
3622 if (n == latest)
3623 return latest_t;
3624 else if (_keep)
3625 return kept_t[n];
3626 else {
3627 assert(0);
3628 exit(1);
3633 * Use Monte Carlo alignment sampling with argument N.
3635 static void mc(ale_pos n) {
3636 _mc = n;
3640 * Set the certainty-weighted flag.
3642 static void certainty_weighted(int flag) {
3643 certainty_weights = flag;
3647 * Set the global search type.
3649 static void gs(const char *type) {
3650 if (!strcmp(type, "local")) {
3651 _gs = 0;
3652 } else if (!strcmp(type, "inner")) {
3653 _gs = 1;
3654 } else if (!strcmp(type, "outer")) {
3655 _gs = 2;
3656 } else if (!strcmp(type, "all")) {
3657 _gs = 3;
3658 } else if (!strcmp(type, "central")) {
3659 _gs = 4;
3660 } else if (!strcmp(type, "defaults")) {
3661 _gs = 6;
3662 } else if (!strcmp(type, "points")) {
3663 _gs = 5;
3664 keep();
3665 } else {
3666 ui::get()->error("bad global search type");
3671 * Set the minimum overlap for global searching
3673 static void gs_mo(ale_pos value, int _gs_mo_percent) {
3674 _gs_mo = value;
3675 gs_mo_percent = _gs_mo_percent;
3679 * Set the multi-alignment certainty lower bound.
3681 static void set_ma_cert(ale_real value) {
3682 _ma_cert = value;
3686 * Set alignment exclusion regions
3688 static void set_exclusion(exclusion *_ax_parameters, int _ax_count) {
3689 ax_count = _ax_count;
3690 ax_parameters = _ax_parameters;
3694 * Get match summary statistics.
3696 static ale_accum match_summary() {
3697 return match_sum / (ale_accum) match_count;
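* Usage sketch (hypothetical driver code with illustrative parameter
* values; the actual entry points live in the ALE front end, and a
* reference renderer must be set with set_reference() before
* supplemental frames can be aligned):
*     d2::align::class_euclidean();
*     d2::align::set_perturb_upper(32, 0);
*     d2::align::set_perturb_lower(0.125, 0);
*     d2::transformation t = d2::align::of(frame_index);
*     int ok = d2::align::match(frame_index);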
3701 template<class diff_trans>
3702 void *d2::align::diff_stat_generic<diff_trans>::diff_subdomain(void *args) {
3704 subdomain_args *sargs = (subdomain_args *) args;
3706 struct scale_cluster c = sargs->c;
3707 std::vector<run> runs = sargs->runs;
3708 int ax_count = sargs->ax_count;
3709 int f = sargs->f;
3710 int i_min = sargs->i_min;
3711 int i_max = sargs->i_max;
3712 int j_min = sargs->j_min;
3713 int j_max = sargs->j_max;
3714 int subdomain = sargs->subdomain;
3716 assert (reference_image);
3718 point offset = c.accum->offset();
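* Visit this subdomain's share of the bounds (possibly a Monte Carlo
* sample of pixels), accumulating difference statistics for the most
* recent run.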
3720 for (mc_iterate m(i_min, i_max, j_min, j_max, subdomain); !m.done(); m++) {
3722 int i = m.get_i();
3723 int j = m.get_j();
3726 * Check reference frame definition.
3729 if (!((pixel) c.accum->get_pixel(i, j)).finite()
3730 || !(((pixel) c.certainty->get_pixel(i, j)).minabs_norm() > 0))
3731 continue;
3734 * Check for exclusion in render coordinates.
3737 if (ref_excluded(i, j, offset, c.ax_parameters, ax_count))
3738 continue;
3741 * Transform
3744 struct point q, r = point::undefined();
3746 q = (c.input_scale < 1.0 && interpolant == NULL)
3747 ? runs.back().offset.scaled_inverse_transform(
3748 point(i + offset[0], j + offset[1]))
3749 : runs.back().offset.unscaled_inverse_transform(
3750 point(i + offset[0], j + offset[1]));
3752 if (runs.size() == 2) {
3753 r = (c.input_scale < 1.0)
3754 ? runs.front().offset.scaled_inverse_transform(
3755 point(i + offset[0], j + offset[1]))
3756 : runs.front().offset.unscaled_inverse_transform(
3757 point(i + offset[0], j + offset[1]));
3760 ale_pos ti = q[0];
3761 ale_pos tj = q[1];
3764 * Check that the transformed coordinates are within
3765 * the boundaries of array c.input and that they
3766 * are not subject to exclusion.
3768 * Also, check that the weight value in the accumulated array
3769 * is nonzero, unless we know it is nonzero by virtue of the
3770 * fact that it falls within the region of the original frame
3771 * (e.g. when we're not increasing image extents).
3774 if (input_excluded(ti, tj, c.ax_parameters, ax_count))
3775 continue;
3777 if (input_excluded(r[0], r[1], c.ax_parameters, ax_count))
3778 r = point::undefined();
3781 * Check the boundaries of the input frame
3784 if (!(ti >= 0
3785 && ti <= c.input->height() - 1
3786 && tj >= 0
3787 && tj <= c.input->width() - 1))
3788 continue;
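* If the secondary point r falls outside the input frame it is only
* invalidated; the sample for the primary run is still taken.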
3790 if (!(r[0] >= 0
3791 && r[0] <= c.input->height() - 1
3792 && r[1] >= 0
3793 && r[1] <= c.input->width() - 1))
3794 r = point::undefined();
3796 sargs->runs.back().sample(f, c, i, j, q, r, runs.front());
3799 return NULL;
3802 #endif