1 // Copyright 2002, 2004, 2007 David Hilvert <dhilvert@auricle.dyndns.org>,
2 // <dhilvert@ugcs.caltech.edu>
4 /* This file is part of the Anti-Lamenessing Engine.
6 The Anti-Lamenessing Engine is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 The Anti-Lamenessing Engine is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with the Anti-Lamenessing Engine; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * align.h: Handle alignment of frames.
29 #include "transformation.h"
40 * Private data members
43 static ale_pos scale_factor
;
46 * Original frame transformation
48 static transformation orig_t
;
51 * Keep data older than latest
54 static transformation
*kept_t
;
58 * Transformation file handlers
61 static tload_t
*tload
;
62 static tsave_t
*tsave
;
65 * Control point variables
68 static const point
**cp_array
;
69 static unsigned int cp_count
;
72 * Reference rendering to align against
75 static render
*reference
;
76 static filter::scaled_filter
*interpolant
;
77 static const image
*reference_image
;
78 static const image
*reference_defined
;
81 * Per-pixel alignment weight map
84 static const image
*weight_map
;
87 * Frequency-dependent alignment weights
90 static double horiz_freq_cut
;
91 static double vert_freq_cut
;
92 static double avg_freq_cut
;
93 static const char *fw_output
;
96 * Algorithmic alignment weighting
99 static const char *wmx_exec
;
100 static const char *wmx_file
;
101 static const char *wmx_defs
;
104 * Non-certainty alignment weights
107 static image
*alignment_weights
;
110 * Latest transformation.
113 static transformation latest_t
;
116 * Flag indicating whether the latest transformation
117 * resulted in a match.
120 static int latest_ok
;
123 * Frame number most recently aligned.
129 * Exposure registration
131 * 0. Preserve the original exposure of images.
133 * 1. Match exposure between images.
135 * 2. Use only image metadata for registering exposure.
138 static int _exp_register
;
143 * 0. Translation only. Only adjust the x and y position of images.
144 * Do not rotate input images or perform projective transformations.
146 * 1. Euclidean transformations only. Adjust the x and y position
147 * of images and the orientation of the image about the image center.
149 * 2. Perform general projective transformations. See the file gpt.h
150 * for more information about general projective transformations.
153 static int alignment_class
;
156 * Default initial alignment type.
158 * 0. Identity transformation.
160 * 1. Most recently accepted frame's final transformation.
163 static int default_initial_alignment_type
;
166 * Projective group behavior
168 * 0. Perturb in output coordinates.
170 * 1. Perturb in source coordinates
173 static int perturb_type
;
178 * This structure contains alignment state information. The change
179 * between the non-default old initial alignment and old final
180 * alignment is used to adjust the non-default current initial
181 * alignment. If either the old or new initial alignment is a default
182 * alignment, the old --follow semantics are preserved.
186 transformation old_initial_alignment
;
187 transformation old_final_alignment
;
188 transformation default_initial_alignment
;
190 std::vector
<int> is_default
;
191 const image
*input_frame
;
195 old_initial_alignment(transformation::eu_identity()),
196 old_final_alignment(transformation::eu_identity()),
197 default_initial_alignment(transformation::eu_identity()),
205 const image
*get_input_frame() const {
209 void set_is_default(unsigned int index
, int value
) {
212 * Expand the array, if necessary.
214 if (index
== is_default
.size());
215 is_default
.resize(index
+ 1);
217 assert (index
< is_default
.size());
218 is_default
[index
] = value
;
221 int get_is_default(unsigned int index
) {
222 assert (index
< is_default
.size());
223 return is_default
[index
];
226 transformation
get_default() {
227 return default_initial_alignment
;
230 void set_default(transformation t
) {
231 default_initial_alignment
= t
;
234 void default_set_original_bounds(const image
*i
) {
235 default_initial_alignment
.set_original_bounds(i
);
238 void set_final(transformation t
) {
239 old_final_alignment
= t
;
242 void set_input_frame(const image
*i
) {
247 * Implement new delta --follow semantics.
249 * If we have a transformation T such that
251 * prev_final == T(prev_init)
255 * current_init_follow == T(current_init)
257 * We can calculate T as follows:
259 * T == prev_final(prev_init^-1)
261 * Where ^-1 is the inverse operator.
263 static trans_single
follow(trans_single a
, trans_single b
, trans_single c
) {
266 if (alignment_class
== 0) {
268 * Translational transformations
271 ale_pos t0
= -a
.eu_get(0) + b
.eu_get(0);
272 ale_pos t1
= -a
.eu_get(1) + b
.eu_get(1);
277 } else if (alignment_class
== 1) {
279 * Euclidean transformations
282 ale_pos t2
= -a
.eu_get(2) + b
.eu_get(2);
286 point
p( c
.scaled_height()/2 + c
.eu_get(0) - a
.eu_get(0),
287 c
.scaled_width()/2 + c
.eu_get(1) - a
.eu_get(1) );
289 p
= b
.transform_scaled(p
);
291 cc
.eu_modify(0, p
[0] - c
.scaled_height()/2 - c
.eu_get(0));
292 cc
.eu_modify(1, p
[1] - c
.scaled_width()/2 - c
.eu_get(1));
294 } else if (alignment_class
== 2) {
296 * Projective transformations
301 p
[0] = b
.transform_scaled(a
302 . scaled_inverse_transform(c
.transform_scaled(point( 0 , 0 ))));
303 p
[1] = b
.transform_scaled(a
304 . scaled_inverse_transform(c
.transform_scaled(point(c
.scaled_height(), 0 ))));
305 p
[2] = b
.transform_scaled(a
306 . scaled_inverse_transform(c
.transform_scaled(point(c
.scaled_height(), c
.scaled_width()))));
307 p
[3] = b
.transform_scaled(a
308 . scaled_inverse_transform(c
.transform_scaled(point( 0 , c
.scaled_width()))));
317 * For multi-alignment following, we use the following approach, not
318 * guaranteed to work with large changes in scene or perspective, but
319 * which should be somewhat flexible:
323 * t[][] calculated final alignments
324 * s[][] alignments as loaded from file
327 * fundamental (primary) 0
328 * non-fundamental (non-primary) m!=0
330 * follow(a, b, c) applying the (a, b) delta T=b(a^-1) to c
332 * following in the case of missing file data might be generated by
334 * t[n+1][0] = t[n][0]
335 * t[n+1][m!=0] = follow(t[n][m'], t[n+1][m'], t[n][m])
337 * cases with all noted file data present might be generated by
339 * t[n+1][0] = follow(s[n][0], t[n][0], s[n+1][0])
340 * t[n+1][m!=0] = follow(s[n+1][m'], t[n+1][m'], s[n+1][m])
342 * For non-following behavior, or where assigning the above is
343 * impossible, we assign the following default
345 * t[n+1][0] = Identity
346 * t[n+1][m!=0] = t[n+1][m']
349 void init_frame_alignment_primary(transformation
*offset
, int lod
, ale_pos perturb
) {
351 if (perturb
> 0 && !old_is_default
&& !get_is_default(0)
352 && default_initial_alignment_type
== 1) {
355 * Apply following logic for the primary element.
358 ui::get()->following();
360 trans_single new_offset
= follow(old_initial_alignment
.get_element(0),
361 old_final_alignment
.get_element(0),
362 offset
->get_element(0));
364 old_initial_alignment
= *offset
;
366 offset
->set_element(0, new_offset
);
368 ui::get()->set_offset(new_offset
);
370 old_initial_alignment
= *offset
;
373 is_default
.resize(old_initial_alignment
.stack_depth());
376 void init_frame_alignment_nonprimary(transformation
*offset
,
377 int lod
, ale_pos perturb
, unsigned int index
) {
381 unsigned int parent_index
= offset
->parent_index(index
);
384 && !get_is_default(parent_index
)
385 && !get_is_default(index
)
386 && default_initial_alignment_type
== 1) {
389 * Apply file-based following logic for the
393 ui::get()->following();
395 trans_single new_offset
= follow(old_initial_alignment
.get_element(parent_index
),
396 offset
->get_element(parent_index
),
397 offset
->get_element(index
));
399 old_initial_alignment
.set_element(index
, offset
->get_element(index
));
400 offset
->set_element(index
, new_offset
);
402 ui::get()->set_offset(new_offset
);
407 offset
->get_coordinate(parent_index
);
411 && old_final_alignment
.exists(offset
->get_coordinate(parent_index
))
412 && old_final_alignment
.exists(offset
->get_current_coordinate())
413 && default_initial_alignment_type
== 1) {
416 * Apply nonfile-based following logic for
420 ui::get()->following();
423 * XXX: Although it is different, the below
424 * should be equivalent to the comment
428 trans_single a
= old_final_alignment
.get_element(offset
->get_coordinate(parent_index
));
429 trans_single b
= old_final_alignment
.get_element(offset
->get_current_coordinate());
430 trans_single c
= offset
->get_element(parent_index
);
432 trans_single new_offset
= follow(a
, b
, c
);
434 offset
->set_element(index
, new_offset
);
435 ui::get()->set_offset(new_offset
);
441 * Handle other cases.
444 if (get_is_default(index
)) {
445 offset
->set_element(index
, offset
->get_element(parent_index
));
446 ui::get()->set_offset(offset
->get_element(index
));
450 void init_default() {
452 if (default_initial_alignment_type
== 0) {
455 * Follow the transformation of the original frame,
456 * setting new image dimensions.
459 // astate->default_initial_alignment = orig_t;
460 default_initial_alignment
.set_current_element(orig_t
.get_element(0));
461 default_initial_alignment
.set_dimensions(input_frame
);
463 } else if (default_initial_alignment_type
== 1)
466 * Follow previous transformation, setting new image
470 default_initial_alignment
.set_dimensions(input_frame
);
475 old_is_default
= get_is_default(0);
480 * Alignment for failed frames -- default or optimal?
482 * A frame that does not meet the match threshold can be assigned the
483 * best alignment found, or can be assigned its alignment default.
486 static int is_fail_default
;
491 * 0. Align images with an error contribution from each color channel.
493 * 1. Align images with an error contribution only from the green channel.
494 * Other color channels do not affect alignment.
496 * 2. Align images using a summation of channels. May be useful when dealing
497 * with images that have high frequency color ripples due to color aliasing.
500 static int channel_alignment_type
;
503 * Error metric exponent
506 static ale_real metric_exponent
;
512 static float match_threshold
;
515 * Perturbation lower and upper bounds.
518 static ale_pos perturb_lower
;
519 static int perturb_lower_percent
;
520 static ale_pos perturb_upper
;
521 static int perturb_upper_percent
;
524 * Preferred level-of-detail scale factor is 2^lod_preferred/perturb.
527 static int lod_preferred
;
530 * Minimum dimension for reduced LOD.
533 static int min_dimension
;
536 * Maximum rotational perturbation
539 static ale_pos rot_max
;
542 * Barrel distortion alignment multiplier
545 static ale_pos bda_mult
;
548 * Barrel distortion maximum adjustment rate
551 static ale_pos bda_rate
;
554 * Alignment match sum
557 static ale_accum match_sum
;
560 * Alignment match count.
563 static int match_count
;
566 * Monte Carlo parameter
572 * Certainty weight flag
574 * 0. Don't use certainty weights for alignment.
576 * 1. Use certainty weights for alignment.
579 static int certainty_weights
;
582 * Global search parameter
584 * 0. Local: Local search only.
585 * 1. Inner: Alignment reference image inner region
586 * 2. Outer: Alignment reference image outer region
587 * 3. All: Alignment reference image inner and outer regions.
588 * 4. Central: Inner if possible; else, best of inner and outer.
589 * 5. Points: Align by control points.
595 * Minimum overlap for global searches
598 static ale_accum _gs_mo
;
599 static int gs_mo_percent
;
602 * Minimum certainty for multi-alignment element registration.
605 static ale_real _ma_cert
;
611 static exclusion
*ax_parameters
;
615 * Types for scale clusters.
618 struct nl_scale_cluster
{
619 const image
*accum_max
;
620 const image
*accum_min
;
621 const image
*certainty_max
;
622 const image
*certainty_min
;
623 const image
*aweight_max
;
624 const image
*aweight_min
;
625 exclusion
*ax_parameters
;
628 const image
*input_certainty_max
;
629 const image
*input_certainty_min
;
630 const image
*input_max
;
631 const image
*input_min
;
634 struct scale_cluster
{
636 const image
*certainty
;
637 const image
*aweight
;
638 exclusion
*ax_parameters
;
641 const image
*input_certainty
;
644 nl_scale_cluster
*nl_scale_clusters
;
648 * Check for exclusion region coverage in the reference
651 static int ref_excluded(int i
, int j
, point offset
, exclusion
*params
, int param_count
) {
652 for (int idx
= 0; idx
< param_count
; idx
++)
653 if (params
[idx
].type
== exclusion::RENDER
654 && i
+ offset
[0] >= params
[idx
].x
[0]
655 && i
+ offset
[0] <= params
[idx
].x
[1]
656 && j
+ offset
[1] >= params
[idx
].x
[2]
657 && j
+ offset
[1] <= params
[idx
].x
[3])
664 * Check for exclusion region coverage in the input
667 static int input_excluded(ale_pos ti
, ale_pos tj
, exclusion
*params
, int param_count
) {
668 for (int idx
= 0; idx
< param_count
; idx
++)
669 if (params
[idx
].type
== exclusion::FRAME
670 && ti
>= params
[idx
].x
[0]
671 && ti
<= params
[idx
].x
[1]
672 && tj
>= params
[idx
].x
[2]
673 && tj
<= params
[idx
].x
[3])
680 * Overlap function. Determines the number of pixels in areas where
681 * the arrays overlap. Uses the reference array's notion of pixel
684 static unsigned int overlap(struct scale_cluster c
, transformation t
, int ax_count
) {
685 assert (reference_image
);
687 unsigned int result
= 0;
689 point offset
= c
.accum
->offset();
691 for (unsigned int i
= 0; i
< c
.accum
->height(); i
++)
692 for (unsigned int j
= 0; j
< c
.accum
->width(); j
++) {
694 if (ref_excluded(i
, j
, offset
, c
.ax_parameters
, ax_count
))
703 q
= (c
.input_scale
< 1.0 && interpolant
== NULL
)
704 ? t
.scaled_inverse_transform(
705 point(i
+ offset
[0], j
+ offset
[1]))
706 : t
.unscaled_inverse_transform(
707 point(i
+ offset
[0], j
+ offset
[1]));
713 * Check that the transformed coordinates are within
714 * the boundaries of array c.input, and check that the
715 * weight value in the accumulated array is nonzero,
716 * unless we know it is nonzero by virtue of the fact
717 * that it falls within the region of the original
718 * frame (e.g. when we're not increasing image
719 * extents). Also check for frame exclusion.
722 if (input_excluded(ti
, tj
, c
.ax_parameters
, ax_count
))
726 && ti
<= c
.input
->height() - 1
728 && tj
<= c
.input
->width() - 1
729 && c
.certainty
->get_pixel(i
, j
)[0] != 0)
737 * Monte carlo iteration class.
739 * Monte Carlo alignment has been used for statistical comparisons in
740 * spatial registration, and is now used for tonal registration
741 * and final match calculation.
745 * We use a random process for which the expected number of sampled
746 * pixels is +/- .000003 from the coverage in the range [.005,.995] for
747 * an image with 100,000 pixels. (The actual number may still deviate
748 * from the expected number by more than this amount, however.) The
749 * method is as follows:
751 * We have coverage == USE/ALL, or (expected # pixels to use)/(# total
752 * pixels). We derive from this SKIP/USE.
754 * SKIP/USE == (SKIP/ALL)/(USE/ALL) == (1 - (USE/ALL))/(USE/ALL)
756 * Once we have SKIP/USE, we know the expected number of pixels to skip
757 * in each iteration. We use a random selection process that provides
758 * SKIP/USE close to this calculated value.
760 * If we can draw uniformly to select the number of pixels to skip, we
761 * do. In this case, the maximum number of pixels to skip is twice the
764 * If we cannot draw uniformly, we still assign equal probability to
765 * each of the integer values in the interval [0, 2 * (SKIP/USE)], but
766 * assign an unequal amount to the integer value ceil(2 * SKIP/USE) +
771 * When reseeding the random number generator, we want the same set of
772 * pixels to be used in cases where two alignment options are compared.
773 * If we wanted to avoid bias from repeatedly utilizing the same seed,
774 * we could seed with the number of the frame most recently aligned:
778 * However, in cursory tests, it seems okay to just use the default
779 * seed of 1, and so we do this, since it is simpler; both of these
780 * approaches to reseeding achieve better results than not reseeding.
781 * (1 is the default seed according to the GNU Manual Page for
784 * For subdomain calculations, we vary the seed by adding the subdomain
791 unsigned int index_max
;
800 mc_iterate(int _i_min
, int _i_max
, int _j_min
, int _j_max
, unsigned int subdomain
)
810 if (i_max
< i_min
|| j_max
< j_min
)
813 index_max
= (i_max
- i_min
) * (j_max
- j_min
);
815 if (index_max
< 500 || _mc
> 100 || _mc
<= 0)
818 coverage
= _mc
/ 100;
820 double su
= (1 - coverage
) / coverage
;
822 mc_max
= (floor(2*su
) * (1 + floor(2*su
)) + 2*su
)
823 / (2 + 2 * floor(2*su
) - 2*su
);
825 rng
.seed(1 + subdomain
);
828 #if ALE_COORDINATES == FIXED16
830 * XXX: This calculation might not yield the correct
833 index
= -1 + (int) ceil(((ale_pos
) mc_max
+1)
834 / (ale_pos
) ( (1 + 0xffffff)
835 / (1 + (rng
.get() & 0xffffff))));
837 index
= -1 + (int) ceil((ale_accum
) (mc_max
+1)
838 * ( (1 + ((ale_accum
) (rng
.get())) )
839 / (1 + ((ale_accum
) RAND_MAX
)) ));
845 return index
/ (j_max
- j_min
) + i_min
;
849 return index
% (j_max
- j_min
) + j_min
;
852 void operator++(int whats_this_for
) {
854 #if ALE_COORDINATES == FIXED16
855 index
+= (int) ceil(((ale_pos
) mc_max
+1)
856 / (ale_pos
) ( (1 + 0xffffff)
857 / (1 + (rng
.get() & 0xffffff))));
859 index
+= (int) ceil((ale_accum
) (mc_max
+1)
860 * ( (1 + ((ale_accum
) (rng
.get())) )
861 / (1 + ((ale_accum
) RAND_MAX
)) ));
867 return (index
>= index_max
);
872 * Not-quite-symmetric difference function. Determines the difference in areas
873 * where the arrays overlap. Uses the reference array's notion of pixel positions.
875 * For the purposes of determining the difference, this function divides each
876 * pixel value by the corresponding image's average pixel magnitude, unless we
879 * a) Extending the boundaries of the image, or
881 * b) following the previous frame's transform
883 * If we are doing monte-carlo pixel sampling for alignment, we
884 * typically sample a subset of available pixels; otherwise, we sample
889 template<class diff_trans
>
890 class diff_stat_generic
{
892 transformation::elem_bounds_t elem_bounds
;
902 ale_accum centroid
[2], centroid_divisor
;
903 ale_accum de_centroid
[2], de_centroid_v
, de_sum
;
910 min
= point::posinf();
911 max
= point::neginf();
915 centroid_divisor
= 0;
925 void init(diff_trans _offset
) {
931 * Required for STL sanity.
933 run() : offset(diff_trans::eu_identity()) {
937 run(diff_trans _offset
) : offset(_offset
) {
941 void add(const run
&_run
) {
942 result
+= _run
.result
;
943 divisor
+= _run
.divisor
;
945 for (int d
= 0; d
< 2; d
++) {
946 if (min
[d
] > _run
.min
[d
])
947 min
[d
] = _run
.min
[d
];
948 if (max
[d
] < _run
.max
[d
])
949 max
[d
] = _run
.max
[d
];
950 centroid
[d
] += _run
.centroid
[d
];
951 de_centroid
[d
] += _run
.de_centroid
[d
];
954 centroid_divisor
+= _run
.centroid_divisor
;
955 de_centroid_v
+= _run
.de_centroid_v
;
956 de_sum
+= _run
.de_sum
;
959 run(const run
&_run
) : offset(_run
.offset
) {
972 run
&operator=(const run
&_run
) {
990 ale_accum
get_error() const {
991 return pow(result
/ divisor
, 1/(ale_accum
) metric_exponent
);
994 void sample(int f
, scale_cluster c
, int i
, int j
, point t
, point u
,
995 const run
&comparison
) {
997 pixel pa
= c
.accum
->get_pixel(i
, j
);
999 ale_real this_result
[2] = { 0, 0 };
1000 ale_real this_divisor
[2] = { 0, 0 };
1004 weight
[0] = pixel(1, 1, 1);
1005 weight
[1] = pixel(1, 1, 1);
1007 pixel tm
= offset
.get_tonal_multiplier(point(i
, j
) + c
.accum
->offset());
1009 if (interpolant
!= NULL
) {
1010 interpolant
->filtered(i
, j
, &p
[0], &weight
[0], 1, f
);
1012 if (weight
[0].min_norm() > ale_real_weight_floor
) {
1019 p
[0] = c
.input
->get_bl(t
);
1025 p
[1] = c
.input
->get_bl(u
);
1034 if (certainty_weights
== 1) {
1037 * For speed, use arithmetic interpolation (get_bl(.))
1038 * instead of geometric (get_bl(., 1))
1041 weight
[0] *= c
.input_certainty
->get_bl(t
);
1043 weight
[1] *= c
.input_certainty
->get_bl(u
);
1044 weight
[0] *= c
.certainty
->get_pixel(i
, j
);
1045 weight
[1] *= c
.certainty
->get_pixel(i
, j
);
1048 if (c
.aweight
!= NULL
) {
1049 weight
[0] *= c
.aweight
->get_pixel(i
, j
);
1050 weight
[1] *= c
.aweight
->get_pixel(i
, j
);
1054 * Update sampling area statistics
1066 centroid
[0] += (weight
[0][0] + weight
[0][1] + weight
[0][2]) * i
;
1067 centroid
[1] += (weight
[0][0] + weight
[0][1] + weight
[0][2]) * j
;
1068 centroid_divisor
+= (weight
[0][0] + weight
[0][1] + weight
[0][2]);
1071 * Determine alignment type.
1074 for (int m
= 0; m
< (u
.defined() ? 2 : 1); m
++)
1075 if (channel_alignment_type
== 0) {
1077 * Align based on all channels.
1081 for (int k
= 0; k
< 3; k
++) {
1082 ale_real achan
= pa
[k
];
1083 ale_real bchan
= p
[m
][k
];
1085 this_result
[m
] += weight
[m
][k
] * pow(fabs(achan
- bchan
), metric_exponent
);
1086 this_divisor
[m
] += weight
[m
][k
] * pow(achan
> bchan
? achan
: bchan
, metric_exponent
);
1088 } else if (channel_alignment_type
== 1) {
1090 * Align based on the green channel.
1093 ale_real achan
= pa
[1];
1094 ale_real bchan
= p
[m
][1];
1096 this_result
[m
] = weight
[m
][1] * pow(fabs(achan
- bchan
), metric_exponent
);
1097 this_divisor
[m
] = weight
[m
][1] * pow(achan
> bchan
? achan
: bchan
, metric_exponent
);
1098 } else if (channel_alignment_type
== 2) {
1100 * Align based on the sum of all channels.
1107 for (int k
= 0; k
< 3; k
++) {
1110 wsum
+= weight
[m
][k
] / 3;
1113 this_result
[m
] = wsum
* pow(fabs(asum
- bsum
), metric_exponent
);
1114 this_divisor
[m
] = wsum
* pow(asum
> bsum
? asum
: bsum
, metric_exponent
);
1118 // ale_real de = fabs(this_result[0] / this_divisor[0]
1119 // - this_result[1] / this_divisor[1]);
1120 ale_real de
= fabs(this_result
[0] - this_result
[1]);
1122 de_centroid
[0] += de
* (ale_real
) i
;
1123 de_centroid
[1] += de
* (ale_real
) j
;
1125 de_centroid_v
+= de
* (ale_real
) t
.lengthto(u
);
1130 result
+= (this_result
[0]);
1131 divisor
+= (this_divisor
[0]);
1134 void rescale(ale_pos scale
) {
1135 offset
.rescale(scale
);
1137 de_centroid
[0] *= scale
;
1138 de_centroid
[1] *= scale
;
1139 de_centroid_v
*= scale
;
1142 point
get_centroid() {
1143 point result
= point(centroid
[0] / centroid_divisor
, centroid
[1] / centroid_divisor
);
1145 assert (finite(centroid
[0])
1146 && finite(centroid
[1])
1147 && (result
.defined() || centroid_divisor
== 0));
1152 point
get_error_centroid() {
1153 point result
= point(de_centroid
[0] / de_sum
, de_centroid
[1] / de_sum
);
1158 ale_pos
get_error_perturb() {
1159 ale_pos result
= de_centroid_v
/ de_sum
;
1167 * When non-empty, runs.front() is best, runs.back() is
1171 std::vector
<run
> runs
;
1174 * old_runs stores the latest available perturbation set for
1175 * each multi-alignment element.
1178 typedef int run_index
;
1179 std::map
<run_index
, run
> old_runs
;
1181 static void *diff_subdomain(void *args
);
1183 struct subdomain_args
{
1184 struct scale_cluster c
;
1185 std::vector
<run
> runs
;
1188 int i_min
, i_max
, j_min
, j_max
;
1192 struct scale_cluster si
;
1196 std::vector
<ale_pos
> perturb_multipliers
;
1199 void diff(struct scale_cluster c
, const diff_trans
&t
,
1200 int _ax_count
, int f
) {
1202 if (runs
.size() == 2)
1205 runs
.push_back(run(t
));
1208 ax_count
= _ax_count
;
1211 ui::get()->d2_align_sample_start();
1213 if (interpolant
!= NULL
) {
1216 * XXX: This has not been tested, and may be completely
1220 transformation tt
= transformation::eu_identity();
1221 tt
.set_current_element(t
);
1222 interpolant
->set_parameters(tt
, c
.input
, c
.accum
->offset());
1227 N
= thread::count();
1229 pthread_t
*threads
= (pthread_t
*) malloc(sizeof(pthread_t
) * N
);
1230 pthread_attr_t
*thread_attr
= (pthread_attr_t
*) malloc(sizeof(pthread_attr_t
) * N
);
1236 subdomain_args
*args
= new subdomain_args
[N
];
1238 transformation::elem_bounds_int_t b
= elem_bounds
.scale_to_bounds(c
.accum
->height(), c
.accum
->width());
1240 // fprintf(stdout, "[%d %d] [%d %d]\n",
1241 // global_i_min, global_i_max, global_j_min, global_j_max);
1243 for (int ti
= 0; ti
< N
; ti
++) {
1245 args
[ti
].runs
= runs
;
1246 args
[ti
].ax_count
= ax_count
;
1248 args
[ti
].i_min
= b
.imin
+ ((b
.imax
- b
.imin
) * ti
) / N
;
1249 args
[ti
].i_max
= b
.imin
+ ((b
.imax
- b
.imin
) * (ti
+ 1)) / N
;
1250 args
[ti
].j_min
= b
.jmin
;
1251 args
[ti
].j_max
= b
.jmax
;
1252 args
[ti
].subdomain
= ti
;
1254 pthread_attr_init(&thread_attr
[ti
]);
1255 pthread_attr_setdetachstate(&thread_attr
[ti
], PTHREAD_CREATE_JOINABLE
);
1256 pthread_create(&threads
[ti
], &thread_attr
[ti
], diff_subdomain
, &args
[ti
]);
1258 diff_subdomain(&args
[ti
]);
1262 for (int ti
= 0; ti
< N
; ti
++) {
1264 pthread_join(threads
[ti
], NULL
);
1266 runs
.back().add(args
[ti
].runs
.back());
1276 ui::get()->d2_align_sample_stop();
1282 std::vector
<diff_trans
> t_array
;
1284 for (unsigned int r
= 0; r
< runs
.size(); r
++) {
1285 t_array
.push_back(runs
[r
].offset
);
1290 for (unsigned int r
= 0; r
< t_array
.size(); r
++)
1291 diff(si
, t_array
[r
], ax_count
, frame
);
1297 assert(runs
.size() >= 2);
1298 assert(runs
[0].offset
.scale() == runs
[1].offset
.scale());
1300 return (runs
[1].get_error() < runs
[0].get_error()
1301 || (!finite(runs
[0].get_error()) && finite(runs
[1].get_error())));
1304 int better_defined() {
1305 assert(runs
.size() >= 2);
1306 assert(runs
[0].offset
.scale() == runs
[1].offset
.scale());
1308 return (runs
[1].get_error() < runs
[0].get_error());
1311 diff_stat_generic(transformation::elem_bounds_t e
)
1312 : runs(), old_runs(), perturb_multipliers() {
1316 run_index
get_run_index(unsigned int perturb_index
) {
1317 return perturb_index
;
1320 run
&get_run(unsigned int perturb_index
) {
1321 run_index index
= get_run_index(perturb_index
);
1323 assert(old_runs
.count(index
));
1324 return old_runs
[index
];
1327 void rescale(ale_pos scale
, scale_cluster _si
) {
1328 assert(runs
.size() == 1);
1332 runs
[0].rescale(scale
);
1337 ~diff_stat_generic() {
1340 diff_stat_generic
&operator=(const diff_stat_generic
&dst
) {
1342 * Copy run information.
1345 old_runs
= dst
.old_runs
;
1348 * Copy diff variables
1351 ax_count
= dst
.ax_count
;
1353 perturb_multipliers
= dst
.perturb_multipliers
;
1354 elem_bounds
= dst
.elem_bounds
;
1359 diff_stat_generic(const diff_stat_generic
&dst
) : runs(), old_runs(),
1360 perturb_multipliers() {
1364 void set_elem_bounds(transformation::elem_bounds_t e
) {
1368 ale_accum
get_result() {
1369 assert(runs
.size() == 1);
1370 return runs
[0].result
;
1373 ale_accum
get_divisor() {
1374 assert(runs
.size() == 1);
1375 return runs
[0].divisor
;
1378 diff_trans
get_offset() {
1379 assert(runs
.size() == 1);
1380 return runs
[0].offset
;
1383 int operator!=(diff_stat_generic
¶m
) {
1384 return (get_error() != param
.get_error());
1387 int operator==(diff_stat_generic
¶m
) {
1388 return !(operator!=(param
));
1391 ale_pos
get_error_perturb() {
1392 assert(runs
.size() == 1);
1393 return runs
[0].get_error_perturb();
1396 ale_accum
get_error() const {
1397 assert(runs
.size() == 1);
1398 return runs
[0].get_error();
1403 * Get the set of transformations produced by a given perturbation
1405 void get_perturb_set(std::vector
<diff_trans
> *set
,
1406 ale_pos adj_p
, ale_pos adj_o
, ale_pos adj_b
,
1407 ale_pos
*current_bd
, ale_pos
*modified_bd
,
1408 std::vector
<ale_pos
> multipliers
= std::vector
<ale_pos
>()) {
1410 assert(runs
.size() == 1);
1412 diff_trans
test_t(diff_trans::eu_identity());
1415 * Translational or euclidean transformation
1418 for (unsigned int i
= 0; i
< 2; i
++)
1419 for (unsigned int s
= 0; s
< 2; s
++) {
1421 if (!multipliers
.size())
1422 multipliers
.push_back(1);
1424 assert(finite(multipliers
[0]));
1426 test_t
= get_offset();
1428 // test_t.eu_modify(i, (s ? -adj_p : adj_p) * multipliers[0]);
1429 test_t
.translate((i
? point(1, 0) : point(0, 1))
1430 * (s
? -adj_p
: adj_p
)
1433 test_t
.snap(adj_p
/ 2);
1435 set
->push_back(test_t
);
1436 multipliers
.erase(multipliers
.begin());
1440 if (alignment_class
> 0)
1441 for (unsigned int s
= 0; s
< 2; s
++) {
1443 if (!multipliers
.size())
1444 multipliers
.push_back(1);
1446 assert(multipliers
.size());
1447 assert(finite(multipliers
[0]));
1449 if (!(adj_o
* multipliers
[0] < rot_max
))
1452 ale_pos adj_s
= (s
? 1 : -1) * adj_o
* multipliers
[0];
1454 test_t
= get_offset();
1456 run_index ori
= get_run_index(set
->size());
1457 point centroid
= point::undefined();
1459 if (!old_runs
.count(ori
))
1460 ori
= get_run_index(0);
1462 if (!centroid
.finite() && old_runs
.count(ori
)) {
1463 centroid
= old_runs
[ori
].get_error_centroid();
1465 if (!centroid
.finite())
1466 centroid
= old_runs
[ori
].get_centroid();
1468 centroid
*= test_t
.scale()
1469 / old_runs
[ori
].offset
.scale();
1472 if (!centroid
.finite() && !test_t
.is_projective()) {
1473 test_t
.eu_modify(2, adj_s
);
1474 } else if (!centroid
.finite()) {
1475 centroid
= point(si
.input
->height() / 2,
1476 si
.input
->width() / 2);
1478 test_t
.rotate(centroid
+ si
.accum
->offset(),
1481 test_t
.rotate(centroid
+ si
.accum
->offset(),
1485 test_t
.snap(adj_p
/ 2);
1487 set
->push_back(test_t
);
1488 multipliers
.erase(multipliers
.begin());
1491 if (alignment_class
== 2) {
1494 * Projective transformation
1497 for (unsigned int i
= 0; i
< 4; i
++)
1498 for (unsigned int j
= 0; j
< 2; j
++)
1499 for (unsigned int s
= 0; s
< 2; s
++) {
1501 if (!multipliers
.size())
1502 multipliers
.push_back(1);
1504 assert(multipliers
.size());
1505 assert(finite(multipliers
[0]));
1507 ale_pos adj_s
= (s
? -1 : 1) * adj_p
* multipliers
[0];
1509 test_t
= get_offset();
1511 if (perturb_type
== 0)
1512 test_t
.gpt_modify_bounded(j
, i
, adj_s
, elem_bounds
.scale_to_bounds(si
.accum
->height(), si
.accum
->width()));
1513 else if (perturb_type
== 1)
1514 test_t
.gr_modify(j
, i
, adj_s
);
1518 test_t
.snap(adj_p
/ 2);
1520 set
->push_back(test_t
);
1521 multipliers
.erase(multipliers
.begin());
1530 if (bda_mult
!= 0 && adj_b
!= 0) {
1532 for (unsigned int d
= 0; d
< get_offset().bd_count(); d
++)
1533 for (unsigned int s
= 0; s
< 2; s
++) {
1535 if (!multipliers
.size())
1536 multipliers
.push_back(1);
1538 assert (multipliers
.size());
1539 assert (finite(multipliers
[0]));
1541 ale_pos adj_s
= (s
? -1 : 1) * adj_b
* multipliers
[0];
1543 if (bda_rate
> 0 && fabs(modified_bd
[d
] + adj_s
- current_bd
[d
]) > bda_rate
)
1546 diff_trans test_t
= get_offset();
1548 test_t
.bd_modify(d
, adj_s
);
1550 set
->push_back(test_t
);
1556 assert(runs
.size() == 2);
1562 assert(runs
.size() == 2);
1566 void perturb_test(ale_pos perturb
, ale_pos adj_p
, ale_pos adj_o
, ale_pos adj_b
,
1567 ale_pos
*current_bd
, ale_pos
*modified_bd
, int stable
) {
1569 assert(runs
.size() == 1);
1571 std::vector
<diff_trans
> t_set
;
1573 if (perturb_multipliers
.size() == 0) {
1574 get_perturb_set(&t_set
, adj_p
, adj_o
, adj_b
,
1575 current_bd
, modified_bd
);
1577 for (unsigned int i
= 0; i
< t_set
.size(); i
++) {
1578 diff_stat_generic test
= *this;
1580 test
.diff(si
, t_set
[i
], ax_count
, frame
);
1584 if (finite(test
.get_error_perturb()))
1585 perturb_multipliers
.push_back(adj_p
/ test
.get_error_perturb());
1587 perturb_multipliers
.push_back(1);
1594 get_perturb_set(&t_set
, adj_p
, adj_o
, adj_b
, current_bd
, modified_bd
,
1595 perturb_multipliers
);
1597 int found_unreliable
= 1;
1598 std::vector
<int> tested(t_set
.size(), 0);
1600 for (unsigned int i
= 0; i
< t_set
.size(); i
++) {
1601 run_index ori
= get_run_index(i
);
1604 * Check for stability
1607 && old_runs
.count(ori
)
1608 && old_runs
[ori
].offset
== t_set
[i
])
1612 std::vector
<ale_pos
> perturb_multipliers_original
= perturb_multipliers
;
1614 while (found_unreliable
) {
1616 found_unreliable
= 0;
1618 for (unsigned int i
= 0; i
< t_set
.size(); i
++) {
1623 diff(si
, t_set
[i
], ax_count
, frame
);
1625 if (!(i
< perturb_multipliers
.size())
1626 || !finite(perturb_multipliers
[i
])) {
1628 perturb_multipliers
.resize(i
+ 1);
1630 if (finite(perturb_multipliers
[i
])
1632 && finite(adj_p
/ runs
[1].get_error_perturb())) {
1633 perturb_multipliers
[i
] =
1634 adj_p
/ runs
[1].get_error_perturb();
1636 found_unreliable
= 1;
1638 perturb_multipliers
[i
] = 1;
1643 run_index ori
= get_run_index(i
);
1645 if (old_runs
.count(ori
) == 0)
1646 old_runs
.insert(std::pair
<run_index
, run
>(ori
, runs
[1]));
1648 old_runs
[ori
] = runs
[1];
1650 if (finite(perturb_multipliers_original
[i
])
1651 && finite(runs
[1].get_error_perturb())
1653 && finite(perturb_multipliers_original
[i
] * adj_p
/ runs
[1].get_error_perturb()))
1654 perturb_multipliers
[i
] = perturb_multipliers_original
[i
]
1655 * adj_p
/ runs
[1].get_error_perturb();
1657 perturb_multipliers
[i
] = 1;
1662 && runs
[1].get_error() < runs
[0].get_error()
1663 && perturb_multipliers
[i
]
1664 / perturb_multipliers_original
[i
] < 2) {
1672 if (runs
.size() > 1)
1675 if (!found_unreliable
)
1682 typedef diff_stat_generic
<trans_single
> diff_stat_t
;
1683 typedef diff_stat_generic
<trans_multi
> diff_stat_multi
;
1687 * Adjust exposure for an aligned frame B against reference A.
1689 * Expects full-LOD images.
1691 * Note: This method does not use any weighting, by certainty or
1692 * otherwise, in the first exposure registration pass, as any bias of
1693 * weighting according to color may also bias the exposure registration
1694 * result; it does use weighting, including weighting by certainty
1695 * (even if certainty weighting is not specified), in the second pass,
1696 * under the assumption that weighting by certainty improves handling
1697 * of out-of-range highlights, and that bias of exposure measurements
1698 * according to color may generally be less harmful after spatial
1699 * registration has been performed.
1701 class exposure_ratio_iterate
: public thread::decompose_domain
{
1706 struct scale_cluster c
;
1707 const transformation
&t
;
1711 void prepare_subdomains(unsigned int N
) {
1712 asums
= new pixel_accum
[N
];
1713 bsums
= new pixel_accum
[N
];
1715 void subdomain_algorithm(unsigned int thread
,
1716 int i_min
, int i_max
, int j_min
, int j_max
) {
1718 point offset
= c
.accum
->offset();
1720 for (mc_iterate
m(i_min
, i_max
, j_min
, j_max
, thread
); !m
.done(); m
++) {
1722 unsigned int i
= (unsigned int) m
.get_i();
1723 unsigned int j
= (unsigned int) m
.get_j();
1725 if (ref_excluded(i
, j
, offset
, c
.ax_parameters
, ax_count
))
1734 q
= (c
.input_scale
< 1.0 && interpolant
== NULL
)
1735 ? t
.scaled_inverse_transform(
1736 point(i
+ offset
[0], j
+ offset
[1]))
1737 : t
.unscaled_inverse_transform(
1738 point(i
+ offset
[0], j
+ offset
[1]));
1741 * Check that the transformed coordinates are within
1742 * the boundaries of array c.input, that they are not
1743 * subject to exclusion, and that the weight value in
1744 * the accumulated array is nonzero.
1747 if (input_excluded(q
[0], q
[1], c
.ax_parameters
, ax_count
))
1751 && q
[0] <= c
.input
->height() - 1
1753 && q
[1] <= c
.input
->width() - 1
1754 && ((pixel
) c
.certainty
->get_pixel(i
, j
)).minabs_norm() != 0) {
1755 pixel a
= c
.accum
->get_pixel(i
, j
);
1758 b
= c
.input
->get_bl(q
);
1760 pixel weight
= ((c
.aweight
&& pass_number
)
1761 ? (pixel
) c
.aweight
->get_pixel(i
, j
)
1764 ? psqrt(c
.certainty
->get_pixel(i
, j
)
1765 * c
.input_certainty
->get_bl(q
, 1))
1768 asums
[thread
] += a
* weight
;
1769 bsums
[thread
] += b
* weight
;
1774 void finish_subdomains(unsigned int N
) {
1775 for (unsigned int n
= 0; n
< N
; n
++) {
1783 exposure_ratio_iterate(pixel_accum
*_asum
,
1785 struct scale_cluster _c
,
1786 const transformation
&_t
,
1788 int _pass_number
) : decompose_domain(0, _c
.accum
->height(),
1789 0, _c
.accum
->width()),
1795 ax_count
= _ax_count
;
1796 pass_number
= _pass_number
;
1799 exposure_ratio_iterate(pixel_accum
*_asum
,
1801 struct scale_cluster _c
,
1802 const transformation
&_t
,
1805 transformation::elem_bounds_int_t b
) : decompose_domain(b
.imin
, b
.imax
,
1812 ax_count
= _ax_count
;
1813 pass_number
= _pass_number
;
1817 static void set_exposure_ratio(unsigned int m
, struct scale_cluster c
,
1818 const transformation
&t
, int ax_count
, int pass_number
) {
1820 if (_exp_register
== 2) {
1822 * Use metadata only.
1824 ale_real gain_multiplier
= image_rw::exp(m
).get_gain_multiplier();
1825 pixel multiplier
= pixel(gain_multiplier
, gain_multiplier
, gain_multiplier
);
1827 image_rw::exp(m
).set_multiplier(multiplier
);
1828 ui::get()->exp_multiplier(multiplier
[0],
1835 pixel_accum
asum(0, 0, 0), bsum(0, 0, 0);
1837 exposure_ratio_iterate
eri(&asum
, &bsum
, c
, t
, ax_count
, pass_number
);
1840 // std::cerr << (asum / bsum) << " ";
1842 pixel_accum new_multiplier
;
1844 new_multiplier
= asum
/ bsum
* image_rw::exp(m
).get_multiplier();
1846 if (finite(new_multiplier
[0])
1847 && finite(new_multiplier
[1])
1848 && finite(new_multiplier
[2])) {
1849 image_rw::exp(m
).set_multiplier(new_multiplier
);
1850 ui::get()->exp_multiplier(new_multiplier
[0],
1857 * Copy all ax parameters.
1859 static exclusion
*copy_ax_parameters(int local_ax_count
, exclusion
*source
) {
1861 exclusion
*dest
= (exclusion
*) malloc(local_ax_count
* sizeof(exclusion
));
1866 ui::get()->memory_error("exclusion regions");
1868 for (int idx
= 0; idx
< local_ax_count
; idx
++)
1869 dest
[idx
] = source
[idx
];
1875 * Copy ax parameters according to frame.
1877 static exclusion
*filter_ax_parameters(int frame
, int *local_ax_count
) {
1879 exclusion
*dest
= (exclusion
*) malloc(ax_count
* sizeof(exclusion
));
1884 ui::get()->memory_error("exclusion regions");
1886 *local_ax_count
= 0;
1888 for (int idx
= 0; idx
< ax_count
; idx
++) {
1889 if (ax_parameters
[idx
].x
[4] > frame
1890 || ax_parameters
[idx
].x
[5] < frame
)
1893 dest
[*local_ax_count
] = ax_parameters
[idx
];
1895 (*local_ax_count
)++;
1901 static void scale_ax_parameters(int local_ax_count
, exclusion
*ax_parameters
,
1902 ale_pos ref_scale
, ale_pos input_scale
) {
1903 for (int i
= 0; i
< local_ax_count
; i
++) {
1904 ale_pos scale
= (ax_parameters
[i
].type
== exclusion::RENDER
)
1908 for (int n
= 0; n
< 6; n
++) {
1909 ax_parameters
[i
].x
[n
] = ax_parameters
[i
].x
[n
] * scale
;
1915 * Prepare the next level of detail for ordinary images.
1917 static const image
*prepare_lod(const image
*current
) {
1918 if (current
== NULL
)
1921 return current
->scale_by_half("prepare_lod");
1925 * Prepare the next level of detail for definition maps.
1927 static const image
*prepare_lod_def(const image
*current
) {
1928 if (current
== NULL
)
1931 return current
->defined_scale_by_half("prepare_lod_def");
1935 * Initialize scale cluster data structures.
1938 static void init_nl_cluster(struct scale_cluster
*sc
) {
1941 static struct scale_cluster
*init_clusters(int frame
, ale_pos scale_factor
,
1942 const image
*input_frame
, unsigned int steps
,
1943 int *local_ax_count
) {
1946 * Allocate memory for the array.
1949 struct scale_cluster
*scale_clusters
=
1950 (struct scale_cluster
*) malloc(steps
* sizeof(struct scale_cluster
));
1952 assert (scale_clusters
);
1954 if (!scale_clusters
)
1955 ui::get()->memory_error("alignment");
1958 * Prepare images and exclusion regions for the highest level
1962 scale_clusters
[0].accum
= reference_image
;
1964 ui::get()->constructing_lod_clusters(0.0);
1965 scale_clusters
[0].input_scale
= scale_factor
;
1966 if (scale_factor
< 1.0 && interpolant
== NULL
)
1967 scale_clusters
[0].input
= input_frame
->scale(scale_factor
, "alignment");
1969 scale_clusters
[0].input
= input_frame
;
1971 scale_clusters
[0].certainty
= reference_defined
;
1972 scale_clusters
[0].aweight
= alignment_weights
;
1973 scale_clusters
[0].ax_parameters
= filter_ax_parameters(frame
, local_ax_count
);
1976 * Allocate and determine input frame certainty.
1979 if (scale_clusters
[0].input
->get_bayer() != IMAGE_BAYER_NONE
) {
1980 scale_clusters
[0].input_certainty
= new_image_bayer_ale_real(
1981 scale_clusters
[0].input
->height(),
1982 scale_clusters
[0].input
->width(),
1983 scale_clusters
[0].input
->depth(),
1984 scale_clusters
[0].input
->get_bayer());
1986 scale_clusters
[0].input_certainty
= scale_clusters
[0].input
->clone("certainty");
1989 for (unsigned int i
= 0; i
< scale_clusters
[0].input_certainty
->height(); i
++)
1990 for (unsigned int j
= 0; j
< scale_clusters
[0].input_certainty
->width(); j
++)
1991 for (unsigned int k
= 0; k
< 3; k
++)
1992 if (scale_clusters
[0].input
->get_channels(i
, j
) & (1 << k
))
1993 ((image
*) scale_clusters
[0].input_certainty
)->set_chan(i
, j
, k
,
1994 scale_clusters
[0].input
->
1995 exp().confidence(scale_clusters
[0].input
->get_pixel(i
, j
))[k
]);
1997 scale_ax_parameters(*local_ax_count
, scale_clusters
[0].ax_parameters
, scale_factor
,
1998 (scale_factor
< 1.0 && interpolant
== NULL
) ? scale_factor
: (ale_pos
) 1);
2000 init_nl_cluster(&(scale_clusters
[0]));
2003 * Prepare reduced-detail images and exclusion
2007 for (unsigned int step
= 1; step
< steps
; step
++) {
2008 ui::get()->constructing_lod_clusters(step
);
2009 scale_clusters
[step
].accum
= prepare_lod(scale_clusters
[step
- 1].accum
);
2010 scale_clusters
[step
].certainty
= prepare_lod_def(scale_clusters
[step
- 1].certainty
);
2011 scale_clusters
[step
].aweight
= prepare_lod_def(scale_clusters
[step
- 1].aweight
);
2012 scale_clusters
[step
].ax_parameters
2013 = copy_ax_parameters(*local_ax_count
, scale_clusters
[step
- 1].ax_parameters
);
2015 double sf
= scale_clusters
[step
- 1].input_scale
/ 2;
2016 scale_clusters
[step
].input_scale
= sf
;
2018 if (sf
>= 1.0 || interpolant
!= NULL
) {
2019 scale_clusters
[step
].input
= scale_clusters
[step
- 1].input
;
2020 scale_clusters
[step
].input_certainty
= scale_clusters
[step
- 1].input_certainty
;
2021 scale_ax_parameters(*local_ax_count
, scale_clusters
[step
].ax_parameters
, 0.5, 1);
2022 } else if (sf
> 0.5) {
2023 scale_clusters
[step
].input
= scale_clusters
[step
- 1].input
->scale(sf
, "alignment");
2024 scale_clusters
[step
].input_certainty
= scale_clusters
[step
- 1].input
->scale(sf
, "alignment", 1);
2025 scale_ax_parameters(*local_ax_count
, scale_clusters
[step
].ax_parameters
, 0.5, sf
);
2027 scale_clusters
[step
].input
= scale_clusters
[step
- 1].input
->scale(0.5, "alignment");
2028 scale_clusters
[step
].input_certainty
= scale_clusters
[step
- 1].input_certainty
->scale(0.5, "alignment", 1);
2029 scale_ax_parameters(*local_ax_count
, scale_clusters
[step
].ax_parameters
, 0.5, 0.5);
2032 init_nl_cluster(&(scale_clusters
[step
]));
2035 return scale_clusters
;
2039 * Destroy the first element in the scale cluster data structure.
2041 static void final_clusters(struct scale_cluster
*scale_clusters
, ale_pos scale_factor
,
2042 unsigned int steps
) {
2044 if (scale_clusters
[0].input_scale
< 1.0) {
2045 delete scale_clusters
[0].input
;
2048 delete scale_clusters
[0].input_certainty
;
2050 free((void *)scale_clusters
[0].ax_parameters
);
2052 for (unsigned int step
= 1; step
< steps
; step
++) {
2053 delete scale_clusters
[step
].accum
;
2054 delete scale_clusters
[step
].certainty
;
2055 delete scale_clusters
[step
].aweight
;
2057 if (scale_clusters
[step
].input_scale
< 1.0) {
2058 delete scale_clusters
[step
].input
;
2059 delete scale_clusters
[step
].input_certainty
;
2062 free((void *)scale_clusters
[step
].ax_parameters
);
2065 free(scale_clusters
);
2069 * Calculate the centroid of a control point for the set of frames
2070 * having index lower than m. Divide by any scaling of the output.
2072 static point
unscaled_centroid(unsigned int m
, unsigned int p
) {
2075 point
point_sum(0, 0);
2076 ale_accum divisor
= 0;
2078 for(unsigned int j
= 0; j
< m
; j
++) {
2079 point pp
= cp_array
[p
][j
];
2082 point_sum
+= kept_t
[j
].transform_unscaled(pp
)
2083 / kept_t
[j
].scale();
2089 return point::undefined();
2091 return point_sum
/ divisor
;
2095 * Calculate centroid of this frame, and of all previous frames,
2096 * from points common to both sets.
2098 static void centroids(unsigned int m
, point
*current
, point
*previous
) {
2100 * Calculate the translation
2102 point
other_centroid(0, 0);
2103 point
this_centroid(0, 0);
2104 ale_pos divisor
= 0;
2106 for (unsigned int i
= 0; i
< cp_count
; i
++) {
2107 point other_c
= unscaled_centroid(m
, i
);
2108 point this_c
= cp_array
[i
][m
];
2110 if (!other_c
.defined() || !this_c
.defined())
2113 other_centroid
+= other_c
;
2114 this_centroid
+= this_c
;
2120 *current
= point::undefined();
2121 *previous
= point::undefined();
2125 *current
= this_centroid
/ divisor
;
2126 *previous
= other_centroid
/ divisor
;
2130 * Calculate the RMS error of control points for frame m, with
2131 * transformation t, against control points for earlier frames.
2133 static ale_pos
cp_rms_error(unsigned int m
, transformation t
) {
2137 ale_accum divisor
= 0;
2139 for (unsigned int i
= 0; i
< cp_count
; i
++)
2140 for (unsigned int j
= 0; j
< m
; j
++) {
2141 const point
*p
= cp_array
[i
];
2142 point p_ref
= kept_t
[j
].transform_unscaled(p
[j
]);
2143 point p_cur
= t
.transform_unscaled(p
[m
]);
2145 if (!p_ref
.defined() || !p_cur
.defined())
2148 err
+= p_ref
.lengthtosq(p_cur
);
2152 return (ale_pos
) sqrt(err
/ divisor
);
2156 static void test_global(diff_stat_t
*here
, scale_cluster si
, transformation t
,
2157 int local_ax_count
, int m
, ale_accum local_gs_mo
, ale_pos perturb
) {
2159 diff_stat_t
test(*here
);
2161 test
.diff(si
, t
.get_current_element(), local_ax_count
, m
);
2163 unsigned int ovl
= overlap(si
, t
, local_ax_count
);
2165 if (ovl
>= local_gs_mo
&& test
.better()) {
2168 ui::get()->set_match(here
->get_error());
2169 ui::get()->set_offset(here
->get_offset());
2177 * Get the set of global transformations for a given density
2179 static void test_globals(diff_stat_t
*here
,
2180 scale_cluster si
, transformation t
, int local_gs
, ale_pos adj_p
,
2181 int local_ax_count
, int m
, ale_accum local_gs_mo
, ale_pos perturb
) {
2183 transformation offset
= t
;
2187 transformation offset_p
= offset
;
2189 if (!offset_p
.is_projective())
2190 offset_p
.eu_to_gpt();
2192 min
= max
= offset_p
.gpt_get(0);
2193 for (int p_index
= 1; p_index
< 4; p_index
++) {
2194 point p
= offset_p
.gpt_get(p_index
);
2205 point inner_min_t
= -min
;
2206 point inner_max_t
= -max
+ point(si
.accum
->height(), si
.accum
->width());
2207 point outer_min_t
= -max
+ point(adj_p
- 1, adj_p
- 1);
2208 point outer_max_t
= point(si
.accum
->height(), si
.accum
->width()) - point(adj_p
, adj_p
);
2210 if (local_gs
== 1 || local_gs
== 3 || local_gs
== 4 || local_gs
== 6) {
2216 for (ale_pos i
= inner_min_t
[0]; i
<= inner_max_t
[0]; i
+= adj_p
)
2217 for (ale_pos j
= inner_min_t
[1]; j
<= inner_max_t
[1]; j
+= adj_p
) {
2218 transformation test_t
= offset
;
2219 test_t
.translate(point(i
, j
));
2220 test_global(here
, si
, test_t
, local_ax_count
, m
, local_gs_mo
, perturb
);
2224 if (local_gs
== 2 || local_gs
== 3 || local_gs
== -1 || local_gs
== 6) {
2230 for (ale_pos i
= outer_min_t
[0]; i
<= outer_max_t
[0]; i
+= adj_p
)
2231 for (ale_pos j
= outer_min_t
[1]; j
< inner_min_t
[1]; j
+= adj_p
) {
2232 transformation test_t
= offset
;
2233 test_t
.translate(point(i
, j
));
2234 test_global(here
, si
, test_t
, local_ax_count
, m
, local_gs_mo
, perturb
);
2236 for (ale_pos i
= outer_min_t
[0]; i
<= outer_max_t
[0]; i
+= adj_p
)
2237 for (ale_pos j
= outer_max_t
[1]; j
> inner_max_t
[1]; j
-= adj_p
) {
2238 transformation test_t
= offset
;
2239 test_t
.translate(point(i
, j
));
2240 test_global(here
, si
, test_t
, local_ax_count
, m
, local_gs_mo
, perturb
);
2242 for (ale_pos i
= outer_min_t
[0]; i
< inner_min_t
[0]; i
+= adj_p
)
2243 for (ale_pos j
= outer_min_t
[1]; j
<= outer_max_t
[1]; j
+= adj_p
) {
2244 transformation test_t
= offset
;
2245 test_t
.translate(point(i
, j
));
2246 test_global(here
, si
, test_t
, local_ax_count
, m
, local_gs_mo
, perturb
);
2248 for (ale_pos i
= outer_max_t
[0]; i
> inner_max_t
[0]; i
-= adj_p
)
2249 for (ale_pos j
= outer_min_t
[1]; j
<= outer_max_t
[1]; j
+= adj_p
) {
2250 transformation test_t
= offset
;
2251 test_t
.translate(point(i
, j
));
2252 test_global(here
, si
, test_t
, local_ax_count
, m
, local_gs_mo
, perturb
);
2257 static void get_translational_set(std::vector
<transformation
> *set
,
2258 transformation t
, ale_pos adj_p
) {
2262 transformation offset
= t
;
2263 transformation
test_t(transformation::eu_identity());
2265 for (int i
= 0; i
< 2; i
++)
2266 for (adj_s
= -adj_p
; adj_s
<= adj_p
; adj_s
+= 2 * adj_p
) {
2270 test_t
.translate(i
? point(adj_s
, 0) : point(0, adj_s
));
2272 set
->push_back(test_t
);
2276 static int threshold_ok(ale_accum error
) {
2277 if ((1 - error
) * (ale_accum
) 100 >= match_threshold
)
2280 if (!(match_threshold
>= 0))
2286 static diff_stat_t
_align_element(ale_pos perturb
, ale_pos local_lower
,
2287 scale_cluster
*scale_clusters
, diff_stat_t here
,
2288 ale_pos adj_p
, ale_pos adj_o
, ale_pos adj_b
,
2289 ale_pos
*current_bd
, ale_pos
*modified_bd
,
2290 astate_t
*astate
, int lod
, scale_cluster si
) {
2293 * Run initial tests to get perturbation multipliers and error
2297 std::vector
<d2::trans_single
> t_set
;
2299 here
.get_perturb_set(&t_set
, adj_p
, adj_o
, adj_b
, current_bd
, modified_bd
);
2301 int stable_count
= 0;
2303 while (perturb
>= local_lower
) {
2305 ui::get()->alignment_dims(scale_clusters
[lod
].accum
->height(), scale_clusters
[lod
].accum
->width(),
2306 scale_clusters
[lod
].input
->height(), scale_clusters
[lod
].input
->width());
2309 * Orientational adjustment value in degrees.
2311 * Since rotational perturbation is now specified as an
2312 * arclength, we have to convert.
2315 ale_pos adj_o
= 2 * (double) perturb
2316 / sqrt(pow(scale_clusters
[0].input
->height(), 2)
2317 + pow(scale_clusters
[0].input
->width(), 2))
2322 * Barrel distortion adjustment value
2325 ale_pos adj_b
= perturb
* bda_mult
;
2327 trans_single old_offset
= here
.get_offset();
2329 here
.perturb_test(perturb
, adj_p
, adj_o
, adj_b
, current_bd
, modified_bd
,
2332 if (here
.get_offset() == old_offset
)
2337 if (stable_count
== 3) {
2344 && lod
> lrint(log(perturb
) / log(2)) - lod_preferred
) {
2347 * Work with images twice as large
2351 si
= scale_clusters
[lod
];
2354 * Rescale the transforms.
2357 ale_pos rescale_factor
= (double) scale_factor
2358 / (double) pow(2, lod
)
2359 / (double) here
.get_offset().scale();
2361 here
.rescale(rescale_factor
, si
);
2364 adj_p
= perturb
/ pow(2, lod
);
2371 ui::get()->alignment_perturbation_level(perturb
, lod
);
2374 ui::get()->set_match(here
.get_error());
2375 ui::get()->set_offset(here
.get_offset());
2379 ale_pos rescale_factor
= (double) scale_factor
2380 / (double) here
.get_offset().scale();
2382 here
.rescale(rescale_factor
, scale_clusters
[0]);
2389 * Check for satisfaction of the certainty threshold.
2391 static int ma_cert_satisfied(const scale_cluster
&c
, const transformation
&t
, unsigned int i
) {
2392 transformation::elem_bounds_int_t b
= t
.elem_bounds().scale_to_bounds(c
.accum
->height(), c
.accum
->width());
2393 ale_accum sum
[3] = {0, 0, 0};
2395 for (unsigned int ii
= b
.imin
; ii
< b
.imax
; ii
++)
2396 for (unsigned int jj
= b
.jmin
; jj
< b
.jmax
; jj
++) {
2397 pixel p
= c
.accum
->get_pixel(ii
, jj
);
2403 unsigned int count
= (b
.jmax
- b
.jmin
) * (b
.imax
- b
.imin
);
2405 for (int k
= 0; k
< 3; k
++)
2406 if (sum
[k
] / count
< _ma_cert
)
2412 static diff_stat_t
check_ancestor_path(const trans_multi
&offset
, const scale_cluster
&si
, diff_stat_t here
, int local_ax_count
, int frame
) {
2414 if (offset
.get_current_index() > 0)
2415 for (int i
= offset
.parent_index(offset
.get_current_index()); i
>= 0; i
= (i
> 0) ? (int) offset
.parent_index(i
) : -1) {
2416 trans_single t
= offset
.get_element(i
);
2417 t
.rescale(offset
.get_current_element().scale());
2419 here
.diff(si
, t
, local_ax_count
, frame
);
2421 if (here
.better_defined())
2431 * Align frame m against the reference.
2433 * XXX: the transformation class currently combines ordinary
2434 * transformations with scaling. This is somewhat convenient for
2435 * some things, but can also be confusing. This method, _align(), is
2436 * one case where special care must be taken to ensure that the scale
2437 * is always set correctly (by using the 'rescale' method).
2439 static diff_stat_multi
_align(int m
, int local_gs
, astate_t
*astate
) {
2441 const image
*input_frame
= astate
->get_input_frame();
2444 * Local upper/lower data, possibly dependent on image
2448 ale_pos local_lower
, local_upper
;
2449 ale_accum local_gs_mo
;
2452 * Select the minimum dimension as the reference.
2455 ale_pos reference_size
= input_frame
->height();
2456 if (input_frame
->width() < reference_size
)
2457 reference_size
= input_frame
->width();
2458 ale_accum reference_area
= input_frame
->height()
2459 * input_frame
->width();
2461 if (perturb_lower_percent
)
2462 local_lower
= (double) perturb_lower
2463 * (double) reference_size
2465 * (double) scale_factor
;
2467 local_lower
= perturb_lower
;
2469 if (perturb_upper_percent
)
2470 local_upper
= (double) perturb_upper
2471 * (double) reference_size
2473 * (double) scale_factor
;
2475 local_upper
= perturb_upper
;
2477 local_upper
= pow(2, floor(log(local_upper
) / log(2)));
2480 local_gs_mo
= (double) _gs_mo
2481 * (double) reference_area
2483 * (double) scale_factor
;
2485 local_gs_mo
= _gs_mo
;
2488 * Logarithms aren't exact, so we divide repeatedly to discover
2489 * how many steps will occur, and pass this information to the
2494 double step_variable
= local_upper
;
2495 while (step_variable
>= local_lower
) {
2500 ale_pos perturb
= local_upper
;
2503 kept_t
[latest
] = latest_t
;
2504 kept_ok
[latest
] = latest_ok
;
2508 * Determine how many levels of detail should be prepared, by
2509 * calculating the initial (largest) value for the
2510 * level-of-detail variable.
2513 int lod
= lrint(log(perturb
) / log(2)) - lod_preferred
;
2518 while (lod
> 0 && (reference_image
->width() < pow(2, lod
) * min_dimension
2519 || reference_image
->height() < pow(2, lod
) * min_dimension
))
2522 unsigned int steps
= (unsigned int) lod
+ 1;
2525 * Prepare multiple levels of detail.
2529 struct scale_cluster
*scale_clusters
= init_clusters(m
,
2530 scale_factor
, input_frame
, steps
,
2534 * Initialize the default initial transform
2537 astate
->init_default();
2540 * Set the default transformation.
2543 transformation offset
= astate
->get_default();
2546 * Establish boundaries
2549 offset
.set_current_bounds(reference_image
);
2551 ui::get()->alignment_degree_max(offset
.get_coordinate(offset
.stack_depth() - 1).degree
);
2553 if (offset
.stack_depth() == 1) {
2554 ui::get()->set_steps(step_count
, 0);
2556 ui::get()->set_steps(offset
.get_coordinate(offset
.stack_depth() - 1).degree
+ 1, 1);
2560 * Load any file-specified transformations
2563 for (unsigned int index
= 0; index
< offset
.stack_depth(); index
++) {
2565 unsigned int index_2
;
2566 offset
.set_current_index(index
);
2568 offset
= tload_next(tload
, alignment_class
== 2,
2570 &is_default
, offset
.get_current_index() == 0);
2572 index_2
= offset
.get_current_index();
2574 if (index_2
> index
) {
2575 for (unsigned int index_3
= index
; index_3
< index_2
; index_3
++)
2576 astate
->set_is_default(index_3
, 1);
2581 astate
->set_is_default(index
, is_default
);
2584 offset
.set_current_index(0);
2586 astate
->init_frame_alignment_primary(&offset
, lod
, perturb
);
2589 * Control point alignment
2592 if (local_gs
== 5) {
2594 transformation o
= offset
;
2597 * Determine centroid data
2600 point current
, previous
;
2601 centroids(m
, ¤t
, &previous
);
2603 if (current
.defined() && previous
.defined()) {
2605 o
.set_dimensions(input_frame
);
2606 o
.translate((previous
- current
) * o
.scale());
2611 * Determine rotation for alignment classes other than translation.
2614 ale_pos lowest_error
= cp_rms_error(m
, o
);
2616 ale_pos rot_lower
= 2 * (double) local_lower
2617 / sqrt(pow(scale_clusters
[0].input
->height(), 2)
2618 + pow(scale_clusters
[0].input
->width(), 2))
2622 if (alignment_class
> 0)
2623 for (double rot
= 30; rot
> rot_lower
; rot
/= 2)
2624 for (double srot
= -rot
; srot
< rot
* 1.5; srot
+= rot
* 2) {
2625 int is_improved
= 1;
2626 while (is_improved
) {
2628 transformation test_t
= o
;
2630 * XXX: is this right?
2632 test_t
.rotate(current
* o
.scale(), srot
);
2633 ale_pos test_v
= cp_rms_error(m
, test_t
);
2635 if (test_v
< lowest_error
) {
2636 lowest_error
= test_v
;
2645 * Determine projective parameters through a local
2649 if (alignment_class
== 2) {
2650 ale_pos adj_p
= lowest_error
;
2652 if (adj_p
< local_lower
)
2653 adj_p
= local_lower
;
2655 while (adj_p
>= local_lower
) {
2656 transformation test_t
= o
;
2657 int is_improved
= 1;
2661 while (is_improved
) {
2664 for (int i
= 0; i
< 4; i
++)
2665 for (int j
= 0; j
< 2; j
++)
2666 for (adj_s
= -adj_p
; adj_s
<= adj_p
; adj_s
+= 2 * adj_p
) {
2670 if (perturb_type
== 0)
2671 test_t
.gpt_modify(j
, i
, adj_s
);
2672 else if (perturb_type
== 1)
2673 test_t
.gr_modify(j
, i
, adj_s
);
2677 test_v
= cp_rms_error(m
, test_t
);
2679 if (test_v
< lowest_error
) {
2680 lowest_error
= test_v
;
2693 * Pre-alignment exposure adjustment
2696 if (_exp_register
) {
2697 ui::get()->exposure_1();
2698 set_exposure_ratio(m
, scale_clusters
[0], offset
, local_ax_count
, 0);
2702 * Scale transform for lod
2705 for (int lod_
= 0; lod_
< lod
; lod_
++) {
2706 transformation s
= offset
;
2707 transformation t
= offset
;
2709 t
.rescale(1 / (double) 2);
2711 if (!(t
.scaled_height() > 0 && t
.scaled_height() < s
.scaled_height())
2712 || !(t
.scaled_width() > 0 && t
.scaled_width() < s
.scaled_width())) {
2713 perturb
/= pow(2, lod
- lod_
);
2721 ui::get()->set_offset(offset
);
2723 struct scale_cluster si
= scale_clusters
[lod
];
2726 * Projective adjustment value
2729 ale_pos adj_p
= perturb
/ pow(2, lod
);
2732 * Orientational adjustment value in degrees.
2734 * Since rotational perturbation is now specified as an
2735 * arclength, we have to convert.
2738 ale_pos adj_o
= (double) 2 * (double) perturb
2739 / sqrt(pow((double) scale_clusters
[0].input
->height(), (double) 2)
2740 + pow((double) scale_clusters
[0].input
->width(), (double) 2))
2745 * Barrel distortion adjustment value
2748 ale_pos adj_b
= perturb
* bda_mult
;
2751 * Global search overlap requirements.
2754 local_gs_mo
= (double) local_gs_mo
/ pow(pow(2, lod
), 2);
2757 * Alignment statistics.
2760 diff_stat_t
here(offset
.elem_bounds());
2763 * Current difference (error) value
2766 ui::get()->prematching();
2767 here
.diff(si
, offset
.get_current_element(), local_ax_count
, m
);
2768 ui::get()->set_match(here
.get_error());
2771 * Current and modified barrel distortion parameters
2774 ale_pos current_bd
[BARREL_DEGREE
];
2775 ale_pos modified_bd
[BARREL_DEGREE
];
2776 offset
.bd_get(current_bd
);
2777 offset
.bd_get(modified_bd
);
2780 * Translational global search step
2783 if (perturb
>= local_lower
&& local_gs
!= 0 && local_gs
!= 5
2784 && (local_gs
!= 6 || astate
->get_is_default(0))) {
2786 ui::get()->global_alignment(perturb
, lod
);
2787 ui::get()->gs_mo(local_gs_mo
);
2789 test_globals(&here
, si
, offset
, local_gs
, adj_p
,
2790 local_ax_count
, m
, local_gs_mo
, perturb
);
2792 ui::get()->set_match(here
.get_error());
2793 ui::get()->set_offset(here
.get_offset());
2797 * Perturbation adjustment loop.
2800 offset
.set_current_element(here
.get_offset());
2802 for (unsigned int i
= 0; i
< offset
.stack_depth(); i
++) {
2804 ui::get()->aligning_element(i
, offset
.stack_depth());
2806 offset
.set_current_index(i
);
2808 ui::get()->start_multi_alignment_element(offset
);
2810 ui::get()->set_offset(offset
);
2813 astate
->init_frame_alignment_nonprimary(&offset
, lod
, perturb
, i
);
2815 if (_exp_register
== 1) {
2816 ui::get()->exposure_1();
2817 pixel_accum
asum(0, 0, 0), bsum(0, 0, 0);
2818 exposure_ratio_iterate
eri(&asum
, &bsum
, scale_clusters
[0], offset
, local_ax_count
, 0,
2819 offset
.elem_bounds().scale_to_bounds(scale_clusters
[0].accum
->height(),
2820 scale_clusters
[0].accum
->width()));
2823 pixel_accum tr
= asum
/ bsum
;
2824 ui::get()->exp_multiplier(tr
[0], tr
[1], tr
[2]);
2825 offset
.set_tonal_multiplier(tr
);
2830 int e_div
= offset
.get_current_coordinate().degree
;
2831 ale_pos e_perturb
= perturb
;
2832 ale_pos e_adj_p
= adj_p
;
2833 ale_pos e_adj_b
= adj_b
;
2835 for (int d
= 0; d
< e_div
; d
++) {
2847 d2::trans_multi::elem_bounds_t b
= offset
.elem_bounds();
2849 for (int dim_satisfied
= 0; e_lod
> 0 && !dim_satisfied
; ) {
2850 int height
= scale_clusters
[e_lod
].accum
->height();
2851 int width
= scale_clusters
[e_lod
].accum
->width();
2853 d2::trans_multi::elem_bounds_int_t bi
= b
.scale_to_bounds(height
, width
);
2855 dim_satisfied
= bi
.satisfies_min_dim(min_dimension
);
2857 if (!dim_satisfied
) {
2864 * Scale transform for lod
2867 for (int lod_
= 0; lod_
< e_lod
; lod_
++) {
2868 trans_single s
= offset
.get_element(i
);
2869 trans_single t
= offset
.get_element(i
);
2871 t
.rescale(1 / (double) 2);
2873 if (!(t
.scaled_height() > 0 && t
.scaled_height() < s
.scaled_height())
2874 || !(t
.scaled_width() > 0 && t
.scaled_width() < s
.scaled_width())) {
2875 e_perturb
/= pow(2, e_lod
- lod_
);
2879 offset
.set_element(i
, t
);
2883 ui::get()->set_offset(offset
);
2887 * Announce perturbation size
2890 ui::get()->aligning(e_perturb
, e_lod
);
2892 si
= scale_clusters
[e_lod
];
2894 here
.set_elem_bounds(offset
.elem_bounds());
2896 here
.diff(si
, offset
.get_current_element(), local_ax_count
, m
);
2900 here
= check_ancestor_path(offset
, si
, here
, local_ax_count
, m
);
2902 here
= _align_element(e_perturb
, local_lower
, scale_clusters
,
2903 here
, e_adj_p
, adj_o
, e_adj_b
, current_bd
, modified_bd
,
2906 offset
.rescale(here
.get_offset().scale() / offset
.scale());
2908 offset
.set_current_element(here
.get_offset());
2910 if (i
> 0 && _exp_register
== 1) {
2911 if (ma_cert_satisfied(scale_clusters
[0], offset
, i
)) {
2912 ui::get()->exposure_2();
2913 pixel_accum
asum(0, 0, 0), bsum(0, 0, 0);
2914 exposure_ratio_iterate
eri(&asum
, &bsum
, scale_clusters
[0], offset
, local_ax_count
, 1,
2915 offset
.elem_bounds().scale_to_bounds(scale_clusters
[0].accum
->height(),
2916 scale_clusters
[0].accum
->width()));
2919 pixel_accum tr
= asum
/ bsum
;
2920 ui::get()->exp_multiplier(tr
[0], tr
[1], tr
[2]);
2921 offset
.set_tonal_multiplier(tr
);
2923 offset
.set_tonal_multiplier(offset
.get_element(offset
.parent_index(i
)).get_tonal_multiplier(point(0, 0)));
2925 } else if (_exp_register
== 1) {
2926 ui::get()->exposure_2();
2927 set_exposure_ratio(m
, scale_clusters
[0], offset
, local_ax_count
, 1);
2930 ui::get()->set_offset(offset
);
2932 if (i
+ 1 == offset
.stack_depth())
2933 ui::get()->alignment_degree_complete(offset
.get_coordinate(i
).degree
);
2934 else if (offset
.get_coordinate(i
).degree
!= offset
.get_coordinate(i
+ 1).degree
)
2935 ui::get()->alignment_degree_complete(offset
.get_coordinate(i
+ 1).degree
);
2938 offset
.set_current_index(0);
2941 offset
.set_multi(reference_image
, input_frame
);
2944 * Recalculate error on whole frame.
2947 ui::get()->postmatching();
2948 diff_stat_generic
<transformation
> multi_here(offset
.elem_bounds());
2949 multi_here
.diff(scale_clusters
[0], offset
, local_ax_count
, m
);
2950 ui::get()->set_match(multi_here
.get_error());
2953 * Free the level-of-detail structures
2956 final_clusters(scale_clusters
, scale_factor
, steps
);
2959 * Ensure that the match meets the threshold.
2962 if (threshold_ok(multi_here
.get_error())) {
2964 * Update alignment variables
2967 astate
->set_default(offset
);
2968 astate
->set_final(offset
);
2969 ui::get()->alignment_match_ok();
2970 } else if (local_gs
== 4) {
2973 * Align with outer starting points.
2977 * XXX: This probably isn't exactly the right thing to do,
2978 * since variables like old_initial_value have been overwritten.
2981 diff_stat_multi nested_result
= _align(m
, -1, astate
);
2983 if (threshold_ok(nested_result
.get_error())) {
2984 return nested_result
;
2985 } else if (nested_result
.get_error() < multi_here
.get_error()) {
2986 multi_here
= nested_result
;
2989 if (is_fail_default
)
2990 offset
= astate
->get_default();
2992 ui::get()->set_match(multi_here
.get_error());
2993 ui::get()->alignment_no_match();
2995 } else if (local_gs
== -1) {
3002 if (is_fail_default
)
3003 offset
= astate
->get_default();
3005 ui::get()->alignment_no_match();
3009 * Write the tonal registration multiplier as a comment.
3012 pixel trm
= image_rw::exp(m
).get_multiplier();
3013 tsave_trm(tsave
, trm
[0], trm
[1], trm
[2]);
3016 * Save the transformation information
3019 for (unsigned int index
= 0; index
< offset
.stack_depth(); index
++) {
3020 offset
.set_current_index(index
);
3022 tsave_next(tsave
, offset
, alignment_class
== 2,
3023 offset
.get_current_index() == 0);
3026 offset
.set_current_index(0);
3029 * Update match statistics.
3032 match_sum
+= (1 - multi_here
.get_error()) * (ale_accum
) 100;
3042 * High-pass filter for frequency weights
3044 static void hipass(int rows
, int cols
, fftw_complex
*inout
) {
3045 for (int i
= 0; i
< rows
* vert_freq_cut
; i
++)
3046 for (int j
= 0; j
< cols
; j
++)
3047 for (int k
= 0; k
< 2; k
++)
3048 inout
[i
* cols
+ j
][k
] = 0;
3049 for (int i
= 0; i
< rows
; i
++)
3050 for (int j
= 0; j
< cols
* horiz_freq_cut
; j
++)
3051 for (int k
= 0; k
< 2; k
++)
3052 inout
[i
* cols
+ j
][k
] = 0;
3053 for (int i
= 0; i
< rows
; i
++)
3054 for (int j
= 0; j
< cols
; j
++)
3055 for (int k
= 0; k
< 2; k
++)
3056 if (i
/ (double) rows
+ j
/ (double) cols
< 2 * avg_freq_cut
)
3057 inout
[i
* cols
+ j
][k
] = 0;
3063 * Reset alignment weights
3065 static void reset_weights() {
3066 if (alignment_weights
!= NULL
)
3067 delete alignment_weights
;
3069 alignment_weights
= NULL
;
3073 * Initialize alignment weights
// init_weights(): allocate the per-pixel alignment weight image, sized
// to the current reference image, and fill it with unit weights.
3075 static void init_weights() {
// Guard against re-initialization.
// NOTE(review): the statement controlled by this `if` (original lines
// 3077-3078, presumably an early `return;`) was lost in extraction --
// confirm against the upstream source.
3076 if (alignment_weights
!= NULL
)
// Dimensions are taken from the reference rendering we align against.
3079 int rows
= reference_image
->height();
3080 int cols
= reference_image
->width();
3081 int colors
= reference_image
->depth();
// Allocate the weight image (the string names it for diagnostics).
3083 alignment_weights
= new_image_ale_real(rows
, cols
,
3084 colors
, "alignment_weights");
3086 assert (alignment_weights
);
// Start from uniform unit weights; later stages (map_update(),
// wmx_update()) multiply into these.
3088 for (int i
= 0; i
< rows
; i
++)
3089 for (int j
= 0; j
< cols
; j
++)
3090 alignment_weights
->set_pixel(i
, j
, pixel(1, 1, 1));
3094 * Update alignment weights with weight map
// map_update(): multiply the per-pixel alignment weights by the
// user-supplied weight map, bilinearly sampled, wherever the map
// overlaps the reference image.
// NOTE(review): assumes alignment_weights has been initialized -- the
// call ordering is not visible here; confirm init_weights() runs first.
3096 static void map_update() {
// No weight map configured.
// NOTE(review): the early `return;` controlled by this condition
// (original lines 3099-3101) was elided in extraction.
3098 if (weight_map
== NULL
)
// Translate reference-image coordinates into weight-map coordinates.
3103 point map_offset
= reference_image
->offset() - weight_map
->offset();
3105 int rows
= reference_image
->height();
3106 int cols
= reference_image
->width();
3108 for (int i
= 0; i
< rows
; i
++)
3109 for (int j
= 0; j
< cols
; j
++) {
3110 point map_weight_position
= map_offset
+ point(i
, j
);
// Only pixels falling within the weight map's bounds are affected.
3111 if (map_weight_position
[0] >= 0
3112 && map_weight_position
[1] >= 0
3113 && map_weight_position
[0] <= weight_map
->height() - 1
3114 && map_weight_position
[1] <= weight_map
->width() - 1)
// Multiply in the bilinearly-interpolated map value.
3115 alignment_weights
->set_pixel(i
, j
,
3116 alignment_weights
->get_pixel(i
, j
)
3117 * weight_map
->get_bl(map_weight_position
));
3122 * Update alignment weights with algorithmic weights
// wmx_update(): run an external program (wmx_exec) over the reference
// image to produce algorithmic alignment weights, then fold the result
// into the current weights.
3124 static void wmx_update() {
// Static exposure objects used for image file I/O below; allocated once
// and intentionally never freed (they live for the program's duration).
3127 static exposure
*exp_def
= new exposure_default();
3128 static exposure
*exp_bool
= new exposure_boolean();
// Algorithmic weighting is active only when executable, image file and
// definition file are all configured.
// NOTE(review): the early `return;` controlled by this condition
// (original line ~3131) was elided in extraction.
3130 if (wmx_file
== NULL
|| wmx_exec
== NULL
|| wmx_defs
== NULL
)
3133 unsigned int rows
= reference_image
->height();
3134 unsigned int cols
= reference_image
->width();
// Hand the reference image and its defined-region mask to the external
// program via the configured files.
3136 image_rw::write_image(wmx_file
, reference_image
);
3137 image_rw::write_image(wmx_defs
, reference_defined
, exp_bool
);
// Execute wmx_exec on the two files.
// NOTE(review): the surrounding fork()/wait() control flow (original
// lines ~3138-3148) was elided in extraction; execlp() presumably runs
// in the child, with exec_failure()/fork_failure() as the respective
// error paths.
3140 int exit_status
= 1;
3142 execlp(wmx_exec
, wmx_exec
, wmx_file
, wmx_defs
, NULL
);
3143 ui::get()->exec_failure(wmx_exec
, wmx_file
, wmx_defs
);
3149 ui::get()->fork_failure("d2::align");
// Read back the weights the external program wrote to wmx_file.
3151 image
*wmx_weights
= image_rw::read_image(wmx_file
, exp_def
);
// The external program must preserve image dimensions.
3153 if (wmx_weights
->height() != rows
|| wmx_weights
->width() != cols
)
3154 ui::get()->error("algorithmic weighting must not change image size");
// First weighting stage: adopt the produced weights wholesale.
// NOTE(review): as extracted, this branch falls through to the loop
// below, which would multiply the image by itself; an `else` or early
// `return` was likely elided here -- confirm against upstream.
3156 if (alignment_weights
== NULL
)
3157 alignment_weights
= wmx_weights
;
// Fold the new weights into the existing ones, pixel by pixel.
3159 for (unsigned int i
= 0; i
< rows
; i
++)
3160 for (unsigned int j
= 0; j
< cols
; j
++)
3161 alignment_weights
->set_pixel(i
, j
,
3162 (pixel
) alignment_weights
->get_pixel(i
, j
)
3163 * (pixel
) wmx_weights
->get_pixel(i
, j
));
3168 * Update alignment weights with frequency weights
// fw_update(): derive alignment weights from the frequency content of
// the reference image using FFTW.  Low frequencies (per the horizontal,
// vertical and average cutoffs) are removed by hipass(); the remaining
// magnitudes become per-pixel weights.
3170 static void fw_update() {
// Nothing to do when no frequency cutoff is configured.
// NOTE(review): the early `return;` controlled by this condition
// (original lines ~3175-3176) was elided in extraction.
3172 if (horiz_freq_cut
== 0
3173 && vert_freq_cut
== 0
3174 && avg_freq_cut
== 0)
3178 * Required for correct operation of --fwshow
3181 assert (alignment_weights
== NULL
);
3183 int rows
= reference_image
->height();
3184 int cols
= reference_image
->width();
3185 int colors
= reference_image
->depth();
3187 alignment_weights
= new_image_ale_real(rows
, cols
,
3188 colors
, "alignment_weights");
// In-place complex buffer holding the 2-D DFT of one color channel.
3190 fftw_complex
*inout
= (fftw_complex
*) fftw_malloc(sizeof(fftw_complex
) * rows
* cols
);
// Forward and backward 2-D DFT plans.
// NOTE(review): the buffer arguments to fftw_plan_dft_2d (original
// lines 3195 and 3199, presumably `inout, inout`) were elided in
// extraction.
3194 fftw_plan pf
= fftw_plan_dft_2d(rows
, cols
,
3196 FFTW_FORWARD
, FFTW_ESTIMATE
);
3198 fftw_plan pb
= fftw_plan_dft_2d(rows
, cols
,
3200 FFTW_BACKWARD
, FFTW_ESTIMATE
);
// Process each color channel independently.
3202 for (int k
= 0; k
< colors
; k
++) {
// Load the channel into the real parts of the buffer.
// NOTE(review): zeroing of the imaginary parts and the fftw_execute()
// call(s) (original lines ~3205-3208) were elided in extraction.
3203 for (int i
= 0; i
< rows
* cols
; i
++) {
3204 inout
[i
][0] = reference_image
->get_pixel(i
/ cols
, i
% cols
)[k
];
// Remove low-frequency components in place.
3209 hipass(rows
, cols
, inout
);
// Store the filtered result as the per-pixel weight for channel k,
// normalized by the transform size (rows * cols).
// NOTE(review): the two statements below write the same channel two
// different ways (|real| vs. complex magnitude); one is probably
// conditionally compiled -- confirm against upstream.  The inverse
// transform via `pb` (original lines ~3219-3222) was also elided.
3212 for (int i
= 0; i
< rows
* cols
; i
++) {
3214 alignment_weights
->pix(i
/ cols
, i
% cols
)[k
] = fabs(inout
[i
][0] / (rows
* cols
));
3216 alignment_weights
->set_chan(i
/ cols
, i
% cols
, k
,
3217 sqrt(pow(inout
[i
][0] / (rows
* cols
), 2)
3218 + pow(inout
[i
][1] / (rows
* cols
), 2)));
// Release the FFTW plans.
// NOTE(review): a matching fftw_free(inout) is not visible in the
// extracted text -- verify it exists upstream.
3223 fftw_destroy_plan(pf
);
3224 fftw_destroy_plan(pb
);
// Optionally save the computed frequency weights (--fwshow).
3227 if (fw_output
!= NULL
)
3228 image_rw::write_image(fw_output
, alignment_weights
);
3233 * Update alignment to frame N.
// update_to(): bring alignment state forward to frame N, aligning each
// not-yet-processed frame in order.  The extracted text elides several
// spans; NOTE(review) markers flag the gaps.
3235 static void update_to(int n
) {
// Frames may only be requested in order, at most one past the latest.
3237 assert (n
<= latest
+ 1);
// Persistent alignment state shared across calls.
3240 static astate_t astate
;
3242 ui::get()->set_frame_num(n
);
// NOTE(review): the conditional structure around initial-frame handling
// (original lines ~3243-3249) was elided in extraction.
3247 * Handle the initial frame
3250 astate
.set_input_frame(image_rw::open(n
));
3252 const image
*i
= astate
.get_input_frame();
// The identity transformation in the configured alignment class is the
// starting point (projective when alignment_class == 2, else Euclidean) ...
3254 transformation result
= alignment_class
== 2
3255 ? transformation::gpt_identity(i
, scale_factor
)
3256 : transformation::eu_identity(i
, scale_factor
);
// ... unless a transformation file supplies one; save it back out.
3257 result
= tload_first(tload
, alignment_class
== 2, result
, &is_default
);
3258 tsave_first(tsave
, result
, alignment_class
== 2);
// Allocate the keep-old-alignment bookkeeping arrays.
// NOTE(review): the continuation of this malloc() call (its size
// multiplier and closing parenthesis, original lines ~3263-3266) was
// elided in extraction.
3261 kept_t
= transformation::new_eu_identity_array(image_rw::count());
3262 kept_ok
= (int *) malloc(image_rw::count()
3267 if (!kept_t
|| !kept_ok
)
3268 ui::get()->memory_error("alignment");
3278 astate
.set_default(result
);
// Align each outstanding frame in sequence.
3284 for (int i
= latest
+ 1; i
<= n
; i
++) {
3287 * Handle supplemental frames.
3290 assert (reference
!= NULL
);
// Synchronize the reference rendering through the previous frame, then
// grab its image and defined-region mask.
3292 ui::get()->set_arender_current();
3293 reference
->sync(i
- 1);
3294 ui::get()->clear_arender_current();
3295 reference_image
= reference
->get_image();
3296 reference_defined
= reference
->get_defined();
3299 astate
.default_set_original_bounds(reference_image
);
3306 assert (reference_image
!= NULL
);
3307 assert (reference_defined
!= NULL
);
3309 astate
.set_input_frame(image_rw::open(i
));
// Align frame i with the configured global search type.
3311 _align(i
, _gs
, &astate
);
3320 * Set the control point count
3322 static void set_cp_count(unsigned int n
) {
// Must be called exactly once, before any control points are set.
3323 assert (cp_array
== NULL
);
// NOTE(review): original lines ~3324-3325 (presumably `cp_count = n;`
// and/or a malloc-failure check) were elided in extraction.
3326 cp_array
= (const point
**) malloc(n
* sizeof(const point
*));
3330 * Set control points.
3332 static void set_cp(unsigned int i
, const point
*p
) {
// NOTE(review): body elided in extraction (presumably stores p at
// index i of cp_array).
// Register exposure between frames.
// NOTE(review): the bodies of these three setters were elided in
// extraction; they presumably assign the _exp_register mode that the
// alignment code above tests (`_exp_register == 1`).
3339 static void exp_register() {
3344 * Register exposure only based on metadata
3346 static void exp_meta_only() {
3351 * Don't register exposure
3353 static void exp_noregister() {
3358 * Set alignment class to translation only. Only adjust the x and y
3359 * position of images. Do not rotate input images or perform
3360 * projective transformations.
3362 static void class_translation() {
3363 alignment_class
= 0;
3367 * Set alignment class to Euclidean transformations only. Adjust the x
3368 * and y position of images and the orientation of the image about the
3371 static void class_euclidean() {
3372 alignment_class
= 1;
3376 * Set alignment class to perform general projective transformations.
3377 * See the file gpt.h for more information about general projective
3380 static void class_projective() {
3381 alignment_class
= 2;
3385 * Set the default initial alignment to the identity transformation.
3387 static void initial_default_identity() {
3388 default_initial_alignment_type
= 0;
3392 * Set the default initial alignment to the most recently matched
3393 * frame's final transformation.
3395 static void initial_default_follow() {
3396 default_initial_alignment_type
= 1;
3400 * Perturb output coordinates.
// NOTE(review): the bodies of perturb_output()/perturb_source() were
// elided in extraction; they presumably select which coordinate space
// perturbations are applied in.
3402 static void perturb_output() {
3407 * Perturb source coordinates.
3409 static void perturb_source() {
3414 * Frames under threshold align optimally
3416 static void fail_optimal() {
3417 is_fail_default
= 0;
3421 * Frames under threshold keep their default alignments.
3423 static void fail_default() {
3424 is_fail_default
= 1;
3428 * Align images with an error contribution from each color channel.
// NOTE(review): the function signature introducing this assignment
// (original ~line 3430, presumably `static void all() {`) was elided
// in extraction.
3431 channel_alignment_type
= 0;
3435 * Align images with an error contribution only from the green channel.
3436 * Other color channels do not affect alignment.
3438 static void green() {
3439 channel_alignment_type
= 1;
3443 * Align images using a summation of channels. May be useful when
3444 * dealing with images that have high frequency color ripples due to
// NOTE(review): the rest of this comment and the function signature
// introducing the assignment (original ~lines 3445-3447) were elided
// in extraction.
3448 channel_alignment_type
= 2;
3452 * Error metric exponent
3455 static void set_metric_exponent(float me
) {
3456 metric_exponent
= me
;
3463 static void set_match_threshold(float mt
) {
3464 match_threshold
= mt
;
3468 * Perturbation lower and upper bounds.
3471 static void set_perturb_lower(ale_pos pl
, int plp
) {
// Record whether the bound was given as a percentage.
// NOTE(review): storage of `pl` itself (original line ~3472, presumably
// `perturb_lower = pl;`) was elided in extraction.
3473 perturb_lower_percent
= plp
;
3476 static void set_perturb_upper(ale_pos pu
, int pup
) {
// NOTE(review): storage of `pu` (original line ~3477) was likewise
// elided.
3478 perturb_upper_percent
= pup
;
3482 * Maximum rotational perturbation.
3485 static void set_rot_max(int rm
) {
3488 * Obtain the largest power of two not larger than rm.
3491 rot_max
= pow(2, floor(log(rm
) / log(2)));
3495 * Barrel distortion adjustment multiplier
// NOTE(review): the bodies of the five setters below were elided in
// extraction; each presumably stores its argument in a
// correspondingly-named static.
3498 static void set_bda_mult(ale_pos m
) {
3503 * Barrel distortion maximum rate of change
3506 static void set_bda_rate(ale_pos m
) {
3514 static void set_lod_preferred(int lm
) {
3519 * Minimum dimension for reduced level-of-detail.
3522 static void set_min_dimension(int md
) {
3527 * Set the scale factor
3529 static void set_scale(ale_pos s
) {
3534 * Set reference rendering to align against
// NOTE(review): bodies elided in extraction; these setters presumably
// store their arguments in the statics `reference`, `interpolant` and
// `weight_map` declared at the top of this class.
3536 static void set_reference(render
*r
) {
3541 * Set the interpolant
3543 static void set_interpolant(filter::scaled_filter
*f
) {
3548 * Set alignment weights image
3550 static void set_weight_map(const image
*i
) {
3555 * Set frequency cuts
// NOTE(review): bodies elided in extraction; set_frequency_cut()
// presumably stores h/v/a into horiz_freq_cut, vert_freq_cut and
// avg_freq_cut, and set_wmx() stores e/f/d into wmx_exec, wmx_file
// and wmx_defs (the statics consumed by fw_update()/wmx_update()).
3557 static void set_frequency_cut(double h
, double v
, double a
) {
3564 * Set algorithmic alignment weighting
3566 static void set_wmx(const char *e
, const char *f
, const char *d
) {
3573 * Show frequency weights
3575 static void set_fl_show(const char *filename
) {
3576 fw_output
= filename
;
3580 * Set transformation file loader.
3582 static void set_tload(tload_t
*tl
) {
// NOTE(review): body elided in extraction (presumably `tload = tl;`).
3587 * Set transformation file saver.
3589 static void set_tsave(tsave_t
*ts
) {
// NOTE(review): body elided in extraction (presumably `tsave = ts;`).
3594 * Get match statistics for frame N.
3596 static int match(int n
) {
// NOTE(review): body (original lines ~3597-3609) elided in extraction.
3610 * Message that old alignment data should be kept.
3612 static void keep() {
// Keeping old data may only be requested before any alignment occurs.
3613 assert (latest
== -1);
// NOTE(review): remainder of body elided in extraction.
3618 * Get alignment for frame N.
3620 static transformation
of(int n
) {
// NOTE(review): body (original lines ~3621-3631) elided in extraction.
3633 * Use Monte Carlo alignment sampling with argument N.
3635 static void mc(ale_pos n
) {
// NOTE(review): body elided in extraction.
3640 * Set the certainty-weighted flag.
3642 static void certainty_weighted(int flag
) {
3643 certainty_weights
= flag
;
3647 * Set the global search type.
3649 static void gs(const char *type
) {
// Parse the global-search type string.  Each branch presumably assigns
// the corresponding internal search mode; the branch bodies were
// elided in extraction.
3650 if (!strcmp(type
, "local")) {
3652 } else if (!strcmp(type
, "inner")) {
3654 } else if (!strcmp(type
, "outer")) {
3656 } else if (!strcmp(type
, "all")) {
3658 } else if (!strcmp(type
, "central")) {
3660 } else if (!strcmp(type
, "defaults")) {
3662 } else if (!strcmp(type
, "points")) {
// Unrecognized type strings are reported as an error via the UI.
3666 ui::get()->error("bad global search type");
3671 * Set the minimum overlap for global searching
3673 static void gs_mo(ale_pos value
, int _gs_mo_percent
) {
// Record whether the overlap value is a percentage.
// NOTE(review): storage of `value` itself (original line ~3674,
// presumably `gs_mo = value;`) was elided in extraction.
3675 gs_mo_percent
= _gs_mo_percent
;
3679 * Set multi-alignment certainty lower bound.
3681 static void set_ma_cert(ale_real value
) {
// NOTE(review): body elided in extraction (presumably stores `value`).
3686 * Set alignment exclusion regions
3688 static void set_exclusion(exclusion
*_ax_parameters
, int _ax_count
) {
3689 ax_count
= _ax_count
;
3690 ax_parameters
= _ax_parameters
;
3694 * Get match summary statistics.
3696 static ale_accum
match_summary() {
3697 return match_sum
/ (ale_accum
) match_count
;
3701 template<class diff_trans
>
3702 void *d2::align::diff_stat_generic
<diff_trans
>::diff_subdomain(void *args
) {
3704 subdomain_args
*sargs
= (subdomain_args
*) args
;
3706 struct scale_cluster c
= sargs
->c
;
3707 std::vector
<run
> runs
= sargs
->runs
;
3708 int ax_count
= sargs
->ax_count
;
3710 int i_min
= sargs
->i_min
;
3711 int i_max
= sargs
->i_max
;
3712 int j_min
= sargs
->j_min
;
3713 int j_max
= sargs
->j_max
;
3714 int subdomain
= sargs
->subdomain
;
3716 assert (reference_image
);
3718 point offset
= c
.accum
->offset();
3720 for (mc_iterate
m(i_min
, i_max
, j_min
, j_max
, subdomain
); !m
.done(); m
++) {
3726 * Check reference frame definition.
3729 if (!((pixel
) c
.accum
->get_pixel(i
, j
)).finite()
3730 || !(((pixel
) c
.certainty
->get_pixel(i
, j
)).minabs_norm() > 0))
3734 * Check for exclusion in render coordinates.
3737 if (ref_excluded(i
, j
, offset
, c
.ax_parameters
, ax_count
))
3744 struct point q
, r
= point::undefined();
3746 q
= (c
.input_scale
< 1.0 && interpolant
== NULL
)
3747 ? runs
.back().offset
.scaled_inverse_transform(
3748 point(i
+ offset
[0], j
+ offset
[1]))
3749 : runs
.back().offset
.unscaled_inverse_transform(
3750 point(i
+ offset
[0], j
+ offset
[1]));
3752 if (runs
.size() == 2) {
3753 r
= (c
.input_scale
< 1.0)
3754 ? runs
.front().offset
.scaled_inverse_transform(
3755 point(i
+ offset
[0], j
+ offset
[1]))
3756 : runs
.front().offset
.unscaled_inverse_transform(
3757 point(i
+ offset
[0], j
+ offset
[1]));
3764 * Check that the transformed coordinates are within
3765 * the boundaries of array c.input and that they
3766 * are not subject to exclusion.
3768 * Also, check that the weight value in the accumulated array
3769 * is nonzero, unless we know it is nonzero by virtue of the
3770 * fact that it falls within the region of the original frame
3771 * (e.g. when we're not increasing image extents).
3774 if (input_excluded(ti
, tj
, c
.ax_parameters
, ax_count
))
3777 if (input_excluded(r
[0], r
[1], c
.ax_parameters
, ax_count
))
3778 r
= point::undefined();
3781 * Check the boundaries of the input frame
3785 && ti
<= c
.input
->height() - 1
3787 && tj
<= c
.input
->width() - 1))
3791 && r
[0] <= c
.input
->height() - 1
3793 && r
[1] <= c
.input
->width() - 1))
3794 r
= point::undefined();
3796 sargs
->runs
.back().sample(f
, c
, i
, j
, q
, r
, runs
.front());