// Copyright 2002, 2004, 2007 David Hilvert <dhilvert@auricle.dyndns.org>,
//                                          <dhilvert@ugcs.caltech.edu>

/*  This file is part of the Anti-Lamenessing Engine.

    The Anti-Lamenessing Engine is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License as
    published by the Free Software Foundation; either version 3 of the
    License, or (at your option) any later version.

    The Anti-Lamenessing Engine is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with the Anti-Lamenessing Engine; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

/*
 * align.h: Handle alignment of frames.
 */

#ifndef __d2align_h__
#define __d2align_h__

#include "filter.h"
#include "transformation.h"
#include "image.h"
#include "point.h"
#include "render.h"
#include "tfile.h"
#include "image_rw.h"
36 class align {
37 private:
40 * Alignment properties
43 static ale_align_properties align_properties() {
44 static ale_align_properties data = NULL;
46 if (data == NULL)
47 data = ale_new_align_properties();
49 assert(data);
51 return data;
55 * Private data members
58 static ale_pos scale_factor;
61 * Original frame transformation
63 static transformation orig_t;
66 * Keep data older than latest
68 static int _keep;
69 static transformation *kept_t;
70 static int *kept_ok;
73 * Transformation file handlers
76 static tload_t *tload;
77 static tsave_t *tsave;
80 * Control point variables
83 static const point **cp_array;
84 static unsigned int cp_count;
87 * Reference rendering to align against
90 static render *reference;
91 static filter::scaled_filter *interpolant;
92 static ale_image reference_image;
95 * Per-pixel alignment weight map
98 static ale_image weight_map;
101 * Frequency-dependent alignment weights
104 static double horiz_freq_cut;
105 static double vert_freq_cut;
106 static double avg_freq_cut;
107 static const char *fw_output;
110 * Algorithmic alignment weighting
113 static const char *wmx_exec;
114 static const char *wmx_file;
115 static const char *wmx_defs;
118 * Non-certainty alignment weights
121 static ale_image alignment_weights;
124 * Latest transformation.
127 static transformation latest_t;
130 * Flag indicating whether the latest transformation
131 * resulted in a match.
134 static int latest_ok;
137 * Frame number most recently aligned.
140 static int latest;
143 * Exposure registration
145 * 0. Preserve the original exposure of images.
147 * 1. Match exposure between images.
149 * 2. Use only image metadata for registering exposure.
152 static int _exp_register;
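/*
 * These modes are selected through the public interface further down in
 * this class (exp_noregister(), exp_register(), and exp_meta_only()).
 * For example:
 *
 *     d2::align::exp_meta_only();   // sets _exp_register == 2
 */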
155 * Alignment class.
157 * 0. Translation only. Only adjust the x and y position of images.
158 * Do not rotate input images or perform projective transformations.
160 * 1. Euclidean transformations only. Adjust the x and y position
161 * of images and the orientation of the image about the image center.
163 * 2. Perform general projective transformations. See the file gpt.h
164 * for more information about general projective transformations.
167 static int alignment_class;
170 * Default initial alignment type.
172 * 0. Identity transformation.
174 * 1. Most recently accepted frame's final transformation.
177 static int default_initial_alignment_type;
180 * Projective group behavior
182 * 0. Perturb in output coordinates.
184 * 1. Perturb in source coordinates
187 static int perturb_type;
190 * Alignment for failed frames -- default or optimal?
192 * A frame that does not meet the match threshold can be assigned the
193 * best alignment found, or can be assigned its alignment default.
196 static int is_fail_default;
199 * Alignment code.
201 * 0. Align images with an error contribution from each color channel.
203 * 1. Align images with an error contribution only from the green channel.
204 * Other color channels do not affect alignment.
206 * 2. Align images using a summation of channels. May be useful when dealing
207 * with images that have high frequency color ripples due to color aliasing.
210 static int channel_alignment_type;
213 * Error metric exponent
216 static ale_real metric_exponent;
219 * Match threshold
222 static float match_threshold;
225 * Perturbation lower and upper bounds.
228 static ale_pos perturb_lower;
229 static int perturb_lower_percent;
230 static ale_pos perturb_upper;
231 static int perturb_upper_percent;
234 * Preferred level-of-detail scale factor is 2^lod_preferred/perturb.
237 static int lod_preferred;
240 * Minimum dimension for reduced LOD.
243 static int min_dimension;
246 * Maximum rotational perturbation
249 static ale_pos rot_max;
252 * Barrel distortion alignment multiplier
255 static ale_pos bda_mult;
258 * Barrel distortion maximum adjustment rate
261 static ale_pos bda_rate;
264 * Alignment match sum
267 static ale_accum match_sum;
270 * Alignment match count.
273 static int match_count;
276 * Monte Carlo parameter
279 static ale_pos _mc;
282 * Certainty weight flag
284 * 0. Don't use certainty weights for alignment.
286 * 1. Use certainty weights for alignment.
289 static int certainty_weights;
292 * Global search parameter
294 * 0. Local: Local search only.
295 * 1. Inner: Alignment reference image inner region
296 * 2. Outer: Alignment reference image outer region
297 * 3. All: Alignment reference image inner and outer regions.
298 * 4. Central: Inner if possible; else, best of inner and outer.
299 * 5. Points: Align by control points.
302 static int _gs;
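/*
 * Note that the string parser in gs() below also accepts "defaults"
 * (mapped to _gs == 6), which is not enumerated above; in test_globals(),
 * that setting is treated like the combined inner/outer search.
 */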
305 * Minimum overlap for global searches
308 static ale_accum _gs_mo;
309 static int gs_mo_percent;
312 * Minimum certainty for multi-alignment element registration.
315 static ale_real _ma_cert;
318 * Exclusion regions
321 static exclusion *ax_parameters;
322 static int ax_count;
325 * Types for scale clusters.
328 struct nl_scale_cluster {
329 const image *accum_max;
330 const image *accum_min;
331 const image *certainty_max;
332 const image *certainty_min;
333 const image *aweight_max;
334 const image *aweight_min;
335 exclusion *ax_parameters;
337 ale_pos input_scale;
338 const image *input_certainty_max;
339 const image *input_certainty_min;
340 const image *input_max;
341 const image *input_min;
344 struct scale_cluster {
345 const image *accum;
346 const image *certainty;
347 const image *aweight;
348 exclusion *ax_parameters;
350 ale_pos input_scale;
351 const image *input_certainty;
352 const image *input;
354 nl_scale_cluster *nl_scale_clusters;
358 * Check for exclusion region coverage in the reference
359 * array.
361 static int ref_excluded(int i, int j, point offset, exclusion *params, int param_count) {
362 for (int idx = 0; idx < param_count; idx++)
363 if (params[idx].type == exclusion::RENDER
364 && i + offset[0] >= params[idx].x[0]
365 && i + offset[0] <= params[idx].x[1]
366 && j + offset[1] >= params[idx].x[2]
367 && j + offset[1] <= params[idx].x[3])
368 return 1;
370 return 0;
374 * Check for exclusion region coverage in the input
375 * array.
377 static int input_excluded(ale_pos ti, ale_pos tj, exclusion *params, int param_count) {
378 for (int idx = 0; idx < param_count; idx++)
379 if (params[idx].type == exclusion::FRAME
380 && ti >= params[idx].x[0]
381 && ti <= params[idx].x[1]
382 && tj >= params[idx].x[2]
383 && tj <= params[idx].x[3])
384 return 1;
386 return 0;
390 * Overlap function. Determines the number of pixels in areas where
391 * the arrays overlap. Uses the reference array's notion of pixel
392 * positions.
394 static unsigned int overlap(struct scale_cluster c, transformation t, int ax_count) {
395 assert (reference_image);
397 unsigned int result = 0;
399 point offset = c.accum->offset();
401 for (unsigned int i = 0; i < c.accum->height(); i++)
402 for (unsigned int j = 0; j < c.accum->width(); j++) {
404 if (ref_excluded(i, j, offset, c.ax_parameters, ax_count))
405 continue;
408 * Transform
411 struct point q;
413 q = (c.input_scale < 1.0 && interpolant == NULL)
414 ? t.scaled_inverse_transform(
415 point(i + offset[0], j + offset[1]))
416 : t.unscaled_inverse_transform(
417 point(i + offset[0], j + offset[1]));
419 ale_pos ti = q[0];
420 ale_pos tj = q[1];
423 * Check that the transformed coordinates are within
424 * the boundaries of array c.input, and check that the
425 * weight value in the accumulated array is nonzero,
426 * unless we know it is nonzero by virtue of the fact
427 * that it falls within the region of the original
428 * frame (e.g. when we're not increasing image
429 * extents). Also check for frame exclusion.
432 if (input_excluded(ti, tj, c.ax_parameters, ax_count))
433 continue;
435 if (ti >= 0
436 && ti <= c.input->height() - 1
437 && tj >= 0
438 && tj <= c.input->width() - 1
439 && c.certainty->get_pixel(i, j)[0] != 0)
440 result++;
443 return result;
447 * Monte Carlo iteration class.
449 * Monte Carlo alignment has been used for statistical comparisons in
450 * spatial registration, and is now used for tonal registration
451 * and final match calculation.
455 * We use a random process for which the expected number of sampled
456 * pixels is +/- .000003 from the coverage in the range [.005,.995] for
457 * an image with 100,000 pixels. (The actual number may still deviate
458 * from the expected number by more than this amount, however.) The
459 * method is as follows:
461 * We have coverage == USE/ALL, or (expected # pixels to use)/(# total
462 * pixels). We derive from this SKIP/USE.
464 * SKIP/USE == (SKIP/ALL)/(USE/ALL) == (1 - (USE/ALL))/(USE/ALL)
466 * Once we have SKIP/USE, we know the expected number of pixels to skip
467 * in each iteration. We use a random selection process that provides
468 * SKIP/USE close to this calculated value.
470 * If we can draw uniformly to select the number of pixels to skip, we
471 * do. In this case, the maximum number of pixels to skip is twice the
472 * expected number.
474 * If we cannot draw uniformly, we still assign equal probability to
475 * each of the integer values in the interval [0, 2 * (SKIP/USE)], but
476 * assign an unequal amount to the integer value ceil(2 * SKIP/USE) +
477 * 1.
481 * When reseeding the random number generator, we want the same set of
482 * pixels to be used in cases where two alignment options are compared.
483 * If we wanted to avoid bias from repeatedly utilizing the same seed,
484 * we could seed with the number of the frame most recently aligned:
486 * srand(latest);
488 * However, in cursory tests, it seems okay to just use the default
489 * seed of 1, and so we do this, since it is simpler; both of these
490 * approaches to reseeding achieve better results than not reseeding.
491 * (1 is the default seed according to the GNU Manual Page for
492 * rand(3).)
494 * For subdomain calculations, we vary the seed by adding the subdomain
495 * index.
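/*
 * Illustrative arithmetic (the particular numbers below are chosen for
 * this example and do not appear in the code): with _mc == 25, coverage
 * is 0.25, so SKIP/USE == (1 - 0.25) / 0.25 == 3; on average three
 * pixels are skipped for every pixel sampled, and a uniform draw from
 * [0, 2 * (SKIP/USE)] == [0, 6] has the expected value 3.  For su == 3,
 * the mc_max expression used in the constructor below evaluates to
 * (6 * (1 + 6) + 6) / (2 + 2 * 6 - 6) == 48 / 8 == 6, i.e. twice the
 * expected skip count, consistent with the description above.
 */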
498 class mc_iterate {
499 ale_pos mc_max;
500 unsigned int index;
501 unsigned int index_max;
502 int i_min;
503 int i_max;
504 int j_min;
505 int j_max;
507 rng_t rng;
509 public:
510 mc_iterate(int _i_min, int _i_max, int _j_min, int _j_max, unsigned int subdomain)
511 : rng() {
513 ale_pos coverage;
515 i_min = _i_min;
516 i_max = _i_max;
517 j_min = _j_min;
518 j_max = _j_max;
520 if (i_max < i_min || j_max < j_min)
521 index_max = 0;
522 else
523 index_max = (i_max - i_min) * (j_max - j_min);
525 if (index_max < 500 || _mc > 100 || _mc <= 0)
526 coverage = 1;
527 else
528 coverage = _mc / 100;
530 double su = (1 - coverage) / coverage;
532 mc_max = (floor(2*su) * (1 + floor(2*su)) + 2*su)
533 / (2 + 2 * floor(2*su) - 2*su);
535 rng.seed(1 + subdomain);
537 #define FIXED16 3
538 #if ALE_COORDINATES == FIXED16
540 * XXX: This calculation might not yield the correct
541 * expected value.
543 index = -1 + (int) ceil(((ale_pos) mc_max+1)
544 / (ale_pos) ( (1 + 0xffffff)
545 / (1 + (rng.get() & 0xffffff))));
546 #else
547 index = -1 + (int) ceil((ale_accum) (mc_max+1)
548 * ( (1 + ((ale_accum) (rng.get())) )
549 / (1 + ((ale_accum) RAND_MAX)) ));
550 #endif
551 #undef FIXED16
554 int get_i() {
555 return index / (j_max - j_min) + i_min;
558 int get_j() {
559 return index % (j_max - j_min) + j_min;
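/*
 * For example (hypothetical window): with i_min == 0, j_min == 0 and
 * j_max == 100, an index of 250 maps to i == 2, j == 50; the iterator
 * walks the sampling window in row-major order.
 */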
562 void operator++(int whats_this_for) {
563 #define FIXED16 3
564 #if ALE_COORDINATES == FIXED16
565 index += (int) ceil(((ale_pos) mc_max+1)
566 / (ale_pos) ( (1 + 0xffffff)
567 / (1 + (rng.get() & 0xffffff))));
568 #else
569 index += (int) ceil((ale_accum) (mc_max+1)
570 * ( (1 + ((ale_accum) (rng.get())) )
571 / (1 + ((ale_accum) RAND_MAX)) ));
572 #endif
573 #undef FIXED16
576 int done() {
577 return (index >= index_max);
582 * Not-quite-symmetric difference function. Determines the difference in areas
583 * where the arrays overlap. Uses the reference array's notion of pixel positions.
585 * For the purposes of determining the difference, this function divides each
586 * pixel value by the corresponding image's average pixel magnitude, unless we
587 * are:
589 * a) Extending the boundaries of the image, or
591 * b) following the previous frame's transform
593 * If we are doing Monte Carlo pixel sampling for alignment, we
594 * typically sample a subset of available pixels; otherwise, we sample
595 * all pixels.
599 template<class diff_trans>
600 class diff_stat_generic {
602 transformation::elem_bounds_t elem_bounds;
604 struct run {
606 diff_trans offset;
608 ale_accum result;
609 ale_accum divisor;
611 point max, min;
612 ale_accum centroid[2], centroid_divisor;
613 ale_accum de_centroid[2], de_centroid_v, de_sum;
615 void init() {
617 result = 0;
618 divisor = 0;
620 min = point::posinf();
621 max = point::neginf();
623 centroid[0] = 0;
624 centroid[1] = 0;
625 centroid_divisor = 0;
627 de_centroid[0] = 0;
628 de_centroid[1] = 0;
630 de_centroid_v = 0;
632 de_sum = 0;
635 void init(diff_trans _offset) {
636 offset = _offset;
637 init();
641 * Required for STL sanity.
643 run() : offset(diff_trans::eu_identity()) {
644 init();
647 run(diff_trans _offset) : offset(_offset) {
648 init(_offset);
651 void add(const run &_run) {
652 result += _run.result;
653 divisor += _run.divisor;
655 for (int d = 0; d < 2; d++) {
656 if (min[d] > _run.min[d])
657 min[d] = _run.min[d];
658 if (max[d] < _run.max[d])
659 max[d] = _run.max[d];
660 centroid[d] += _run.centroid[d];
661 de_centroid[d] += _run.de_centroid[d];
664 centroid_divisor += _run.centroid_divisor;
665 de_centroid_v += _run.de_centroid_v;
666 de_sum += _run.de_sum;
669 run(const run &_run) : offset(_run.offset) {
672 * Initialize
674 init(_run.offset);
677 * Add
679 add(_run);
682 run &operator=(const run &_run) {
685 * Initialize
687 init(_run.offset);
690 * Add
692 add(_run);
694 return *this;
697 ~run() {
700 ale_accum get_error() const {
701 return pow(result / divisor, 1/(ale_accum) metric_exponent);
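/*
 * Sketch of the metric, in terms of the accumulation performed in
 * sample() below:
 *
 *     error = ( sum w * |a - b|^p / sum w * max(a, b)^p ) ^ (1/p)
 *
 * where p is metric_exponent, a is a reference (accumulated) channel
 * value, b is the corresponding transformed-input channel value, and w
 * is the sample weight.  Illustrative numbers: a single sample with
 * w == 1, a == 0.5, b == 0.25 and p == 2 gives 0.0625 / 0.25 == 0.25,
 * so error == sqrt(0.25) == 0.5.
 */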
704 void sample(int f, scale_cluster c, int i, int j, point t, point u,
705 const run &comparison) {
707 pixel pa = c.accum->get_pixel(i, j);
709 ale_real this_result[2] = { 0, 0 };
710 ale_real this_divisor[2] = { 0, 0 };
712 pixel p[2];
713 pixel weight[2];
714 weight[0] = pixel(1, 1, 1);
715 weight[1] = pixel(1, 1, 1);
717 pixel tm = offset.get_tonal_multiplier(point(i, j) + c.accum->offset());
719 if (interpolant != NULL) {
720 interpolant->filtered(i, j, &p[0], &weight[0], 1, f);
722 if (weight[0].min_norm() > ale_real_weight_floor) {
723 p[0] /= weight[0];
724 } else {
725 return;
728 } else {
729 p[0] = c.input->get_bl(t);
732 p[0] *= tm;
734 if (u.defined()) {
735 p[1] = c.input->get_bl(u);
736 p[1] *= tm;
741 * Handle certainty.
744 if (certainty_weights == 1) {
747 * For speed, use arithmetic interpolation (get_bl(.))
748 * instead of geometric (get_bl(., 1))
751 weight[0] *= c.input_certainty->get_bl(t);
752 if (u.defined())
753 weight[1] *= c.input_certainty->get_bl(u);
754 weight[0] *= c.certainty->get_pixel(i, j);
755 weight[1] *= c.certainty->get_pixel(i, j);
758 if (c.aweight != NULL) {
759 weight[0] *= c.aweight->get_pixel(i, j);
760 weight[1] *= c.aweight->get_pixel(i, j);
764 * Update sampling area statistics
767 if (min[0] > i)
768 min[0] = i;
769 if (min[1] > j)
770 min[1] = j;
771 if (max[0] < i)
772 max[0] = i;
773 if (max[1] < j)
774 max[1] = j;
776 centroid[0] += (weight[0][0] + weight[0][1] + weight[0][2]) * i;
777 centroid[1] += (weight[0][0] + weight[0][1] + weight[0][2]) * j;
778 centroid_divisor += (weight[0][0] + weight[0][1] + weight[0][2]);
781 * Determine alignment type.
784 for (int m = 0; m < (u.defined() ? 2 : 1); m++)
785 if (channel_alignment_type == 0) {
787 * Align based on all channels.
791 for (int k = 0; k < 3; k++) {
792 ale_real achan = pa[k];
793 ale_real bchan = p[m][k];
795 this_result[m] += weight[m][k] * pow(fabs(achan - bchan), metric_exponent);
796 this_divisor[m] += weight[m][k] * pow(achan > bchan ? achan : bchan, metric_exponent);
798 } else if (channel_alignment_type == 1) {
800 * Align based on the green channel.
803 ale_real achan = pa[1];
804 ale_real bchan = p[m][1];
806 this_result[m] = weight[m][1] * pow(fabs(achan - bchan), metric_exponent);
807 this_divisor[m] = weight[m][1] * pow(achan > bchan ? achan : bchan, metric_exponent);
808 } else if (channel_alignment_type == 2) {
810 * Align based on the sum of all channels.
813 ale_real asum = 0;
814 ale_real bsum = 0;
815 ale_real wsum = 0;
817 for (int k = 0; k < 3; k++) {
818 asum += pa[k];
819 bsum += p[m][k];
820 wsum += weight[m][k] / 3;
823 this_result[m] = wsum * pow(fabs(asum - bsum), metric_exponent);
824 this_divisor[m] = wsum * pow(asum > bsum ? asum : bsum, metric_exponent);
827 if (u.defined()) {
828 // ale_real de = fabs(this_result[0] / this_divisor[0]
829 // - this_result[1] / this_divisor[1]);
830 ale_real de = fabs(this_result[0] - this_result[1]);
832 de_centroid[0] += de * (ale_real) i;
833 de_centroid[1] += de * (ale_real) j;
835 de_centroid_v += de * (ale_real) t.lengthto(u);
837 de_sum += de;
840 result += (this_result[0]);
841 divisor += (this_divisor[0]);
844 void rescale(ale_pos scale) {
845 offset.rescale(scale);
847 de_centroid[0] *= scale;
848 de_centroid[1] *= scale;
849 de_centroid_v *= scale;
852 point get_centroid() {
853 point result = point(centroid[0] / centroid_divisor, centroid[1] / centroid_divisor);
855 assert (finite(centroid[0])
856 && finite(centroid[1])
857 && (result.defined() || centroid_divisor == 0));
859 return result;
862 point get_error_centroid() {
863 point result = point(de_centroid[0] / de_sum, de_centroid[1] / de_sum);
864 return result;
868 ale_pos get_error_perturb() {
869 ale_pos result = de_centroid_v / de_sum;
871 return result;
877 * When non-empty, runs.front() is best, runs.back() is
878 * testing.
881 std::vector<run> runs;
884 * old_runs stores the latest available perturbation set for
885 * each multi-alignment element.
888 typedef int run_index;
889 std::map<run_index, run> old_runs;
891 static void *diff_subdomain(void *args);
893 struct subdomain_args {
894 struct scale_cluster c;
895 std::vector<run> runs;
896 int ax_count;
897 int f;
898 int i_min, i_max, j_min, j_max;
899 int subdomain;
902 struct scale_cluster si;
903 int ax_count;
904 int frame;
906 std::vector<ale_pos> perturb_multipliers;
908 public:
909 void diff(struct scale_cluster c, const diff_trans &t,
910 int _ax_count, int f) {
912 if (runs.size() == 2)
913 runs.pop_back();
915 runs.push_back(run(t));
917 si = c;
918 ax_count = _ax_count;
919 frame = f;
921 ui::get()->d2_align_sample_start();
923 if (interpolant != NULL) {
926 * XXX: This has not been tested, and may be completely
927 * wrong.
930 transformation tt = transformation::eu_identity();
931 tt.set_current_element(t);
932 interpolant->set_parameters(tt, c.input, c.accum->offset());
935 int N;
936 #ifdef USE_PTHREAD
937 N = thread::count();
939 pthread_t *threads = (pthread_t *) malloc(sizeof(pthread_t) * N);
940 pthread_attr_t *thread_attr = (pthread_attr_t *) malloc(sizeof(pthread_attr_t) * N);
942 #else
943 N = 1;
944 #endif
946 subdomain_args *args = new subdomain_args[N];
948 transformation::elem_bounds_int_t b = elem_bounds.scale_to_bounds(c.accum->height(), c.accum->width());
950 // fprintf(stdout, "[%d %d] [%d %d]\n",
951 // global_i_min, global_i_max, global_j_min, global_j_max);
953 for (int ti = 0; ti < N; ti++) {
954 args[ti].c = c;
955 args[ti].runs = runs;
956 args[ti].ax_count = ax_count;
957 args[ti].f = f;
958 args[ti].i_min = b.imin + ((b.imax - b.imin) * ti) / N;
959 args[ti].i_max = b.imin + ((b.imax - b.imin) * (ti + 1)) / N;
960 args[ti].j_min = b.jmin;
961 args[ti].j_max = b.jmax;
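/*
 * The integer arithmetic above partitions rows [imin, imax) evenly
 * among the N threads.  Illustrative values: b.imin == 0, b.imax == 103
 * and N == 4 give the row ranges [0, 25), [25, 51), [51, 77) and
 * [77, 103), covering every row exactly once.
 */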
962 args[ti].subdomain = ti;
963 #ifdef USE_PTHREAD
964 pthread_attr_init(&thread_attr[ti]);
965 pthread_attr_setdetachstate(&thread_attr[ti], PTHREAD_CREATE_JOINABLE);
966 pthread_create(&threads[ti], &thread_attr[ti], diff_subdomain, &args[ti]);
967 #else
968 diff_subdomain(&args[ti]);
969 #endif
972 for (int ti = 0; ti < N; ti++) {
973 #ifdef USE_PTHREAD
974 pthread_join(threads[ti], NULL);
975 #endif
976 runs.back().add(args[ti].runs.back());
979 #ifdef USE_PTHREAD
980 free(threads);
981 free(thread_attr);
982 #endif
984 delete[] args;
986 ui::get()->d2_align_sample_stop();
990 private:
991 void rediff() {
992 std::vector<diff_trans> t_array;
994 for (unsigned int r = 0; r < runs.size(); r++) {
995 t_array.push_back(runs[r].offset);
998 runs.clear();
1000 for (unsigned int r = 0; r < t_array.size(); r++)
1001 diff(si, t_array[r], ax_count, frame);
1005 public:
1006 int better() {
1007 assert(runs.size() >= 2);
1008 assert(runs[0].offset.scale() == runs[1].offset.scale());
1010 return (runs[1].get_error() < runs[0].get_error()
1011 || (!finite(runs[0].get_error()) && finite(runs[1].get_error())));
1014 int better_defined() {
1015 assert(runs.size() >= 2);
1016 assert(runs[0].offset.scale() == runs[1].offset.scale());
1018 return (runs[1].get_error() < runs[0].get_error());
1021 diff_stat_generic(transformation::elem_bounds_t e)
1022 : runs(), old_runs(), perturb_multipliers() {
1023 elem_bounds = e;
1026 run_index get_run_index(unsigned int perturb_index) {
1027 return perturb_index;
1030 run &get_run(unsigned int perturb_index) {
1031 run_index index = get_run_index(perturb_index);
1033 assert(old_runs.count(index));
1034 return old_runs[index];
1037 void rescale(ale_pos scale, scale_cluster _si) {
1038 assert(runs.size() == 1);
1040 si = _si;
1042 runs[0].rescale(scale);
1044 rediff();
1047 ~diff_stat_generic() {
1050 diff_stat_generic &operator=(const diff_stat_generic &dst) {
1052 * Copy run information.
1054 runs = dst.runs;
1055 old_runs = dst.old_runs;
1058 * Copy diff variables
1060 si = dst.si;
1061 ax_count = dst.ax_count;
1062 frame = dst.frame;
1063 perturb_multipliers = dst.perturb_multipliers;
1064 elem_bounds = dst.elem_bounds;
1066 return *this;
1069 diff_stat_generic(const diff_stat_generic &dst) : runs(), old_runs(),
1070 perturb_multipliers() {
1071 operator=(dst);
1074 void set_elem_bounds(transformation::elem_bounds_t e) {
1075 elem_bounds = e;
1078 ale_accum get_result() {
1079 assert(runs.size() == 1);
1080 return runs[0].result;
1083 ale_accum get_divisor() {
1084 assert(runs.size() == 1);
1085 return runs[0].divisor;
1088 diff_trans get_offset() {
1089 assert(runs.size() == 1);
1090 return runs[0].offset;
1093 int operator!=(diff_stat_generic &param) {
1094 return (get_error() != param.get_error());
1097 int operator==(diff_stat_generic &param) {
1098 return !(operator!=(param));
1101 ale_pos get_error_perturb() {
1102 assert(runs.size() == 1);
1103 return runs[0].get_error_perturb();
1106 ale_accum get_error() const {
1107 assert(runs.size() == 1);
1108 return runs[0].get_error();
1111 public:
1113 * Get the set of transformations produced by a given perturbation
1115 void get_perturb_set(std::vector<diff_trans> *set,
1116 ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
1117 ale_pos *current_bd, ale_pos *modified_bd,
1118 std::vector<ale_pos> multipliers = std::vector<ale_pos>()) {
1120 assert(runs.size() == 1);
1122 diff_trans test_t(diff_trans::eu_identity());
1125 * Translational or euclidean transformation
1128 for (unsigned int i = 0; i < 2; i++)
1129 for (unsigned int s = 0; s < 2; s++) {
1131 if (!multipliers.size())
1132 multipliers.push_back(1);
1134 assert(finite(multipliers[0]));
1136 test_t = get_offset();
1138 // test_t.eu_modify(i, (s ? -adj_p : adj_p) * multipliers[0]);
1139 test_t.translate((i ? point(1, 0) : point(0, 1))
1140 * (s ? -adj_p : adj_p)
1141 * multipliers[0]);
1143 test_t.snap(adj_p / 2);
1145 set->push_back(test_t);
1146 multipliers.erase(multipliers.begin());
1150 if (alignment_class > 0)
1151 for (unsigned int s = 0; s < 2; s++) {
1153 if (!multipliers.size())
1154 multipliers.push_back(1);
1156 assert(multipliers.size());
1157 assert(finite(multipliers[0]));
1159 if (!(adj_o * multipliers[0] < rot_max))
1160 return;
1162 ale_pos adj_s = (s ? 1 : -1) * adj_o * multipliers[0];
1164 test_t = get_offset();
1166 run_index ori = get_run_index(set->size());
1167 point centroid = point::undefined();
1169 if (!old_runs.count(ori))
1170 ori = get_run_index(0);
1172 if (!centroid.finite() && old_runs.count(ori)) {
1173 centroid = old_runs[ori].get_error_centroid();
1175 if (!centroid.finite())
1176 centroid = old_runs[ori].get_centroid();
1178 centroid *= test_t.scale()
1179 / old_runs[ori].offset.scale();
1182 if (!centroid.finite() && !test_t.is_projective()) {
1183 test_t.eu_modify(2, adj_s);
1184 } else if (!centroid.finite()) {
1185 centroid = point(si.input->height() / 2,
1186 si.input->width() / 2);
1188 test_t.rotate(centroid + si.accum->offset(),
1189 adj_s);
1190 } else {
1191 test_t.rotate(centroid + si.accum->offset(),
1192 adj_s);
1195 test_t.snap(adj_p / 2);
1197 set->push_back(test_t);
1198 multipliers.erase(multipliers.begin());
1201 if (alignment_class == 2) {
1204 * Projective transformation
1207 for (unsigned int i = 0; i < 4; i++)
1208 for (unsigned int j = 0; j < 2; j++)
1209 for (unsigned int s = 0; s < 2; s++) {
1211 if (!multipliers.size())
1212 multipliers.push_back(1);
1214 assert(multipliers.size());
1215 assert(finite(multipliers[0]));
1217 ale_pos adj_s = (s ? -1 : 1) * adj_p * multipliers [0];
1219 test_t = get_offset();
1221 if (perturb_type == 0)
1222 test_t.gpt_modify_bounded(j, i, adj_s, elem_bounds.scale_to_bounds(si.accum->height(), si.accum->width()));
1223 else if (perturb_type == 1)
1224 test_t.gr_modify(j, i, adj_s);
1225 else
1226 assert(0);
1228 test_t.snap(adj_p / 2);
1230 set->push_back(test_t);
1231 multipliers.erase(multipliers.begin());
1237 * Barrel distortion
1240 if (bda_mult != 0 && adj_b != 0) {
1242 for (unsigned int d = 0; d < get_offset().bd_count(); d++)
1243 for (unsigned int s = 0; s < 2; s++) {
1245 if (!multipliers.size())
1246 multipliers.push_back(1);
1248 assert (multipliers.size());
1249 assert (finite(multipliers[0]));
1251 ale_pos adj_s = (s ? -1 : 1) * adj_b * multipliers[0];
1253 if (bda_rate > 0 && fabs(modified_bd[d] + adj_s - current_bd[d]) > bda_rate)
1254 continue;
1256 diff_trans test_t = get_offset();
1258 test_t.bd_modify(d, adj_s);
1260 set->push_back(test_t);
1265 void confirm() {
1266 assert(runs.size() == 2);
1267 runs[0] = runs[1];
1268 runs.pop_back();
1271 void discard() {
1272 assert(runs.size() == 2);
1273 runs.pop_back();
1276 void perturb_test(ale_pos perturb, ale_pos adj_p, ale_pos adj_o, ale_pos adj_b,
1277 ale_pos *current_bd, ale_pos *modified_bd, int stable) {
1279 assert(runs.size() == 1);
1281 std::vector<diff_trans> t_set;
1283 if (perturb_multipliers.size() == 0) {
1284 get_perturb_set(&t_set, adj_p, adj_o, adj_b,
1285 current_bd, modified_bd);
1287 for (unsigned int i = 0; i < t_set.size(); i++) {
1288 diff_stat_generic test = *this;
1290 test.diff(si, t_set[i], ax_count, frame);
1292 test.confirm();
1294 if (finite(test.get_error_perturb()))
1295 perturb_multipliers.push_back(adj_p / test.get_error_perturb());
1296 else
1297 perturb_multipliers.push_back(1);
1301 t_set.clear();
1304 get_perturb_set(&t_set, adj_p, adj_o, adj_b, current_bd, modified_bd,
1305 perturb_multipliers);
1307 int found_unreliable = 1;
1308 std::vector<int> tested(t_set.size(), 0);
1310 for (unsigned int i = 0; i < t_set.size(); i++) {
1311 run_index ori = get_run_index(i);
1314 * Check for stability
1316 if (stable
1317 && old_runs.count(ori)
1318 && old_runs[ori].offset == t_set[i])
1319 tested[i] = 1;
1322 std::vector<ale_pos> perturb_multipliers_original = perturb_multipliers;
1324 while (found_unreliable) {
1326 found_unreliable = 0;
1328 for (unsigned int i = 0; i < t_set.size(); i++) {
1330 if (tested[i])
1331 continue;
1333 diff(si, t_set[i], ax_count, frame);
1335 if (!(i < perturb_multipliers.size())
1336 || !finite(perturb_multipliers[i])) {
1338 perturb_multipliers.resize(i + 1);
1340 if (finite(perturb_multipliers[i])
1341 && finite(adj_p)
1342 && finite(adj_p / runs[1].get_error_perturb())) {
1343 perturb_multipliers[i] =
1344 adj_p / runs[1].get_error_perturb();
1346 found_unreliable = 1;
1347 } else
1348 perturb_multipliers[i] = 1;
1350 continue;
1353 run_index ori = get_run_index(i);
1355 if (old_runs.count(ori) == 0)
1356 old_runs.insert(std::pair<run_index, run>(ori, runs[1]));
1357 else
1358 old_runs[ori] = runs[1];
1360 if (finite(perturb_multipliers_original[i])
1361 && finite(runs[1].get_error_perturb())
1362 && finite(adj_p)
1363 && finite(perturb_multipliers_original[i] * adj_p / runs[1].get_error_perturb()))
1364 perturb_multipliers[i] = perturb_multipliers_original[i]
1365 * adj_p / runs[1].get_error_perturb();
1366 else
1367 perturb_multipliers[i] = 1;
1369 tested[i] = 1;
1371 if (better()
1372 && runs[1].get_error() < runs[0].get_error()
1373 && perturb_multipliers[i]
1374 / perturb_multipliers_original[i] < 2) {
1375 runs[0] = runs[1];
1376 runs.pop_back();
1377 return;
1382 if (runs.size() > 1)
1383 runs.pop_back();
1385 if (!found_unreliable)
1386 return;
1392 typedef diff_stat_generic<trans_single> diff_stat_t;
1393 typedef diff_stat_generic<trans_multi> diff_stat_multi;
1397 * Adjust exposure for an aligned frame B against reference A.
1399 * Expects full-LOD images.
1401 * Note: This method does not use any weighting, by certainty or
1402 * otherwise, in the first exposure registration pass, as any bias of
1403 * weighting according to color may also bias the exposure registration
1404 * result; it does use weighting, including weighting by certainty
1405 * (even if certainty weighting is not specified), in the second pass,
1406 * under the assumption that weighting by certainty improves handling
1407 * of out-of-range highlights, and that bias of exposure measurements
1408 * according to color may generally be less harmful after spatial
1409 * registration has been performed.
1411 class exposure_ratio_iterate : public thread::decompose_domain {
1412 pixel_accum *asums;
1413 pixel_accum *bsums;
1414 pixel_accum *asum;
1415 pixel_accum *bsum;
1416 struct scale_cluster c;
1417 const transformation &t;
1418 int ax_count;
1419 int pass_number;
1420 protected:
1421 void prepare_subdomains(unsigned int N) {
1422 asums = new pixel_accum[N];
1423 bsums = new pixel_accum[N];
1425 void subdomain_algorithm(unsigned int thread,
1426 int i_min, int i_max, int j_min, int j_max) {
1428 point offset = c.accum->offset();
1430 for (mc_iterate m(i_min, i_max, j_min, j_max, thread); !m.done(); m++) {
1432 unsigned int i = (unsigned int) m.get_i();
1433 unsigned int j = (unsigned int) m.get_j();
1435 if (ref_excluded(i, j, offset, c.ax_parameters, ax_count))
1436 continue;
1439 * Transform
1442 struct point q;
1444 q = (c.input_scale < 1.0 && interpolant == NULL)
1445 ? t.scaled_inverse_transform(
1446 point(i + offset[0], j + offset[1]))
1447 : t.unscaled_inverse_transform(
1448 point(i + offset[0], j + offset[1]));
1451 * Check that the transformed coordinates are within
1452 * the boundaries of array c.input, that they are not
1453 * subject to exclusion, and that the weight value in
1454 * the accumulated array is nonzero.
1457 if (input_excluded(q[0], q[1], c.ax_parameters, ax_count))
1458 continue;
1460 if (q[0] >= 0
1461 && q[0] <= c.input->height() - 1
1462 && q[1] >= 0
1463 && q[1] <= c.input->width() - 1
1464 && ((pixel) c.certainty->get_pixel(i, j)).minabs_norm() != 0) {
1465 pixel a = c.accum->get_pixel(i, j);
1466 pixel b;
1468 b = c.input->get_bl(q);
1470 pixel weight = ((c.aweight && pass_number)
1471 ? (pixel) c.aweight->get_pixel(i, j)
1472 : pixel(1, 1, 1))
1473 * (pass_number
1474 ? psqrt(c.certainty->get_pixel(i, j)
1475 * c.input_certainty->get_bl(q, 1))
1476 : pixel(1, 1, 1));
1478 asums[thread] += a * weight;
1479 bsums[thread] += b * weight;
1484 void finish_subdomains(unsigned int N) {
1485 for (unsigned int n = 0; n < N; n++) {
1486 *asum += asums[n];
1487 *bsum += bsums[n];
1489 delete[] asums;
1490 delete[] bsums;
1492 public:
1493 exposure_ratio_iterate(pixel_accum *_asum,
1494 pixel_accum *_bsum,
1495 struct scale_cluster _c,
1496 const transformation &_t,
1497 int _ax_count,
1498 int _pass_number) : decompose_domain(0, _c.accum->height(),
1499 0, _c.accum->width()),
1500 t(_t) {
1502 asum = _asum;
1503 bsum = _bsum;
1504 c = _c;
1505 ax_count = _ax_count;
1506 pass_number = _pass_number;
1509 exposure_ratio_iterate(pixel_accum *_asum,
1510 pixel_accum *_bsum,
1511 struct scale_cluster _c,
1512 const transformation &_t,
1513 int _ax_count,
1514 int _pass_number,
1515 transformation::elem_bounds_int_t b) : decompose_domain(b.imin, b.imax,
1516 b.jmin, b.jmax),
1517 t(_t) {
1519 asum = _asum;
1520 bsum = _bsum;
1521 c = _c;
1522 ax_count = _ax_count;
1523 pass_number = _pass_number;
1527 static void set_exposure_ratio(unsigned int m, struct scale_cluster c,
1528 const transformation &t, int ax_count, int pass_number) {
1530 if (_exp_register == 2) {
1532 * Use metadata only.
1534 ale_real gain_multiplier = image_rw::exp(m).get_gain_multiplier();
1535 pixel multiplier = pixel(gain_multiplier, gain_multiplier, gain_multiplier);
1537 image_rw::exp(m).set_multiplier(multiplier);
1538 ui::get()->exp_multiplier(multiplier[0],
1539 multiplier[1],
1540 multiplier[2]);
1542 return;
1545 pixel_accum asum(0, 0, 0), bsum(0, 0, 0);
1547 exposure_ratio_iterate eri(&asum, &bsum, c, t, ax_count, pass_number);
1548 eri.run();
1550 // std::cerr << (asum / bsum) << " ";
1552 pixel_accum new_multiplier;
1554 new_multiplier = asum / bsum * image_rw::exp(m).get_multiplier();
1556 if (finite(new_multiplier[0])
1557 && finite(new_multiplier[1])
1558 && finite(new_multiplier[2])) {
1559 image_rw::exp(m).set_multiplier(new_multiplier);
1560 ui::get()->exp_multiplier(new_multiplier[0],
1561 new_multiplier[1],
1562 new_multiplier[2]);
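/*
 * Illustrative values (not from the code): if the weighted sums over the
 * common area are asum == (0.8, 0.8, 0.8) and bsum == (0.4, 0.4, 0.4),
 * then asum / bsum == (2, 2, 2), and the exposure multiplier for frame m
 * is doubled so that the input frame is brightened to match the
 * reference.
 */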
1567 * Copy all ax parameters.
1569 static exclusion *copy_ax_parameters(int local_ax_count, exclusion *source) {
1571 exclusion *dest = (exclusion *) malloc(local_ax_count * sizeof(exclusion));
1573 assert (dest);
1575 if (!dest)
1576 ui::get()->memory_error("exclusion regions");
1578 for (int idx = 0; idx < local_ax_count; idx++)
1579 dest[idx] = source[idx];
1581 return dest;
1585 * Copy ax parameters according to frame.
1587 static exclusion *filter_ax_parameters(int frame, int *local_ax_count) {
1589 exclusion *dest = (exclusion *) malloc(ax_count * sizeof(exclusion));
1591 assert (dest);
1593 if (!dest)
1594 ui::get()->memory_error("exclusion regions");
1596 *local_ax_count = 0;
1598 for (int idx = 0; idx < ax_count; idx++) {
1599 if (ax_parameters[idx].x[4] > frame
1600 || ax_parameters[idx].x[5] < frame)
1601 continue;
1603 dest[*local_ax_count] = ax_parameters[idx];
1605 (*local_ax_count)++;
1608 return dest;
1611 static void scale_ax_parameters(int local_ax_count, exclusion *ax_parameters,
1612 ale_pos ref_scale, ale_pos input_scale) {
1613 for (int i = 0; i < local_ax_count; i++) {
1614 ale_pos scale = (ax_parameters[i].type == exclusion::RENDER)
1615 ? ref_scale
1616 : input_scale;
1618 for (int n = 0; n < 6; n++) {
1619 ax_parameters[i].x[n] = ax_parameters[i].x[n] * scale;
1625 * Prepare the next level of detail for ordinary images.
1627 static const image *prepare_lod(const image *current) {
1628 if (current == NULL)
1629 return NULL;
1631 return current->scale_by_half("prepare_lod");
1635 * Prepare the next level of detail for definition maps.
1637 static const image *prepare_lod_def(const image *current) {
1638 if (current == NULL)
1639 return NULL;
1641 return current->defined_scale_by_half("prepare_lod_def");
1645 * Initialize scale cluster data structures.
1648 static void init_nl_cluster(struct scale_cluster *sc) {
1652 * Destroy the first element in the scale cluster data structure.
1654 static void final_clusters(struct scale_cluster *scale_clusters, ale_pos scale_factor,
1655 unsigned int steps) {
1657 if (scale_clusters[0].input_scale < 1.0) {
1658 delete scale_clusters[0].input;
1661 delete scale_clusters[0].input_certainty;
1663 free((void *)scale_clusters[0].ax_parameters);
1665 for (unsigned int step = 1; step < steps; step++) {
1666 delete scale_clusters[step].accum;
1667 delete scale_clusters[step].certainty;
1668 delete scale_clusters[step].aweight;
1670 if (scale_clusters[step].input_scale < 1.0) {
1671 delete scale_clusters[step].input;
1672 delete scale_clusters[step].input_certainty;
1675 free((void *)scale_clusters[step].ax_parameters);
1678 free(scale_clusters);
1682 * Calculate the centroid of a control point for the set of frames
1683 * having index lower than m. Divide by any scaling of the output.
1685 static point unscaled_centroid(unsigned int m, unsigned int p) {
1686 assert(_keep);
1688 point point_sum(0, 0);
1689 ale_accum divisor = 0;
1691 for(unsigned int j = 0; j < m; j++) {
1692 point pp = cp_array[p][j];
1694 if (pp.defined()) {
1695 point_sum += kept_t[j].transform_unscaled(pp)
1696 / kept_t[j].scale();
1697 divisor += 1;
1701 if (divisor == 0)
1702 return point::undefined();
1704 return point_sum / divisor;
1708 * Calculate centroid of this frame, and of all previous frames,
1709 * from points common to both sets.
1711 static void centroids(unsigned int m, point *current, point *previous) {
1713 * Calculate the translation
1715 point other_centroid(0, 0);
1716 point this_centroid(0, 0);
1717 ale_pos divisor = 0;
1719 for (unsigned int i = 0; i < cp_count; i++) {
1720 point other_c = unscaled_centroid(m, i);
1721 point this_c = cp_array[i][m];
1723 if (!other_c.defined() || !this_c.defined())
1724 continue;
1726 other_centroid += other_c;
1727 this_centroid += this_c;
1728 divisor += 1;
1732 if (divisor == 0) {
1733 *current = point::undefined();
1734 *previous = point::undefined();
1735 return;
1738 *current = this_centroid / divisor;
1739 *previous = other_centroid / divisor;
1743 * Calculate the RMS error of control points for frame m, with
1744 * transformation t, against control points for earlier frames.
1746 static ale_pos cp_rms_error(unsigned int m, transformation t) {
1747 assert (_keep);
1749 ale_accum err = 0;
1750 ale_accum divisor = 0;
1752 for (unsigned int i = 0; i < cp_count; i++)
1753 for (unsigned int j = 0; j < m; j++) {
1754 const point *p = cp_array[i];
1755 point p_ref = kept_t[j].transform_unscaled(p[j]);
1756 point p_cur = t.transform_unscaled(p[m]);
1758 if (!p_ref.defined() || !p_cur.defined())
1759 continue;
1761 err += p_ref.lengthtosq(p_cur);
1762 divisor += 1;
1765 return (ale_pos) sqrt(err / divisor);
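/*
 * That is, the root mean square of the unscaled distances between
 * control points as placed by the candidate transformation t and the
 * same points as placed by each earlier kept transformation.  For
 * example (hypothetical distances): two point pairs at distances 3 and
 * 4 give sqrt((9 + 16) / 2), roughly 3.54.
 */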
1769 static void test_global(diff_stat_t *here, scale_cluster si, transformation t,
1770 int local_ax_count, int m, ale_accum local_gs_mo, ale_pos perturb) {
1772 diff_stat_t test(*here);
1774 test.diff(si, t.get_current_element(), local_ax_count, m);
1776 unsigned int ovl = overlap(si, t, local_ax_count);
1778 if (ovl >= local_gs_mo && test.better()) {
1779 test.confirm();
1780 *here = test;
1781 ui::get()->set_match(here->get_error());
1782 ui::get()->set_offset(here->get_offset());
1783 } else {
1784 test.discard();
1790 * Get the set of global transformations for a given density
1792 static void test_globals(diff_stat_t *here,
1793 scale_cluster si, transformation t, int local_gs, ale_pos adj_p,
1794 int local_ax_count, int m, ale_accum local_gs_mo, ale_pos perturb) {
1796 transformation offset = t;
1798 point min, max;
1800 transformation offset_p = offset;
1802 if (!offset_p.is_projective())
1803 offset_p.eu_to_gpt();
1805 min = max = offset_p.gpt_get(0);
1806 for (int p_index = 1; p_index < 4; p_index++) {
1807 point p = offset_p.gpt_get(p_index);
1808 if (p[0] < min[0])
1809 min[0] = p[0];
1810 if (p[1] < min[1])
1811 min[1] = p[1];
1812 if (p[0] > max[0])
1813 max[0] = p[0];
1814 if (p[1] > max[1])
1815 max[1] = p[1];
1818 point inner_min_t = -min;
1819 point inner_max_t = -max + point(si.accum->height(), si.accum->width());
1820 point outer_min_t = -max + point(adj_p - 1, adj_p - 1);
1821 point outer_max_t = point(si.accum->height(), si.accum->width()) - point(adj_p, adj_p);
1823 if (local_gs == 1 || local_gs == 3 || local_gs == 4 || local_gs == 6) {
1826 * Inner
1829 for (ale_pos i = inner_min_t[0]; i <= inner_max_t[0]; i += adj_p)
1830 for (ale_pos j = inner_min_t[1]; j <= inner_max_t[1]; j += adj_p) {
1831 transformation test_t = offset;
1832 test_t.translate(point(i, j));
1833 test_global(here, si, test_t, local_ax_count, m, local_gs_mo, perturb);
1837 if (local_gs == 2 || local_gs == 3 || local_gs == -1 || local_gs == 6) {
1840 * Outer
1843 for (ale_pos i = outer_min_t[0]; i <= outer_max_t[0]; i += adj_p)
1844 for (ale_pos j = outer_min_t[1]; j < inner_min_t[1]; j += adj_p) {
1845 transformation test_t = offset;
1846 test_t.translate(point(i, j));
1847 test_global(here, si, test_t, local_ax_count, m, local_gs_mo, perturb);
1849 for (ale_pos i = outer_min_t[0]; i <= outer_max_t[0]; i += adj_p)
1850 for (ale_pos j = outer_max_t[1]; j > inner_max_t[1]; j -= adj_p) {
1851 transformation test_t = offset;
1852 test_t.translate(point(i, j));
1853 test_global(here, si, test_t, local_ax_count, m, local_gs_mo, perturb);
1855 for (ale_pos i = outer_min_t[0]; i < inner_min_t[0]; i += adj_p)
1856 for (ale_pos j = outer_min_t[1]; j <= outer_max_t[1]; j += adj_p) {
1857 transformation test_t = offset;
1858 test_t.translate(point(i, j));
1859 test_global(here, si, test_t, local_ax_count, m, local_gs_mo, perturb);
1861 for (ale_pos i = outer_max_t[0]; i > inner_max_t[0]; i -= adj_p)
1862 for (ale_pos j = outer_min_t[1]; j <= outer_max_t[1]; j += adj_p) {
1863 transformation test_t = offset;
1864 test_t.translate(point(i, j));
1865 test_global(here, si, test_t, local_ax_count, m, local_gs_mo, perturb);
1870 static void get_translational_set(std::vector<transformation> *set,
1871 transformation t, ale_pos adj_p) {
1873 ale_pos adj_s;
1875 transformation offset = t;
1876 transformation test_t(transformation::eu_identity());
1878 for (int i = 0; i < 2; i++)
1879 for (adj_s = -adj_p; adj_s <= adj_p; adj_s += 2 * adj_p) {
1881 test_t = offset;
1883 test_t.translate(i ? point(adj_s, 0) : point(0, adj_s));
1885 set->push_back(test_t);
1889 static int threshold_ok(ale_accum error) {
1890 if ((1 - error) * (ale_accum) 100 >= match_threshold)
1891 return 1;
1893 if (!(match_threshold >= 0))
1894 return 1;
1896 return 0;
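/*
 * Example (hypothetical values): with match_threshold == 90, an
 * alignment error of 0.08 yields a match of (1 - 0.08) * 100 == 92,
 * which passes; a negative match_threshold accepts every frame.
 */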
1900 * Check for satisfaction of the certainty threshold.
1902 static int ma_cert_satisfied(const scale_cluster &c, const transformation &t, unsigned int i) {
1903 transformation::elem_bounds_int_t b = t.elem_bounds().scale_to_bounds(c.accum->height(), c.accum->width());
1904 ale_accum sum[3] = {0, 0, 0};
1906 for (unsigned int ii = b.imin; ii < b.imax; ii++)
1907 for (unsigned int jj = b.jmin; jj < b.jmax; jj++) {
1908 pixel p = c.accum->get_pixel(ii, jj);
1909 sum[0] += p[0];
1910 sum[1] += p[1];
1911 sum[2] += p[2];
1914 unsigned int count = (b.jmax - b.jmin) * (b.imax - b.imin);
1916 for (int k = 0; k < 3; k++)
1917 if (sum[k] / count < _ma_cert)
1918 return 0;
1920 return 1;
1923 static diff_stat_t check_ancestor_path(const trans_multi &offset, const scale_cluster &si, diff_stat_t here, int local_ax_count, int frame) {
1925 if (offset.get_current_index() > 0)
1926 for (int i = offset.parent_index(offset.get_current_index()); i >= 0; i = (i > 0) ? (int) offset.parent_index(i) : -1) {
1927 trans_single t = offset.get_element(i);
1928 t.rescale(offset.get_current_element().scale());
1930 here.diff(si, t, local_ax_count, frame);
1932 if (here.better_defined())
1933 here.confirm();
1934 else
1935 here.discard();
1938 return here;
1941 #ifdef USE_FFTW
1943 * High-pass filter for frequency weights
1945 static void hipass(int rows, int cols, fftw_complex *inout) {
1946 for (int i = 0; i < rows * vert_freq_cut; i++)
1947 for (int j = 0; j < cols; j++)
1948 for (int k = 0; k < 2; k++)
1949 inout[i * cols + j][k] = 0;
1950 for (int i = 0; i < rows; i++)
1951 for (int j = 0; j < cols * horiz_freq_cut; j++)
1952 for (int k = 0; k < 2; k++)
1953 inout[i * cols + j][k] = 0;
1954 for (int i = 0; i < rows; i++)
1955 for (int j = 0; j < cols; j++)
1956 for (int k = 0; k < 2; k++)
1957 if (i / (double) rows + j / (double) cols < 2 * avg_freq_cut)
1958 inout[i * cols + j][k] = 0;
1960 #endif
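/*
 * Sketch of the effect (hypothetical values): with rows == 200,
 * cols == 300, vert_freq_cut == 0.1 and horiz_freq_cut == 0.1, the
 * first 20 rows and first 30 columns of transform coefficients are
 * zeroed, and avg_freq_cut additionally zeroes coefficients for which
 * i/rows + j/cols < 2 * avg_freq_cut; the magnitude of the inverse
 * transform then becomes the per-pixel alignment weight in fw_update().
 */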
1964 * Reset alignment weights
1966 static void reset_weights() {
1967 if (alignment_weights != NULL)
1968 ale_image_release(alignment_weights);
1970 alignment_weights = NULL;
1974 * Initialize alignment weights
1976 static void init_weights() {
1977 if (alignment_weights != NULL)
1978 return;
1980 alignment_weights = ale_new_image(accel::context(), ALE_IMAGE_RGB, ale_image_get_type(reference_image));
1982 assert (alignment_weights);
1984 assert (!ale_resize_image(alignment_weights, 0, 0, ale_image_get_width(reference_image), ale_image_get_height(reference_image)));
1986 ale_image_map_0(alignment_weights, "SET_PIXEL(p, PIXEL(1, 1, 1))");
1990 * Update alignment weights with weight map
1992 static void map_update() {
1994 if (weight_map == NULL)
1995 return;
1997 init_weights();
1999 ale_image_map_2(alignment_weights, alignment_weights, weight_map, " \
2000 SET_PIXEL(p, GET_PIXEL(0, p) * GET_PIXEL_BG(1, p))");
2004 * Update alignment weights with algorithmic weights
2006 static void wmx_update() {
2007 #ifdef USE_UNIX
2009 static exposure *exp_def = new exposure_default();
2010 static exposure *exp_bool = new exposure_boolean();
2012 if (wmx_file == NULL || wmx_exec == NULL || wmx_defs == NULL)
2013 return;
2015 unsigned int rows = ale_image_get_height(reference_image);
2016 unsigned int cols = ale_image_get_width(reference_image);
2018 image_rw::write_image(wmx_file, reference_image);
2019 image_rw::write_image(wmx_defs, reference_image, exp_bool->get_gamma(), 0, 0, 1);
2021 /* execute ... */
2022 int exit_status = 1;
2023 if (!fork()) {
2024 execlp(wmx_exec, wmx_exec, wmx_file, wmx_defs, NULL);
2025 ui::get()->exec_failure(wmx_exec, wmx_file, wmx_defs);
2028 wait(&exit_status);
2030 if (exit_status)
2031 ui::get()->fork_failure("d2::align");
2033 ale_image wmx_weights = image_rw::read_image(wmx_file, exp_def);
2035 ale_image_set_x_offset(wmx_weights, ale_image_get_x_offset(reference_image));
2036 ale_image_set_y_offset(wmx_weights, ale_image_get_y_offset(reference_image));
2038 if (ale_image_get_height(wmx_weights) != rows || ale_image_get_width(wmx_weights) != cols)
2039 ui::get()->error("algorithmic weighting must not change image size");
2041 if (alignment_weights == NULL)
2042 alignment_weights = wmx_weights;
2043 else
2044 ale_image_map_2(alignment_weights, alignment_weights, wmx_weights, "\
2045 SET_PIXEL(p, GET_PIXEL(0, p) * GET_PIXEL(1, p))");
2046 #endif
2050 * Update alignment weights with frequency weights
2052 static void fw_update() {
2053 #ifdef USE_FFTW
2054 if (horiz_freq_cut == 0
2055 && vert_freq_cut == 0
2056 && avg_freq_cut == 0)
2057 return;
2060 * Required for correct operation of --fwshow
2063 assert (alignment_weights == NULL);
2065 int rows = reference_image->height();
2066 int cols = reference_image->width();
2067 int colors = reference_image->depth();
2069 alignment_weights = new_image_ale_real(rows, cols,
2070 colors, "alignment_weights");
2072 fftw_complex *inout = (fftw_complex *) fftw_malloc(sizeof(fftw_complex) * rows * cols);
2074 assert (inout);
2076 fftw_plan pf = fftw_plan_dft_2d(rows, cols,
2077 inout, inout,
2078 FFTW_FORWARD, FFTW_ESTIMATE);
2080 fftw_plan pb = fftw_plan_dft_2d(rows, cols,
2081 inout, inout,
2082 FFTW_BACKWARD, FFTW_ESTIMATE);
2084 for (int k = 0; k < colors; k++) {
2085 for (int i = 0; i < rows * cols; i++) {
2086 inout[i][0] = reference_image->get_pixel(i / cols, i % cols)[k];
2087 inout[i][1] = 0;
2090 fftw_execute(pf);
2091 hipass(rows, cols, inout);
2092 fftw_execute(pb);
2094 for (int i = 0; i < rows * cols; i++) {
2095 #if 0
2096 alignment_weights->pix(i / cols, i % cols)[k] = fabs(inout[i][0] / (rows * cols));
2097 #else
2098 alignment_weights->set_chan(i / cols, i % cols, k,
2099 sqrt(pow(inout[i][0] / (rows * cols), 2)
2100 + pow(inout[i][1] / (rows * cols), 2)));
2101 #endif
2105 fftw_destroy_plan(pf);
2106 fftw_destroy_plan(pb);
2107 fftw_free(inout);
2109 if (fw_output != NULL)
2110 image_rw::write_image(fw_output, alignment_weights);
2111 #endif
2115 * Update alignment to frame N.
2117 static void update_to(int n) {
2119 assert (n <= latest + 1);
2120 assert (n >= 0);
2122 ale_align_properties astate = align_properties();
2124 ui::get()->set_frame_num(n);
2126 if (latest < 0) {
2129 * Handle the initial frame
2132 astate.set_input_frame(image_rw::open(n));
2134 const image *i = astate.get_input_frame();
2135 int is_default;
2136 transformation result = alignment_class == 2
2137 ? transformation::gpt_identity(i, scale_factor)
2138 : transformation::eu_identity(i, scale_factor);
2139 result = tload_first(tload, alignment_class == 2, result, &is_default);
2140 tsave_first(tsave, result, alignment_class == 2);
2142 if (_keep > 0) {
2143 kept_t = transformation::new_eu_identity_array(image_rw::count());
2144 kept_ok = (int *) malloc(image_rw::count()
2145 * sizeof(int));
2146 assert (kept_t);
2147 assert (kept_ok);
2149 if (!kept_t || !kept_ok)
2150 ui::get()->memory_error("alignment");
2152 kept_ok[0] = 1;
2153 kept_t[0] = result;
2156 latest = 0;
2157 latest_ok = 1;
2158 latest_t = result;
2160 astate.set_default(result);
2161 orig_t = result;
2163 image_rw::close(n);
2166 for (int i = latest + 1; i <= n; i++) {
2169 * Handle supplemental frames.
2172 assert (reference != NULL);
2174 ui::get()->set_arender_current();
2175 reference->sync(i - 1);
2176 ui::get()->clear_arender_current();
2177 reference_image = reference->get_image();
2178 reference_defined = reference->get_defined();
2180 if (i == 1)
2181 astate.default_set_original_bounds(reference_image);
2183 reset_weights();
2184 fw_update();
2185 wmx_update();
2186 map_update();
2188 assert (reference_image != NULL);
2189 assert (reference_defined != NULL);
2191 astate.set_input_frame(image_rw::open(i));
2193 _align(i, _gs, &astate);
2195 image_rw::close(n);
2199 public:
2202 * Set the control point count
2204 static void set_cp_count(unsigned int n) {
2205 assert (cp_array == NULL);
2207 cp_count = n;
2208 cp_array = (const point **) malloc(n * sizeof(const point *));
2212 * Set control points.
2214 static void set_cp(unsigned int i, const point *p) {
2215 cp_array[i] = p;
2219 * Register exposure
2221 static void exp_register() {
2222 _exp_register = 1;
2226 * Register exposure only based on metadata
2228 static void exp_meta_only() {
2229 _exp_register = 2;
2233 * Don't register exposure
2235 static void exp_noregister() {
2236 _exp_register = 0;
2240 * Set alignment class to translation only. Only adjust the x and y
2241 * position of images. Do not rotate input images or perform
2242 * projective transformations.
2244 static void class_translation() {
2245 alignment_class = 0;
2249 * Set alignment class to Euclidean transformations only. Adjust the x
2250 * and y position of images and the orientation of the image about the
2251 * image center.
2253 static void class_euclidean() {
2254 alignment_class = 1;
2258 * Set alignment class to perform general projective transformations.
2259 * See the file gpt.h for more information about general projective
2260 * transformations.
2262 static void class_projective() {
2263 alignment_class = 2;
2267 * Set the default initial alignment to the identity transformation.
2269 static void initial_default_identity() {
2270 default_initial_alignment_type = 0;
2274 * Set the default initial alignment to the most recently matched
2275 * frame's final transformation.
2277 static void initial_default_follow() {
2278 default_initial_alignment_type = 1;
2282 * Perturb output coordinates.
2284 static void perturb_output() {
2285 perturb_type = 0;
2289 * Perturb source coordinates.
2291 static void perturb_source() {
2292 perturb_type = 1;
2296 * Frames under threshold align optimally
2298 static void fail_optimal() {
2299 is_fail_default = 0;
2303 * Frames under threshold keep their default alignments.
2305 static void fail_default() {
2306 is_fail_default = 1;
2310 * Align images with an error contribution from each color channel.
2312 static void all() {
2313 channel_alignment_type = 0;
2317 * Align images with an error contribution only from the green channel.
2318 * Other color channels do not affect alignment.
2320 static void green() {
2321 channel_alignment_type = 1;
2325 * Align images using a summation of channels. May be useful when
2326 * dealing with images that have high frequency color ripples due to
2327 * color aliasing.
2329 static void sum() {
2330 channel_alignment_type = 2;
2334 * Error metric exponent
2337 static void set_metric_exponent(float me) {
2338 metric_exponent = me;
2342 * Match threshold
2345 static void set_match_threshold(float mt) {
2346 match_threshold = mt;
2350 * Perturbation lower and upper bounds.
2353 static void set_perturb_lower(ale_pos pl, int plp) {
2354 perturb_lower = pl;
2355 perturb_lower_percent = plp;
2358 static void set_perturb_upper(ale_pos pu, int pup) {
2359 perturb_upper = pu;
2360 perturb_upper_percent = pup;
2364 * Maximum rotational perturbation.
2367 static void set_rot_max(int rm) {
2370 * Obtain the largest power of two not larger than rm.
2373 rot_max = pow(2, floor(log(rm) / log(2)));
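/*
 * Example: set_rot_max(45) stores rot_max == 32, the largest power of
 * two not exceeding 45 (floor(log2(45)) == 5).
 */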
2377 * Barrel distortion adjustment multiplier
2380 static void set_bda_mult(ale_pos m) {
2381 bda_mult = m;
2385 * Barrel distortion maximum rate of change
2388 static void set_bda_rate(ale_pos m) {
2389 bda_rate = m;
2393 * Level-of-detail
2396 static void set_lod_preferred(int lm) {
2397 lod_preferred = lm;
2401 * Minimum dimension for reduced level-of-detail.
2404 static void set_min_dimension(int md) {
2405 min_dimension = md;
2409 * Set the scale factor
2411 static void set_scale(ale_pos s) {
2412 scale_factor = s;
2416 * Set reference rendering to align against
2418 static void set_reference(render *r) {
2419 reference = r;
2423 * Set the interpolant
2425 static void set_interpolant(filter::scaled_filter *f) {
2426 interpolant = f;
2430 * Set alignment weights image
2432 static void set_weight_map(const image *i) {
2433 weight_map = i;
2437 * Set frequency cuts
2439 static void set_frequency_cut(double h, double v, double a) {
2440 horiz_freq_cut = h;
2441 vert_freq_cut = v;
2442 avg_freq_cut = a;
2446 * Set algorithmic alignment weighting
2448 static void set_wmx(const char *e, const char *f, const char *d) {
2449 wmx_exec = e;
2450 wmx_file = f;
2451 wmx_defs = d;
2455 * Show frequency weights
2457 static void set_fl_show(const char *filename) {
2458 fw_output = filename;
2462 * Set transformation file loader.
2464 static void set_tload(tload_t *tl) {
2465 tload = tl;
2469 * Set transformation file saver.
2471 static void set_tsave(tsave_t *ts) {
2472 tsave = ts;
2476 * Get match statistics for frame N.
2478 static int match(int n) {
2479 update_to(n);
2481 if (n == latest)
2482 return latest_ok;
2483 else if (_keep)
2484 return kept_ok[n];
2485 else {
2486 assert(0);
2487 exit(1);
2492 * Message that old alignment data should be kept.
2494 static void keep() {
2495 assert (latest == -1);
2496 _keep = 1;
2500 * Get alignment for frame N.
2502 static transformation of(int n) {
2503 update_to(n);
2504 if (n == latest)
2505 return latest_t;
2506 else if (_keep)
2507 return kept_t[n];
2508 else {
2509 assert(0);
2510 exit(1);
2515 * Use Monte Carlo alignment sampling with argument N.
2517 static void mc(ale_pos n) {
2518 _mc = n;
2522 * Set the certainty-weighted flag.
2524 static void certainty_weighted(int flag) {
2525 certainty_weights = flag;
2529 * Set the global search type.
2531 static void gs(const char *type) {
2532 if (!strcmp(type, "local")) {
2533 _gs = 0;
2534 } else if (!strcmp(type, "inner")) {
2535 _gs = 1;
2536 } else if (!strcmp(type, "outer")) {
2537 _gs = 2;
2538 } else if (!strcmp(type, "all")) {
2539 _gs = 3;
2540 } else if (!strcmp(type, "central")) {
2541 _gs = 4;
2542 } else if (!strcmp(type, "defaults")) {
2543 _gs = 6;
2544 } else if (!strcmp(type, "points")) {
2545 _gs = 5;
2546 keep();
2547 } else {
2548 ui::get()->error("bad global search type");
2553 * Set the minimum overlap for global searching
2555 static void gs_mo(ale_pos value, int _gs_mo_percent) {
2556 _gs_mo = value;
2557 gs_mo_percent = _gs_mo_percent;
2561 * Set multi-alignment certainty lower bound.
2563 static void set_ma_cert(ale_real value) {
2564 _ma_cert = value;
2568 * Set alignment exclusion regions
2570 static void set_exclusion(exclusion *_ax_parameters, int _ax_count) {
2571 ax_count = _ax_count;
2572 ax_parameters = _ax_parameters;
2576 * Get match summary statistics.
2578 static ale_accum match_summary() {
2579 return match_sum / (ale_accum) match_count;
2583 template<class diff_trans>
2584 void *d2::align::diff_stat_generic<diff_trans>::diff_subdomain(void *args) {
2586 subdomain_args *sargs = (subdomain_args *) args;
2588 struct scale_cluster c = sargs->c;
2589 std::vector<run> runs = sargs->runs;
2590 int ax_count = sargs->ax_count;
2591 int f = sargs->f;
2592 int i_min = sargs->i_min;
2593 int i_max = sargs->i_max;
2594 int j_min = sargs->j_min;
2595 int j_max = sargs->j_max;
2596 int subdomain = sargs->subdomain;
2598 assert (reference_image);
2600 point offset = c.accum->offset();
2602 for (mc_iterate m(i_min, i_max, j_min, j_max, subdomain); !m.done(); m++) {
2604 int i = m.get_i();
2605 int j = m.get_j();
2608 * Check reference frame definition.
2611 if (!((pixel) c.accum->get_pixel(i, j)).finite()
2612 || !(((pixel) c.certainty->get_pixel(i, j)).minabs_norm() > 0))
2613 continue;
2616 * Check for exclusion in render coordinates.
2619 if (ref_excluded(i, j, offset, c.ax_parameters, ax_count))
2620 continue;
2623 * Transform
2626 struct point q, r = point::undefined();
2628 q = (c.input_scale < 1.0 && interpolant == NULL)
2629 ? runs.back().offset.scaled_inverse_transform(
2630 point(i + offset[0], j + offset[1]))
2631 : runs.back().offset.unscaled_inverse_transform(
2632 point(i + offset[0], j + offset[1]));
2634 if (runs.size() == 2) {
2635 r = (c.input_scale < 1.0)
2636 ? runs.front().offset.scaled_inverse_transform(
2637 point(i + offset[0], j + offset[1]))
2638 : runs.front().offset.unscaled_inverse_transform(
2639 point(i + offset[0], j + offset[1]));
2642 ale_pos ti = q[0];
2643 ale_pos tj = q[1];
2646 * Check that the transformed coordinates are within
2647 * the boundaries of array c.input and that they
2648 * are not subject to exclusion.
2650 * Also, check that the weight value in the accumulated array
2651 * is nonzero, unless we know it is nonzero by virtue of the
2652 * fact that it falls within the region of the original frame
2653 * (e.g. when we're not increasing image extents).
2656 if (input_excluded(ti, tj, c.ax_parameters, ax_count))
2657 continue;
2659 if (input_excluded(r[0], r[1], c.ax_parameters, ax_count))
2660 r = point::undefined();
2663 * Check the boundaries of the input frame
2666 if (!(ti >= 0
2667 && ti <= c.input->height() - 1
2668 && tj >= 0
2669 && tj <= c.input->width() - 1))
2670 continue;
2672 if (!(r[0] >= 0
2673 && r[0] <= c.input->height() - 1
2674 && r[1] >= 0
2675 && r[1] <= c.input->width() - 1))
2676 r = point::undefined();
2678 sargs->runs.back().sample(f, c, i, j, q, r, runs.front());
2681 return NULL;
2684 #endif