Begin work on a structure to store information about subspace candidates.
// Copyright 2003, 2004, 2005 David Hilvert <dhilvert@auricle.dyndns.org>,
//                                          <dhilvert@ugcs.caltech.edu>

/*  This file is part of the Anti-Lamenessing Engine.

    The Anti-Lamenessing Engine is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    The Anti-Lamenessing Engine is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with the Anti-Lamenessing Engine; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

/*
 * d3/scene.h: Representation of a 3D scene.
 */

#ifndef __scene_h__
#define __scene_h__

#include "point.h"

/*
 * View angle multiplier.
 *
 * Setting this to a value larger than one can be useful for debugging.
 */

#define VIEW_ANGLE_MULTIPLIER 1
38 class scene {
41 * Clipping planes
43 static ale_pos front_clip;
44 static ale_pos rear_clip;
47 * Decimation exponents for geometry
49 static ale_pos primary_decimation_upper;
50 static ale_pos input_decimation_lower;
51 static ale_pos output_decimation_preferred;
54 * Input resolution divisor
56 static ale_pos input_resolution_divisor;
59 * Output clipping
61 static int output_clip;
64 * Model files
66 static const char *load_model_name;
67 static const char *save_model_name;
70 * Occupancy attenuation
73 static double occ_att;
76 * Normalization of output by weight
79 static int normalize_weights;
82 * Falloff exponent
85 static double falloff_exponent;
88 * Third-camera error multiplier
90 static double tc_multiplier;
93 * Occupancy update iterations
95 static unsigned int ou_iterations;
98 * Pairwise ambiguity
100 static unsigned int pairwise_ambiguity;
103 * Pairwise comparisons
105 static const char *pairwise_comparisons;
108 * 3D Post-exclusion
110 static int d3px_count;
111 static double *d3px_parameters;
114 * Nearness threshold
116 static const ale_real nearness;
119 * Encounter threshold for defined pixels.
121 static double encounter_threshold;
124 * Structure to hold input frame information at levels of
125 * detail between full detail and full decimation.
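 *
 * A minimal usage sketch (illustrative only; assumes frame f has been
 * opened via d2::image_rw and aligned via d3::align):
 *
 *	lod_image *li = new lod_image(f);
 *	pt t0 = li->get_t(0);                           // full-detail transformation
 *	d2::pixel c = li->get_tl(d2::point(0, 0), 2);   // color at LOD 2
 *	delete li;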
127 class lod_image {
128 unsigned int f;
129 unsigned int entries;
130 std::vector<const d2::image *> im;
131 std::vector<pt> transformation;
133 public:
135 * Constructor
137 lod_image(unsigned int _f) {
139 pt _pt;
141 f = _f;
142 im.push_back(d2::image_rw::copy(f, "3D reference image"));
143 assert(im.back());
144 entries = 1;
145 _pt = d3::align::projective(f);
146 _pt.scale(1 / _pt.scale_2d());
147 transformation.push_back(_pt);
149 while (im.back()->height() > 4
150 && im.back()->width() > 4) {
152 im.push_back(im.back()->scale_by_half("3D, reduced LOD"));
153 assert(im.back());
155 _pt.scale(1 / _pt.scale_2d() / pow(2, entries));
156 transformation.push_back(_pt);
158 entries += 1;
163 * Get the number of scales
165 unsigned int count() {
166 return entries;
170 * Get an image.
172 const d2::image *get_image(unsigned int i) {
173 assert(i >= 0);
174 assert(i < entries);
175 return im[i];
178 int in_bounds(d2::point p) {
179 return im[0]->in_bounds(p);
183 * Get a 'trilinear' color. We currently don't do interpolation
184 * between levels of detail; hence, it's discontinuous in tl_coord.
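 *
 * The point p is given in the coordinates of the full-detail image
 * im[0]; it is divided by 2^tl_coord before indexing the selected level,
 * and tl_coord is rounded and clamped to the available range.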
186 d2::pixel get_tl(d2::point p, ale_pos tl_coord) {
188 assert(in_bounds(p));
190 tl_coord = round(tl_coord);
if (tl_coord >= entries)
	tl_coord = entries - 1;
if (tl_coord < 0)
	tl_coord = 0;
197 p = p / (ale_pos) pow(2, tl_coord);
199 unsigned int itlc = (unsigned int) tl_coord;
201 if (p[0] > im[itlc]->height() - 1)
202 p[0] = im[itlc]->height() - 1;
203 if (p[1] > im[itlc]->width() - 1)
204 p[1] = im[itlc]->width() - 1;
206 assert(p[0] >= 0);
207 assert(p[1] >= 0);
209 return im[itlc]->get_bl(p);
212 d2::pixel get_max_diff(d2::point p, ale_pos tl_coord) {
213 assert(in_bounds(p));
215 tl_coord = round(tl_coord);
if (tl_coord >= entries)
	tl_coord = entries - 1;
if (tl_coord < 0)
	tl_coord = 0;
222 p = p / (ale_pos) pow(2, tl_coord);
224 unsigned int itlc = (unsigned int) tl_coord;
226 if (p[0] > im[itlc]->height() - 1)
227 p[0] = im[itlc]->height() - 1;
228 if (p[1] > im[itlc]->width() - 1)
229 p[1] = im[itlc]->width() - 1;
231 assert(p[0] >= 0);
232 assert(p[1] >= 0);
234 return im[itlc]->get_max_diff(p);
238 * Get the transformation
240 pt get_t(unsigned int i) {
241 assert(i >= 0);
242 assert(i < entries);
243 return transformation[i];
247 * Get the camera origin in world coordinates
249 point origin() {
250 return transformation[0].origin();
254 * Destructor
256 ~lod_image() {
257 for (unsigned int i = 0; i < entries; i++) {
258 delete im[i];
264 * Structure to hold weight information for reference images.
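 *
 * Weights are kept in a pyramid of images indexed by trilinear
 * coordinate (tc_low through tc_high).  Within a level, a pixel value of
 * -1 indicates that the weight has been pushed to a higher-resolution
 * level, while 0 indicates that no weight is defined at this level.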
266 class ref_weights {
267 unsigned int f;
268 std::vector<d2::image *> weights;
269 pt transformation;
270 int tc_low;
271 int tc_high;
272 int initialized;
void set_image(d2::image *im, ale_real value) {
	for (unsigned int i = 0; i < im->height(); i++)
	for (unsigned int j = 0; j < im->width(); j++)
		im->pix(i, j) = d2::pixel(value, value, value);
}
280 d2::image *make_image(ale_pos sf, ale_real init_value = 0) {
281 d2::image *result = new d2::image_ale_real(
282 (unsigned int) ceil(transformation.unscaled_height() * sf),
283 (unsigned int) ceil(transformation.unscaled_width() * sf), 3);
285 if (init_value != 0)
286 set_image(result, init_value);
288 return result;
291 public:
294 * Explicit weight subtree
296 struct subtree {
297 ale_real node_value;
298 subtree *children[2][2];
300 subtree(ale_real nv, subtree *a, subtree *b, subtree *c, subtree *d) {
301 node_value = nv;
302 children[0][0] = a;
303 children[0][1] = b;
304 children[1][0] = c;
305 children[1][1] = d;
308 ~subtree() {
309 for (int i = 0; i < 2; i++)
310 for (int j = 0; j < 2; j++)
311 delete children[i][j];
316 * Constructor
318 ref_weights(unsigned int _f) {
319 f = _f;
320 transformation = d3::align::projective(f);
321 initialized = 0;
325 * Check spatial bounds.
327 int in_spatial_bounds(d2::point p) {
328 if (p[0] < 0)
329 return 0;
330 if (p[1] < 0)
331 return 0;
332 if (p[0] > transformation.unscaled_height() - 1)
333 return 0;
334 if (p[1] > transformation.unscaled_width() - 1)
335 return 0;
336 if (p[2] >= 0)
337 return 0;
339 return 1;
342 int in_spatial_bounds(const space::traverse &t) {
343 d2::point p = transformation.wp_unscaled(t.get_centroid()).xy();
344 return in_spatial_bounds(p);
348 * Increase resolution to the given level.
350 void increase_resolution(int tc, unsigned int i, unsigned int j) {
351 d2::image *im = weights[tc - tc_low];
352 assert(im);
353 assert(i <= im->height() - 1);
354 assert(j <= im->width() - 1);
357 * Check for the cases known to have no lower level of detail.
360 if (im->pix(i, j)[0] == -1)
361 return;
363 if (tc == tc_high)
364 return;
366 increase_resolution(tc + 1, i / 2, j / 2);
369 * Load the lower-level image structure.
372 d2::image *iim = weights[tc + 1 - tc_low];
374 assert(iim);
375 assert(i / 2 <= iim->height() - 1);
376 assert(j / 2 <= iim->width() - 1);
379 * Check for the case where no lower level of detail is
380 * available.
383 if (iim->pix(i / 2, j / 2)[0] == -1)
384 return;
387 * Spread out the lower level of detail among (uninitialized)
388 * peer values.
for (unsigned int ii = (i / 2) * 2; ii < (i / 2) * 2 + 2; ii++)
for (unsigned int jj = (j / 2) * 2; jj < (j / 2) * 2 + 2; jj++) {
	assert(ii <= im->height() - 1);
	assert(jj <= im->width() - 1);
	assert(im->pix(ii, jj)[0] == 0);

	im->pix(ii, jj) = iim->pix(i / 2, j / 2);
}
401 * Set the lower level of detail to point here.
404 iim->pix(i / 2, j / 2)[0] = -1;
408 * Add weights to positive higher-resolution pixels where
409 * found when their current values match the given subtree
410 * values; set negative pixels to zero and return 0 if no
411 * positive higher-resolution pixels are found.
413 int add_partial(int tc, unsigned int i, unsigned int j, ale_real weight, subtree *st) {
414 d2::image *im = weights[tc - tc_low];
415 assert(im);
416 assert(i <= im->height() - 1);
417 assert(j <= im->width() - 1);
420 * Check for positive values.
423 if (im->pix(i, j)[0] > 0) {
424 if (st && st->node_value == im->pix(i, j)[0])
425 im->pix(i, j)[0] += weight;
426 return 1;
430 * Handle the case where there are no higher levels of detail.
433 if (tc == tc_low) {
434 assert(im->pix(i, j)[0] == 0);
435 return 0;
439 * Handle the case where higher levels of detail are available.
442 int success[2][2];
444 for (int ii = 0; ii < 2; ii++)
445 for (int jj = 0; jj < 2; jj++)
446 success[ii][jj] = add_partial(tc - 1, i * 2 + ii, j * 2 + jj, weight,
447 st ? st->children[ii][jj] : NULL);
449 if (!success[0][0]
450 && !success[0][1]
451 && !success[1][0]
452 && !success[1][1]) {
453 im->pix(i, j)[0] = 0;
454 return 0;
457 d2::image *iim = weights[tc - 1 - tc_low];
458 assert(iim);
460 for (int ii = 0; ii < 2; ii++)
461 for (int jj = 0; jj < 2; jj++)
if (success[ii][jj] == 0) {
	assert(i * 2 + ii < iim->height());
	assert(j * 2 + jj < iim->width());

	/* Write to the higher-resolution image. */
	iim->pix(i * 2 + ii, j * 2 + jj)[0] = weight;
}

im->pix(i, j)[0] = -1;

return 1;
}

/*
 * Add weight.
 */

void add_weight(int tc, unsigned int i, unsigned int j, ale_real weight, subtree *st) {
476 d2::image *im = weights[tc - tc_low];
477 assert(im);
478 assert(i <= im->height() - 1);
479 assert(j <= im->width() - 1);
482 * Increase resolution, if necessary
485 increase_resolution(tc, i, j);
488 * Attempt to add the weight at levels of detail
489 * where weight is defined.
492 if (add_partial(tc, i, j, weight, st))
493 return;
496 * If no weights are defined at any level of detail,
497 * then set the weight here.
500 im->pix(i, j)[0] = weight;
503 void add_weight(int tc, d2::point p, ale_real weight, subtree *st) {
505 p *= pow(2, -tc);
507 unsigned int i = (unsigned int) floor(p[0]);
508 unsigned int j = (unsigned int) floor(p[1]);
510 add_weight(tc, i, j, weight, st);
513 void add_weight(const space::traverse &t, ale_real weight, subtree *st) {
515 if (weight == 0)
516 return;
518 ale_pos tc = transformation.trilinear_coordinate(t);
519 d2::point p = transformation.wp_unscaled(t.get_centroid()).xy();
520 assert(in_spatial_bounds(p));
522 tc = round(tc);
525 * Establish a reasonable (?) upper bound on resolution.
528 if (tc < input_decimation_lower) {
529 weight /= pow(4, (input_decimation_lower - tc));
530 tc = input_decimation_lower;
534 * Initialize, if necessary.
if (!initialized) {
	tc_low = tc_high = (int) tc;

	ale_real sf = pow(2, -tc);

	weights.push_back(make_image(sf));

	initialized = 1;
}
546 * Check resolution bounds
549 assert (tc_low <= tc_high);
552 * Generate additional levels of detail, if necessary.
555 while (tc < tc_low) {
556 tc_low--;
558 ale_real sf = pow(2, -tc_low);
560 weights.insert(weights.begin(), make_image(sf));
563 while (tc > tc_high) {
564 tc_high++;
566 ale_real sf = pow(2, -tc_high);
568 weights.push_back(make_image(sf, -1));
571 add_weight((int) tc, p, weight, st);
575 * Get weight
577 ale_real get_weight(int tc, unsigned int i, unsigned int j) {
578 d2::image *im = weights[tc - tc_low];
579 assert(im);
580 assert(i < im->height());
581 assert(j < im->width());
583 if (im->pix(i, j)[0] == -1) {
584 return (get_weight(tc - 1, i * 2 + 0, j * 2 + 0)
585 + get_weight(tc - 1, i * 2 + 1, j * 2 + 0)
586 + get_weight(tc - 1, i * 2 + 1, j * 2 + 1)
587 + get_weight(tc - 1, i * 2 + 0, j * 2 + 1)) / 4;
590 if (im->pix(i, j)[0] == 0) {
591 return get_weight(tc + 1, i / 2, j / 2);
594 return im->pix(i, j)[0];
597 ale_real get_weight(int tc, d2::point p) {
599 p *= pow(2, -tc);
601 unsigned int i = (unsigned int) floor(p[0]);
602 unsigned int j = (unsigned int) floor(p[1]);
604 return get_weight(tc, i, j);
607 ale_real get_weight(const space::traverse &t) {
608 ale_pos tc = transformation.trilinear_coordinate(t);
609 d2::point p = transformation.wp_unscaled(t.get_centroid()).xy();
610 assert(in_spatial_bounds(p));
612 if (!initialized)
613 return 0;
615 tc = round(tc);
617 if (tc < tc_low) {
618 tc = tc_low;
621 if (tc <= tc_high) {
622 return get_weight((int) tc, p);
625 assert(tc > tc_high);
627 int multiplier = (int) pow(2, (tc - tc_high));
628 ale_real result = 0;
630 for (int i = 0; i < multiplier; i++)
631 for (int j = 0; j < multiplier; j++) {
632 result += get_weight(tc_high,
633 (unsigned int) floor(p[0]) * multiplier + i,
634 (unsigned int) floor(p[1]) * multiplier + j);
637 return result;
641 * Check whether a subtree is simple.
643 int is_simple(subtree *s) {
644 assert (s);
646 if (s->node_value == -1
647 && s->children[0][0] == NULL
648 && s->children[0][1] == NULL
649 && s->children[1][0] == NULL
650 && s->children[1][1] == NULL)
651 return 1;
653 return 0;
657 * Get a weight subtree.
659 subtree *get_subtree(int tc, unsigned int i, unsigned int j) {
662 * tc > tc_high is handled recursively.
665 if (tc > tc_high) {
666 subtree *result = new subtree(-1,
667 get_subtree(tc - 1, i * 2 + 0, j * 2 + 0),
668 get_subtree(tc - 1, i * 2 + 0, j * 2 + 1),
669 get_subtree(tc - 1, i * 2 + 1, j * 2 + 0),
670 get_subtree(tc - 1, i * 2 + 1, j * 2 + 1));
672 if (is_simple(result)) {
673 delete result;
674 return NULL;
677 return result;
680 assert(tc >= tc_low);
681 assert(weights[tc - tc_low]);
683 d2::image *im = weights[tc - tc_low];
686 * Rectangular images will, in general, have
687 * out-of-bounds tree sections. Handle this case.
690 if (i >= im->height())
691 return NULL;
692 if (j >= im->width())
693 return NULL;
696 * -1 weights are handled recursively
699 if (im->pix(i, j)[0] == -1) {
700 subtree *result = new subtree(-1,
701 get_subtree(tc - 1, i * 2 + 0, j * 2 + 0),
702 get_subtree(tc - 1, i * 2 + 0, j * 2 + 1),
703 get_subtree(tc - 1, i * 2 + 1, j * 2 + 0),
704 get_subtree(tc - 1, i * 2 + 1, j * 2 + 1));
706 if (is_simple(result)) {
707 im->pix(i, j)[0] = 0;
708 delete result;
709 return NULL;
712 return result;
716 * Zero weights have NULL subtrees.
719 if (im->pix(i, j)[0] == 0)
720 return NULL;
723 * Handle the remaining case.
726 return new subtree(im->pix(i, j)[0], NULL, NULL, NULL, NULL);
729 subtree *get_subtree(int tc, d2::point p) {
730 p *= pow(2, -tc);
732 unsigned int i = (unsigned int) floor(p[0]);
733 unsigned int j = (unsigned int) floor(p[1]);
735 return get_subtree(tc, i, j);
738 subtree *get_subtree(const space::traverse &t) {
739 ale_pos tc = transformation.trilinear_coordinate(t);
740 d2::point p = transformation.wp_unscaled(t.get_centroid()).xy();
741 assert(in_spatial_bounds(p));
743 if (!initialized)
744 return NULL;
746 if (tc < input_decimation_lower)
747 tc = input_decimation_lower;
749 tc = round(tc);
751 if (tc < tc_low)
752 return NULL;
754 return get_subtree((int) tc, p);
758 * Destructor
760 ~ref_weights() {
761 for (unsigned int i = 0; i < weights.size(); i++) {
762 delete weights[i];
768 * Resolution check.
770 static int resolution_ok(pt transformation, ale_pos tc) {
772 if (pow(2, tc) > transformation.unscaled_height()
773 || pow(2, tc) > transformation.unscaled_width())
774 return 0;
776 if (tc < input_decimation_lower)
777 return 0;
779 return 1;
783 * Structure to hold input frame information at all levels of detail.
785 class lod_images {
788 * All images.
791 std::vector<lod_image *> images;
793 public:
795 lod_images() {
796 images.resize(d2::image_rw::count(), NULL);
799 void open(unsigned int f) {
800 assert (images[f] == NULL);
802 if (images[f] == NULL)
803 images[f] = new lod_image(f);
806 void open_all() {
807 for (unsigned int f = 0; f < d2::image_rw::count(); f++)
808 open(f);
811 lod_image *get(unsigned int f) {
812 assert (images[f] != NULL);
813 return images[f];
816 void close(unsigned int f) {
817 assert (images[f] != NULL);
818 delete images[f];
819 images[f] = NULL;
822 void close_all() {
823 for (unsigned int f = 0; f < d2::image_rw::count(); f++)
824 close(f);
827 ~lod_images() {
828 close_all();
833 * All levels-of-detail
836 static struct lod_images *al;
839 * Data structure for storing best encountered subspace candidates.
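 *
 * A hypothetical usage sketch (illustrative only; generate_subspaces()
 * is still a stub below):
 *
 *	candidates *c = new candidates(f);
 *	c->add_candidate(p, tc, score);   // p in local projective coordinates
 *	c->generate_subspaces();
 *	delete c;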
841 class candidates {
std::vector<std::vector<std::pair<ale_pos, ale_real> > > levels;
843 int image_index;
844 unsigned int height;
845 unsigned int width;
847 public:
candidates(int f) {

	image_index = f;
	height = al->get(f)->get_t(0).unscaled_height();
	width = al->get(f)->get_t(0).unscaled_width();

	/*
	 * Is this necessary?
	 */

	levels.resize((int) (primary_decimation_upper - input_decimation_lower) + 1);
	for (int l = (int) input_decimation_lower; l <= (int) primary_decimation_upper; l++) {
		unsigned int lheight = (unsigned int) floor(height / pow(2, l));
		unsigned int lwidth = (unsigned int) floor(width / pow(2, l));

		/* Level l is stored at levels[l - input_decimation_lower]. */
		levels[l - (int) input_decimation_lower].resize(lheight * lwidth * pairwise_ambiguity);

		for (unsigned int i = 0; i < lheight; i++)
		for (unsigned int j = 0; j < lwidth; j++)
		for (unsigned int k = 0; k < pairwise_ambiguity; k++)
			levels[l - (int) input_decimation_lower][(i * lwidth + j) * pairwise_ambiguity + k] =
				std::pair<ale_pos, ale_real>(0, 0);
	}
}
870 * Point p is expected to be in local projective coordinates.
void add_candidate(point p, ale_pos tc, ale_real score) {
	assert(tc <= primary_decimation_upper);
	assert(tc >= input_decimation_lower);
	assert(p[2] < 0);
	assert(score >= 0);

	int i = (unsigned int) floor(p[0] / pow(2, tc));
	int j = (unsigned int) floor(p[1] / pow(2, tc));

	assert(i < floor(height / pow(2, tc)));
	assert(j < floor(width / pow(2, tc)));

	unsigned int lwidth = (unsigned int) floor(width / pow(2, tc));

	for (unsigned int k = 0; k < pairwise_ambiguity; k++) {
		std::pair<ale_pos, ale_real> *pk =
			&(levels[(int) tc - (int) input_decimation_lower]
			        [(i * lwidth + j) * pairwise_ambiguity + k]);

		if (pk->first != 0 && score >= pk->second)
			continue;

		ale_pos tp = pk->first;
		ale_real tr = pk->second;

		pk->first = p[2];
		pk->second = score;

		p[2] = tp;
		score = tr;
	}
}
904 * Generate subspaces for candidates.
907 void generate_subspaces() {
908 assert(0);
913 * List for calculating weighted median.
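 *
 * Illustrative example (not from the original sources): after
 * insert_weight(1, 0.5) and insert_weight(3, 0.5), find_median(0)
 * returns (1 + 3) / 2 = 2, since the half-weight point falls exactly
 * between the two entries; the attenuation argument moves the target
 * from half-weight (0) toward zero-weight (1).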
915 class wml {
916 ale_real *data;
917 unsigned int size;
918 unsigned int used;
920 ale_real &_w(unsigned int i) {
921 assert(i <= used);
922 return data[i * 2];
925 ale_real &_d(unsigned int i) {
926 assert(i <= used);
927 return data[i * 2 + 1];
930 void increase_capacity() {
932 if (size > 0)
933 size *= 2;
934 else
935 size = 1;
937 data = (ale_real *) realloc(data, sizeof(ale_real) * 2 * (size * 1));
939 assert(data);
940 assert (size > used);
942 if (!data) {
	fprintf(stderr, "Unable to allocate %u bytes of memory\n",
			(unsigned int) (sizeof(ale_real) * 2 * size));
945 exit(1);
949 void insert_weight(unsigned int i, ale_real v, ale_real w) {
950 assert(used < size);
951 assert(used >= i);
952 for (unsigned int j = used; j > i; j--) {
953 _w(j) = _w(j - 1);
954 _d(j) = _d(j - 1);
957 _w(i) = w;
958 _d(i) = v;
960 used++;
963 public:
965 unsigned int get_size() {
966 return size;
969 unsigned int get_used() {
970 return used;
973 void print_info() {
974 fprintf(stderr, "[st %p sz %u el", this, size);
975 for (unsigned int i = 0; i < used; i++)
976 fprintf(stderr, " (%f, %f)", _d(i), _w(i));
977 fprintf(stderr, "]\n");
980 void clear() {
981 used = 0;
984 void insert_weight(ale_real v, ale_real w) {
985 for (unsigned int i = 0; i < used; i++) {
986 if (_d(i) == v) {
987 _w(i) += w;
988 return;
990 if (_d(i) > v) {
991 if (used == size)
992 increase_capacity();
993 insert_weight(i, v, w);
994 return;
997 if (used == size)
998 increase_capacity();
999 insert_weight(used, v, w);
1003 * Finds the median at half-weight, or between half-weight
1004 * and zero-weight, depending on the attenuation value.
1007 ale_real find_median(double attenuation = 0) {
1009 assert(attenuation >= 0);
1010 assert(attenuation <= 1);
1012 ale_real zero1 = 0;
1013 ale_real zero2 = 0;
1014 ale_real undefined = zero1 / zero2;
1016 ale_accum weight_sum = 0;
1018 for (unsigned int i = 0; i < used; i++)
1019 weight_sum += _w(i);
1021 // if (weight_sum == 0)
1022 // return undefined;
1024 if (used == 0 || used == 1)
1025 return undefined;
1027 if (weight_sum == 0) {
1028 ale_accum data_sum = 0;
1029 for (unsigned int i = 0; i < used; i++)
1030 data_sum += _d(i);
1031 return data_sum / used;
1035 ale_accum midpoint = weight_sum * (0.5 - 0.5 * attenuation);
1037 ale_accum weight_sum_2 = 0;
1039 for (unsigned int i = 0; i < used && weight_sum_2 < midpoint; i++) {
1040 weight_sum_2 += _w(i);
1042 if (weight_sum_2 > midpoint) {
1043 return _d(i);
1044 } else if (weight_sum_2 == midpoint) {
1045 assert (i + 1 < used);
1046 return (_d(i) + _d(i + 1)) / 2;
1050 return undefined;
1053 wml(int initial_size = 0) {
1055 // if (initial_size == 0) {
1056 // initial_size = (int) (d2::image_rw::count() * 1.5);
1057 // }
1059 size = initial_size;
1060 used = 0;
1062 if (size > 0) {
1063 data = (ale_real *) malloc(size * sizeof(ale_real) * 2);
1064 assert(data);
1065 } else {
1066 data = NULL;
1071 * copy constructor. This is required to avoid undesired frees.
1074 wml(const wml &w) {
1075 size = w.size;
1076 used = w.used;
1077 data = (ale_real *) malloc(size * sizeof(ale_real) * 2);
1078 assert(data);
1080 memcpy(data, w.data, size * sizeof(ale_real) * 2);
1083 ~wml() {
1084 free(data);
1089 * Class for information regarding spatial regions of interest.
1091 * This class is configured for convenience in cases where sampling is
1092 * performed using an approximation of the fine:box:1,triangle:2 chain.
1093 * In this case, the *_1 variables would store the fine data and the
1094 * *_2 variables would store the coarse data. Other uses are also
1095 * possible.
1098 class spatial_info {
1100 * Map channel value --> weight.
1102 wml color_weights_1[3];
1103 wml color_weights_2[3];
1106 * Current color.
1108 d2::pixel color;
1111 * Map occupancy value --> weight.
1113 wml occupancy_weights_1;
1114 wml occupancy_weights_2;
1117 * Current occupancy value.
1119 ale_real occupancy;
1122 * pocc/socc density
1125 unsigned int pocc_density;
1126 unsigned int socc_density;
1129 * Insert a weight into a list.
1131 void insert_weight(wml *m, ale_real v, ale_real w) {
1132 m->insert_weight(v, w);
1136 * Find the median of a weighted list. Uses NaN for undefined.
1138 ale_real find_median(wml *m, double attenuation = 0) {
1139 return m->find_median(attenuation);
1142 public:
1144 * Constructor.
1146 spatial_info() {
1147 color = d2::pixel::zero();
1148 occupancy = 0;
1149 pocc_density = 0;
1150 socc_density = 0;
1154 * Accumulate color; primary data set.
1156 void accumulate_color_1(int f, d2::pixel color, d2::pixel weight) {
1157 for (int k = 0; k < 3; k++)
1158 insert_weight(&color_weights_1[k], color[k], weight[k]);
1162 * Accumulate color; secondary data set.
1164 void accumulate_color_2(d2::pixel color, d2::pixel weight) {
1165 for (int k = 0; k < 3; k++)
1166 insert_weight(&color_weights_2[k], color[k], weight[k]);
1170 * Accumulate occupancy; primary data set.
1172 void accumulate_occupancy_1(int f, ale_real occupancy, ale_real weight) {
1173 insert_weight(&occupancy_weights_1, occupancy, weight);
1177 * Accumulate occupancy; secondary data set.
1179 void accumulate_occupancy_2(ale_real occupancy, ale_real weight) {
1180 insert_weight(&occupancy_weights_2, occupancy, weight);
1182 if (occupancy == 0 || occupancy_weights_2.get_size() < 96)
1183 return;
1185 // fprintf(stderr, "%p updated socc with: %f %f\n", this, occupancy, weight);
1186 // occupancy_weights_2.print_info();
1190 * Update color (and clear accumulation structures).
1192 void update_color() {
1193 for (int d = 0; d < 3; d++) {
1194 ale_real c = find_median(&color_weights_1[d]);
1195 if (isnan(c))
1196 c = find_median(&color_weights_2[d]);
1197 if (isnan(c))
1198 c = 0;
1200 color[d] = c;
1202 color_weights_1[d].clear();
1203 color_weights_2[d].clear();
1208 * Update occupancy (and clear accumulation structures).
1210 void update_occupancy() {
1211 ale_real o = find_median(&occupancy_weights_1, occ_att);
1212 if (isnan(o))
1213 o = find_median(&occupancy_weights_2, occ_att);
1214 if (isnan(o))
1215 o = 0;
1217 occupancy = o;
1219 pocc_density = occupancy_weights_1.get_used();
1220 socc_density = occupancy_weights_2.get_used();
1222 occupancy_weights_1.clear();
1223 occupancy_weights_2.clear();
1228 * Get current color.
1230 d2::pixel get_color() {
1231 return color;
1235 * Get current occupancy.
1237 ale_real get_occupancy() {
1238 return occupancy;
1242 * Get primary color density.
1245 unsigned int get_pocc_density() {
1246 return pocc_density;
1249 unsigned int get_socc_density() {
1250 return socc_density;
1255 * Map spatial regions of interest to spatial info structures. XXX:
1256 * This may get very poor cache behavior in comparison with, say, an
1257 * array. Unfortunately, there is no immediately obvious array
1258 * representation. If some kind of array representation were adopted,
1259 * it would probably cluster regions of similar depth from the
1260 * perspective of the typical camera. In particular, for a
1261 * stereoscopic view, depth ordering for two random points tends to be
1262 * similar between cameras, I think. Unfortunately, it is never
1263 * identical for all points (unless cameras are co-located). One
1264 * possible approach would be to order based on, say, camera 0's idea
1265 * of depth.
1268 typedef std::map<struct space::node *, spatial_info> spatial_info_map_t;
1270 static spatial_info_map_t spatial_info_map;
1272 public:
1275 * Debugging variables.
1278 static unsigned long total_ambiguity;
1279 static unsigned long total_pixels;
1280 static unsigned long total_divisions;
1281 static unsigned long total_tsteps;
1284 * Member functions
1287 static void et(double et_parameter) {
1288 encounter_threshold = et_parameter;
1291 static void load_model(const char *name) {
1292 load_model_name = name;
1295 static void save_model(const char *name) {
1296 save_model_name = name;
1299 static void fc(ale_pos fc) {
1300 front_clip = fc;
1303 static void di_upper(ale_pos _dgi) {
1304 primary_decimation_upper = _dgi;
1307 static void do_try(ale_pos _dgo) {
1308 output_decimation_preferred = _dgo;
1311 static void di_lower(ale_pos _idiv) {
1312 input_decimation_lower = _idiv;
1315 static void oc() {
1316 output_clip = 1;
1319 static void no_oc() {
1320 output_clip = 0;
1323 static void rc(ale_pos rc) {
1324 rear_clip = rc;
1328 * Initialize 3D scene from 2D scene, using 2D and 3D alignment
1329 * information.
1331 static void init_from_d2() {
1334 * Rear clip value of 0 is converted to infinity.
1337 if (rear_clip == 0) {
1338 ale_pos one = +1;
1339 ale_pos zero = +0;
1341 rear_clip = one / zero;
1342 assert(isinf(rear_clip) == +1);
1346 * Scale and translate clipping plane depths.
1349 ale_pos cp_scalar = d3::align::projective(0).wc(point(0, 0, 0))[2];
1351 front_clip = front_clip * cp_scalar - cp_scalar;
1352 rear_clip = rear_clip * cp_scalar - cp_scalar;
1356 * Allocate image structures.
1359 al = new lod_images;
1361 if (tc_multiplier != 0) {
1362 al->open_all();
1367 * Perform spatial_info updating on a given subspace, for given
1368 * parameters.
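 *
 * For each subspace visited, the probability of encounter is taken to be
 * (1 - weight accumulated so far for this frame); the weight map is then
 * augmented by encounter * occupancy, and the encounter value weights the
 * color and occupancy samples accumulated into the subspace.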
1370 static void subspace_info_update(space::iterate si, int f, ref_weights *weights) {
1371 while(!si.done()) {
1373 space::traverse st = si.get();
1376 * Skip spaces with no color information.
1378 * XXX: This could be more efficient, perhaps.
1381 if (spatial_info_map.count(st.get_node()) == 0) {
1382 si.next();
1383 continue;
1387 * Get in-bounds centroid, if one exists.
1390 point p = al->get(f)->get_t(0).centroid(st);
1392 if (!p.defined()) {
1393 si.next();
1394 continue;
1398 * Get information on the subspace.
1401 spatial_info *sn = &spatial_info_map[st.get_node()];
1402 d2::pixel color = sn->get_color();
1403 ale_real occupancy = sn->get_occupancy();
1406 * Store current weight so we can later check for
1407 * modification by higher-resolution subspaces.
1410 ref_weights::subtree *tree = weights->get_subtree(st);
1413 * Check for higher resolution subspaces, and
1414 * update the space iterator.
1417 if (st.get_node()->positive
1418 || st.get_node()->negative) {
1421 * Cleave space for the higher-resolution pass,
1422 * skipping the current space, since we will
1423 * process that later.
1426 space::iterate cleaved_space = si.cleave();
1428 cleaved_space.next();
1430 subspace_info_update(cleaved_space, f, weights);
1432 } else {
1433 si.next();
1437 * Add new data on the subspace and update weights.
1440 ale_pos tc = al->get(f)->get_t(0).trilinear_coordinate(st);
1441 d2::pixel pcolor = al->get(f)->get_tl(p.xy(), tc);
1442 d2::pixel colordiff = (color - pcolor) * (ale_real) 256;
1444 if (falloff_exponent != 0) {
1445 d2::pixel max_diff = al->get(f)->get_max_diff(p.xy(), tc) * (ale_real) 256;
1447 for (int k = 0; k < 3; k++)
1448 if (max_diff[k] > 1)
1449 colordiff[k] /= pow(max_diff[k], falloff_exponent);
1453 * Determine the probability of
1454 * encounter, divided by the occupancy.
1457 d2::pixel encounter = d2::pixel(1, 1, 1) * (1 - weights->get_weight(st));
1460 * Update weights
1463 weights->add_weight(st, (encounter * occupancy)[0], tree);
1466 * Delete the subtree, if necessary.
1469 delete tree;
1472 * Check for cases in which the subspace should not be
1473 * updated.
1476 if (!resolution_ok(al->get(f)->get_t(0), tc))
1477 return;
1480 * Update subspace.
1483 sn->accumulate_color_1(f, pcolor, encounter);
1484 d2::pixel channel_occ = pexp(-colordiff * colordiff);
1486 ale_accum occ = channel_occ[0];
1488 for (int k = 1; k < 3; k++)
1489 if (channel_occ[k] < occ)
1490 occ = channel_occ[k];
1492 sn->accumulate_occupancy_1(f, occ, encounter[0]);
1498 * Run a single iteration of the spatial_info update cycle.
1500 static void spatial_info_update() {
1502 * Iterate through each frame.
1504 for (unsigned int f = 0; f < d2::image_rw::count(); f++) {
1507 * Open the frame and transformation.
1510 if (tc_multiplier == 0)
1511 al->open(f);
1514 * Allocate weights data structure for storing encounter
1515 * probabilities.
1518 ref_weights *weights = new ref_weights(f);
1521 * Call subspace_info_update for the root space.
1524 subspace_info_update(space::iterate(al->get(f)->origin()), f, weights);
1527 * Free weights.
1530 delete weights;
1533 * Close the frame and transformation.
1536 if (tc_multiplier == 0)
1537 al->close(f);
1541 * Update all spatial_info structures.
1543 for (spatial_info_map_t::iterator i = spatial_info_map.begin(); i != spatial_info_map.end(); i++) {
1544 i->second.update_color();
1545 i->second.update_occupancy();
1547 // d2::pixel color = i->second.get_color();
1549 // fprintf(stderr, "space p=%p updated to c=[%f %f %f] o=%f\n",
1550 // i->first, color[0], color[1], color[2],
1551 // i->second.get_occupancy());
1556 * Support function for view() and depth().
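 *
 * The 'type' argument selects the rendered quantity:
 *
 *	0	color view
 *	1	weighted (transparent) depth
 *	2	ambiguity (ambivalence) measure
 *	3	closeness measure
 *	4	weighted (transparent) contribution
 *	5	weighted (transparent) occupancy
 *	6	(depth, xres, yres) triple
 *	7	(xoff, yoff, 0) triple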
static void view_recurse(int type, d2::image *im, d2::image *weights, space::iterate si, pt _pt) {
1560 while (!si.done()) {
1561 space::traverse st = si.get();
1564 * XXX: This could be more efficient, perhaps.
1567 if (spatial_info_map.count(st.get_node()) == 0) {
1568 si.next();
1569 continue;
1572 spatial_info sn = spatial_info_map[st.get_node()];
1575 * Get information on the subspace.
1578 d2::pixel color = sn.get_color();
1579 // d2::pixel color = d2::pixel(1, 1, 1) * (double) (((unsigned int) (st.get_node()) / sizeof(space)) % 65535);
1580 ale_real occupancy = sn.get_occupancy();
1583 * Determine the view-local bounding box for the
1584 * subspace.
1587 point bb[2];
1589 _pt.get_view_local_bb(st, bb);
1591 point min = bb[0];
1592 point max = bb[1];
1595 * Data structure to check modification of weights by
1596 * higher-resolution subspaces.
1599 std::queue<d2::pixel> weight_queue;
1602 * Check for higher resolution subspaces, and
1603 * update the space iterator.
1606 if (st.get_node()->positive
1607 || st.get_node()->negative) {
1610 * Store information about current weights,
1611 * so we will know which areas have been
1612 * covered by higher-resolution subspaces.
1615 for (int i = (int) ceil(min[0]); i <= (int) floor(max[0]); i++)
1616 for (int j = (int) ceil(min[1]); j <= (int) floor(max[1]); j++)
1617 weight_queue.push(weights->get_pixel(i, j));
1620 * Cleave space for the higher-resolution pass,
1621 * skipping the current space, since we will
1622 * process that afterward.
1625 space::iterate cleaved_space = si.cleave();
1627 cleaved_space.next();
1629 view_recurse(type, im, weights, cleaved_space, _pt);
1631 } else {
1632 si.next();
1637 * Iterate over pixels in the bounding box, finding
1638 * pixels that intersect the subspace. XXX: assume
1639 * for now that all pixels in the bounding box
1640 * intersect the subspace.
1643 for (int i = (int) ceil(min[0]); i <= (int) floor(max[0]); i++)
1644 for (int j = (int) ceil(min[1]); j <= (int) floor(max[1]); j++) {
1647 * Check for higher-resolution updates.
1650 if (weight_queue.size()) {
1651 if (weight_queue.front() != weights->get_pixel(i, j)) {
1652 weight_queue.pop();
1653 continue;
1655 weight_queue.pop();
1659 * Determine the probability of encounter.
1662 d2::pixel encounter = (d2::pixel(1, 1, 1) - weights->get_pixel(i, j)) * occupancy;
1665 * Update images.
1668 if (type == 0) {
1671 * Color view
1674 weights->pix(i, j) += encounter;
1675 im->pix(i, j) += encounter * color;
1677 } else if (type == 1) {
1680 * Weighted (transparent) depth display
1683 ale_pos depth_value = _pt.wp_scaled(st.get_min())[2];
1684 weights->pix(i, j) += encounter;
1685 im->pix(i, j) += encounter * depth_value;
1687 } else if (type == 2) {
1690 * Ambiguity (ambivalence) measure.
1693 weights->pix(i, j) = d2::pixel(1, 1, 1);
1694 im->pix(i, j) += 0.1 * d2::pixel(1, 1, 1);
1696 } else if (type == 3) {
1699 * Closeness measure.
1702 ale_pos depth_value = _pt.wp_scaled(st.get_min())[2];
1703 if (weights->pix(i, j)[0] == 0) {
1704 weights->pix(i, j) = d2::pixel(1, 1, 1);
1705 im->pix(i, j) = d2::pixel(1, 1, 1) * depth_value;
1706 } else if (im->pix(i, j)[2] < depth_value) {
1707 im->pix(i, j) = d2::pixel(1, 1, 1) * depth_value;
1708 } else {
1709 continue;
1712 } else if (type == 4) {
1715 * Weighted (transparent) contribution display
1718 ale_pos contribution_value = sn.get_pocc_density() + sn.get_socc_density();
1719 weights->pix(i, j) += encounter;
1720 im->pix(i, j) += encounter * contribution_value;
1722 } else if (type == 5) {
1725 * Weighted (transparent) occupancy display
1728 ale_pos contribution_value = occupancy;
1729 weights->pix(i, j) += encounter;
1730 im->pix(i, j) += encounter * contribution_value;
1732 } else if (type == 6) {
1735 * (Depth, xres, yres) triple
1738 ale_pos depth_value = _pt.wp_scaled(st.get_min())[2];
1739 weights->pix(i, j)[0] += encounter[0];
1740 if (weights->pix(i, j)[1] < encounter[0]) {
1741 weights->pix(i, j)[1] = encounter[0];
1742 im->pix(i, j)[0] = weights->pix(i, j)[1] * depth_value;
1743 im->pix(i, j)[1] = max[0] - min[0];
1744 im->pix(i, j)[2] = max[1] - min[1];
1747 } else if (type == 7) {
1750 * (xoff, yoff, 0) triple
1753 weights->pix(i, j)[0] += encounter[0];
1754 if (weights->pix(i, j)[1] < encounter[0]) {
1755 weights->pix(i, j)[1] = encounter[0];
1756 im->pix(i, j)[0] = i - min[0];
1757 im->pix(i, j)[1] = j - min[1];
1758 im->pix(i, j)[2] = 0;
1761 } else
1762 assert(0);
1768 * Generate an depth image from a specified view.
1770 static const d2::image *depth(pt _pt, int n = -1) {
1771 assert ((unsigned int) n < d2::image_rw::count() || n < 0);
1773 if (n >= 0) {
1774 assert((int) floor(d2::align::of(n).scaled_height())
1775 == (int) floor(_pt.scaled_height()));
1776 assert((int) floor(d2::align::of(n).scaled_width())
1777 == (int) floor(_pt.scaled_width()));
1780 d2::image *im1 = new d2::image_ale_real((int) floor(_pt.scaled_height()),
1781 (int) floor(_pt.scaled_width()), 3);
1783 d2::image *im2 = new d2::image_ale_real((int) floor(_pt.scaled_height()),
1784 (int) floor(_pt.scaled_width()), 3);
1786 d2::image *im3 = new d2::image_ale_real((int) floor(_pt.scaled_height()),
1787 (int) floor(_pt.scaled_width()), 3);
1789 _pt.view_angle(_pt.view_angle() * VIEW_ANGLE_MULTIPLIER);
1792 * Use adaptive subspace data.
1795 d2::image *weights = new d2::image_ale_real((int) floor(_pt.scaled_height()),
1796 (int) floor(_pt.scaled_width()), 3);
1799 * Iterate through subspaces.
1802 space::iterate si(_pt.origin());
1804 view_recurse(6, im1, weights, si, _pt);
1806 delete weights;
1807 weights = new d2::image_ale_real((int) floor(_pt.scaled_height()),
1808 (int) floor(_pt.scaled_width()), 3);
1810 view_recurse(7, im2, weights, si, _pt);
1813 * Normalize depths by weights
1816 if (normalize_weights)
1817 for (unsigned int i = 0; i < im1->height(); i++)
1818 for (unsigned int j = 0; j < im1->width(); j++)
1819 im1->pix(i, j)[0] /= weights->pix(i, j)[1];
1822 for (unsigned int i = 0; i < im1->height(); i++)
1823 for (unsigned int j = 0; j < im1->width(); j++) {
1826 * Handle interpolation.
1829 d2::point x;
1830 d2::point blx;
1831 d2::point res(im1->pix(i, j)[1], im1->pix(i, j)[2]);
1833 for (int d = 0; d < 2; d++) {
1835 if (im2->pix(i, j)[d] < res[d] / 2)
1836 x[d] = (ale_pos) (d?j:i) - res[d] / 2 - im2->pix(i, j)[d];
1837 else
1838 x[d] = (ale_pos) (d?j:i) + res[d] / 2 - im2->pix(i, j)[d];
1840 blx[d] = 1 - ((d?j:i) - x[d]) / res[d];
1843 ale_real depth_val = 0;
1844 ale_real depth_weight = 0;
1846 for (int ii = 0; ii < 2; ii++)
1847 for (int jj = 0; jj < 2; jj++) {
1848 d2::point p = x + d2::point(ii, jj) * res;
1849 if (im1->in_bounds(p)) {
1851 ale_real d = im1->get_bl(p)[0];
1853 if (isnan(d))
1854 continue;
1856 ale_real w = ((ii ? (1 - blx[0]) : blx[0]) * (jj ? (1 - blx[1]) : blx[1]));
1857 depth_weight += w;
1858 depth_val += w * d;
1862 ale_real depth = depth_val / depth_weight;
1865 * Handle exclusions and encounter thresholds
1868 point w = _pt.pw_scaled(point(i, j, depth));
1870 if (weights->pix(i, j)[0] < encounter_threshold || excluded(w)) {
1871 im3->pix(i, j) = d2::pixel::zero() / d2::pixel::zero();
1872 } else {
1873 im3->pix(i, j) = d2::pixel(1, 1, 1) * depth;
1877 delete weights;
1878 delete im1;
1879 delete im2;
1881 return im3;
1884 static const d2::image *depth(unsigned int n) {
1886 assert (n < d2::image_rw::count());
1888 pt _pt = align::projective(n);
1890 return depth(_pt, n);
1894 * Generate an image from a specified view.
1896 static const d2::image *view(pt _pt, int n = -1) {
1897 assert ((unsigned int) n < d2::image_rw::count() || n < 0);
1899 if (n >= 0) {
1900 assert((int) floor(d2::align::of(n).scaled_height())
1901 == (int) floor(_pt.scaled_height()));
1902 assert((int) floor(d2::align::of(n).scaled_width())
1903 == (int) floor(_pt.scaled_width()));
1906 const d2::image *depths = NULL;
1908 if (d3px_count > 0)
1909 depths = depth(_pt, n);
1911 d2::image *im = new d2::image_ale_real((int) floor(_pt.scaled_height()),
1912 (int) floor(_pt.scaled_width()), 3);
1914 _pt.view_angle(_pt.view_angle() * VIEW_ANGLE_MULTIPLIER);
1917 * Use adaptive subspace data.
1920 d2::image *weights = new d2::image_ale_real((int) floor(_pt.scaled_height()),
1921 (int) floor(_pt.scaled_width()), 3);
1924 * Iterate through subspaces.
1927 space::iterate si(_pt.origin());
1929 view_recurse(0, im, weights, si, _pt);
1931 for (unsigned int i = 0; i < im->height(); i++)
1932 for (unsigned int j = 0; j < im->width(); j++) {
1933 if (weights->pix(i, j).min_norm() < encounter_threshold
1934 || (d3px_count > 0 && isnan(depths->pix(i, j)[0]))) {
1935 im->pix(i, j) = d2::pixel::zero() / d2::pixel::zero();
1936 weights->pix(i, j) = d2::pixel::zero();
1937 } else if (normalize_weights)
1938 im->pix(i, j) /= weights->pix(i, j);
1941 if (d3px_count > 0)
1942 delete depths;
1944 delete weights;
1946 return im;
1949 static void tcem(double _tcem) {
1950 tc_multiplier = _tcem;
1953 static void oui(unsigned int _oui) {
1954 ou_iterations = _oui;
1957 static void pa(unsigned int _pa) {
1958 pairwise_ambiguity = _pa;
1961 static void pc(const char *_pc) {
1962 pairwise_comparisons = _pc;
1965 static void d3px(int _d3px_count, double *_d3px_parameters) {
1966 d3px_count = _d3px_count;
1967 d3px_parameters = _d3px_parameters;
1970 static void fx(double _fx) {
1971 falloff_exponent = _fx;
1974 static void nw() {
1975 normalize_weights = 1;
1978 static void no_nw() {
1979 normalize_weights = 0;
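/*
 * Note: each exclusion region passed via d3px() occupies six consecutive
 * entries of d3px_parameters, interpreted below as
 * [x_min, x_max, y_min, y_max, z_min, z_max] in world coordinates.
 */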
1982 static int excluded(point p) {
1983 for (int n = 0; n < d3px_count; n++) {
1984 double *region = d3px_parameters + (6 * n);
1985 if (p[0] >= region[0]
1986 && p[0] <= region[1]
1987 && p[1] >= region[2]
1988 && p[1] <= region[3]
1989 && p[2] >= region[4]
1990 && p[2] <= region[5])
1991 return 1;
1994 return 0;
1997 static const d2::image *view(unsigned int n) {
1999 assert (n < d2::image_rw::count());
2001 pt _pt = align::projective(n);
2003 return view(_pt, n);
2007 * Add specified control points to the model
2009 static void add_control_points() {
2012 typedef struct {point iw; point ip, is;} analytic;
2013 typedef std::multimap<ale_real,analytic> score_map;
2014 typedef std::pair<ale_real,analytic> score_map_element;
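/*
 * Solve for the 3D point nearest the intersection of the rays through
 * (i, j) in frame f1 and (ii, jj) in frame f2, filling *_a and writing a
 * normalized color-mismatch score to *normalized_score.  The score is
 * initialized to NaN and is left undefined when the pair is rejected, so
 * callers test the result with finite().  A score_map orders candidate
 * points by increasing score.
 */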
2016 static void solve_point_pair(unsigned int f1, unsigned int f2, int i, int j, ale_pos ii, ale_pos jj,
2017 ale_real *normalized_score, analytic *_a,
2018 const pt &_pt1, const pt &_pt2, const d2::image *if1, const d2::image *if2) {
2020 *normalized_score = 0;
2021 *normalized_score /= *normalized_score;
2022 assert(isnan(*normalized_score));
2025 * Create an orthonormal basis to
2026 * determine line intersection.
2029 point bp0 = _pt1.pw_scaled(point(i, j, 0));
2030 point bp1 = _pt1.pw_scaled(point(i, j, 10));
2031 point bp2 = _pt2.pw_scaled(point(ii, jj, 0));
2033 point b0 = (bp1 - bp0).normalize();
2034 point b1n = bp2 - bp0;
2035 point b1 = (b1n - b1n.dproduct(b0) * b0).normalize();
2036 point b2 = point(0, 0, 0).xproduct(b0, b1).normalize(); // Should already have norm=1
2039 * Select a fourth point to define a second line.
2042 point p3 = _pt2.pw_scaled(point(ii, jj, 10));
2045 * Representation in the new basis.
2048 d2::point nbp0 = d2::point(bp0.dproduct(b0), bp0.dproduct(b1));
2049 // d2::point nbp1 = d2::point(bp1.dproduct(b0), bp1.dproduct(b1));
2050 d2::point nbp2 = d2::point(bp2.dproduct(b0), bp2.dproduct(b1));
2051 d2::point np3 = d2::point( p3.dproduct(b0), p3.dproduct(b1));
2054 * Determine intersection of line
2055 * (nbp0, nbp1), which is parallel to
2056 * b0, with line (nbp2, np3).
2060 * XXX: a stronger check would be
2061 * better here, e.g., involving the
2062 * ratio (np3[0] - nbp2[0]) / (np3[1] -
2063 * nbp2[1]). Also, acceptance of these
2064 * cases is probably better than
2065 * rejection.
2068 if (np3[1] - nbp2[1] == 0)
2069 return;
2071 d2::point intersection = d2::point(nbp2[0]
2072 + (nbp0[1] - nbp2[1]) * (np3[0] - nbp2[0]) / (np3[1] - nbp2[1]),
2073 nbp0[1]);
2075 ale_pos b2_offset = b2.dproduct(bp0);
2078 * Map the intersection back to the world
2079 * basis.
2082 point iw = intersection[0] * b0 + intersection[1] * b1 + b2_offset * b2;
2085 * Reject intersection points behind a
2086 * camera.
2089 point icp = _pt1.wc(iw);
2090 point ics = _pt2.wc(iw);
2093 if (icp[2] >= 0 || ics[2] >= 0)
2094 return;
2097 * Reject clipping plane violations.
2100 if (iw[2] > front_clip
2101 || iw[2] < rear_clip)
2102 return;
2105 * Score the point.
2108 point ip = _pt1.wp_scaled(iw);
2110 point is = _pt2.wp_scaled(iw);
2112 _a->iw = iw;
2113 _a->ip = ip;
2114 _a->is = is;
2117 * Calculate score from color match. Assume for now
2118 * that the transformation can be approximated locally
2119 * with a translation.
2122 ale_pos score = 0;
2123 ale_pos divisor = 0;
2124 ale_pos l1_multiplier = 0.125;
2126 if (if1->in_bounds(ip.xy())
2127 && if2->in_bounds(is.xy())) {
2128 divisor += 1 - l1_multiplier;
2129 score += (1 - l1_multiplier)
2130 * (if1->get_bl(ip.xy()) - if2->get_bl(is.xy())).normsq();
2133 for (int iii = -1; iii <= 1; iii++)
2134 for (int jjj = -1; jjj <= 1; jjj++) {
2135 d2::point t(iii, jjj);
2137 if (!if1->in_bounds(ip.xy() + t)
2138 || !if2->in_bounds(is.xy() + t))
2139 continue;
2141 divisor += l1_multiplier;
2142 score += l1_multiplier
2143 * (if1->get_bl(ip.xy() + t) - if2->get_bl(is.xy() + t)).normsq();
2148 * Include third-camera contributions in the score.
2151 if (tc_multiplier != 0)
2152 for (unsigned int f = 0; f < d2::image_rw::count(); f++) {
2153 if (f == f1 || f == f2)
2154 continue;
2156 pt _ptf = align::projective(f);
2157 _ptf.scale(1 / _ptf.scale_2d() / pow(2, ceil(input_decimation_exponent)));
2159 point p = _ptf.wp_scaled(iw);
2161 if (!cl->reference[f]->in_bounds(p.xy())
2162 || !if2->in_bounds(ip.xy()))
2163 continue;
2165 divisor += tc_multiplier;
2166 score += tc_multiplier
2167 * (if1->get_bl(ip.xy()) - cl->reference[f]->get_bl(p.xy())).normsq();
2170 *normalized_score = score / divisor;
2174 * Generate a map from scores to 3D points for various depths at point (i, j) in f1.
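 *
 * Two extreme depths are projected from f1 into f2 to obtain a line of
 * candidate correspondences; points along this line are scored with
 * solve_point_pair(), and sub-pixel offsets around the best-scoring
 * entries are then added to the map.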
2176 static score_map p2f_score_map(unsigned int i, unsigned int j, lod_image *if1, lod_image *if2,
2177 std::vector<pt> pt_outputs) {
2179 score_map result;
2182 * Map two depths to the secondary frame.
2185 point p1 = _pt2.wp_scaled(_pt1.pw_scaled(point(i, j, 1000)));
2186 point p2 = _pt2.wp_scaled(_pt1.pw_scaled(point(i, j, -1000)));
2189 * For cases where the mapped points define a
2190 * line and where points on the line fall
2191 * within the defined area of the frame,
2192 * determine the starting point for inspection.
2193 * In other cases, continue to the next pixel.
2196 ale_pos diff_i = p2[0] - p1[0];
2197 ale_pos diff_j = p2[1] - p1[1];
2198 ale_pos slope = diff_j / diff_i;
2200 if (isnan(slope)) {
2201 fprintf(stderr, "%d->%d (%d, %d) has undefined slope\n",
2202 f1, f2, i, j);
2203 return result;
2207 * Make absurdly large/small slopes either infinity, negative infinity, or zero.
2210 if (fabs(slope) > if2->width() * 100) {
2211 double zero = 0;
2212 double one = 1;
2213 double inf = one / zero;
2214 slope = inf;
2215 } else if (slope < 1 / (double) if2->height() / 100
2216 && slope > -1/ (double) if2->height() / 100) {
2217 slope = 0;
2220 ale_pos top_intersect = p1[1] - p1[0] * slope;
2221 ale_pos lef_intersect = p1[0] - p1[1] / slope;
2222 ale_pos rig_intersect = p1[0] - (p1[1] - if2->width() + 2) / slope;
2223 ale_pos sp_i, sp_j;
2225 if (slope == 0) {
2226 sp_i = lef_intersect;
2227 sp_j = 0;
2228 } else if (finite(slope) && top_intersect >= 0 && top_intersect < if2->width() - 1) {
2229 sp_i = 0;
2230 sp_j = top_intersect;
2231 } else if (slope > 0 && lef_intersect >= 0 && lef_intersect <= if2->height() - 1) {
2232 sp_i = lef_intersect;
2233 sp_j = 0;
2234 } else if (slope < 0 && rig_intersect >= 0 && rig_intersect <= if2->height() - 1) {
2235 sp_i = rig_intersect;
2236 sp_j = if2->width() - 2;
2237 } else {
2238 return result;
2242 * Determine increment values for examining
2243 * point, ensuring that incr_i is always
2244 * positive.
2247 ale_pos incr_i, incr_j;
2249 if (fabs(diff_i) > fabs(diff_j)) {
2250 incr_i = 1;
2251 incr_j = slope;
2252 } else if (slope > 0) {
2253 incr_i = 1 / slope;
2254 incr_j = 1;
2255 } else {
2256 incr_i = -1 / slope;
2257 incr_j = -1;
2261 * Examine regions near the projected line.
2264 for (ale_pos ii = sp_i, jj = sp_j;
2265 ii <= if2->height() - 1 && jj <= if2->width() - 1 && ii >= 0 && jj >= 0;
2266 ii += incr_i, jj += incr_j) {
2268 ale_real normalized_score;
2269 analytic _a;
2271 solve_point_pair(f1, f2, i, j, ii, jj, &normalized_score, &_a, _pt1, _pt2, if1, if2);
2274 * Reject points with undefined score.
2277 if (!finite(normalized_score))
2278 continue;
2281 * Add the point to the score map.
2284 result.insert(score_map_element(normalized_score, _a));
2288 * Iterate through regions and add new locations with sub-pixel resolution
2290 int accumulated_passes = 0;
2291 int max_acc_passes = pairwise_ambiguity;
2292 for (score_map::iterator smi = result.begin(); smi != result.end(); smi++) {
2293 point is = smi->second.is;
2295 if (accumulated_passes++ >= max_acc_passes)
2296 break;
2298 for (ale_pos epsilon = -0.5; epsilon <= 0.5; epsilon += 0.125) {
2300 if (fabs(epsilon) < 0.001)
2301 continue;
2303 ale_real normalized_score;
2304 analytic _a;
2306 solve_point_pair(f1, f2, i, j, is[0] + epsilon * incr_i, is[1] + epsilon * incr_j,
2307 &normalized_score, &_a, _pt1, _pt2, if1, if2);
2309 if (!finite(normalized_score))
2310 continue;
2312 result.insert(score_map_element(normalized_score, _a));
2316 return result;
2320 * Attempt to refine space around a point, to high and low resolutions
2321 * for two cameras, resulting in four resolutions in total.
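 *
 * The root space is subdivided until the projected bounding box of the
 * subspace containing iw spans less than two pixels (low resolution) and
 * less than one pixel (high resolution) in each input camera and output
 * view.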
2324 static void refine_space(point iw, pt _pt1, pt _pt2, std::vector<pt> pt_outputs) {
2326 space::traverse st = space::traverse::root();
2328 if (!st.includes(iw)) {
2329 assert(0);
2330 return;
2333 int camera_highres[2] = {0, 0};
2334 int camera_lowres[2] = {0, 0};
2336 std::vector<int> output_highres;
2337 std::vector<int> output_lowres;
2339 output_highres.resize(pt_outputs.size(), 0);
2340 output_lowres.resize(pt_outputs.size(), 0);
2343 * Loop until all resolutions of interest have been generated.
2346 for(;;) {
2348 point frame_min[2] = { point::posinf(), point::posinf() },
2349 frame_max[2] = { point::neginf(), point::neginf() };
2351 std::vector<point> output_max;
2352 std::vector<point> output_min;
2354 output_max.resize(pt_outputs.size(), point::posinf());
2355 output_min.resize(pt_outputs.size(), point::neginf());
2357 point p[2] = { st.get_min(), st.get_max() };
2360 * Cycle through the corner points bounding the
2361 * subspace to determine a bounding box.
2363 * NB: This code is not identical to
2364 * get_view_local_bb(), as it does not clip the
2365 * results.
2368 for (int ibit = 0; ibit < 2; ibit++)
2369 for (int jbit = 0; jbit < 2; jbit++)
2370 for (int kbit = 0; kbit < 2; kbit++) {
2371 point pp = point(p[ibit][0], p[jbit][1], p[kbit][2]);
2373 point ppp[2] = {_pt1.wp_scaled(pp), _pt2.wp_scaled(pp)};
2376 * Inputs
2379 for (int f = 0; f < 2; f++)
2380 for (int d = 0; d < 3; d++) {
2381 if (ppp[f][d] < frame_min[f][d] || isnan(ppp[f][d]))
2382 frame_min[f][d] = ppp[f][d];
2383 if (ppp[f][d] > frame_max[f][d] || isnan(ppp[f][d]))
2384 frame_max[f][d] = ppp[f][d];
2388 * Outputs
2391 for (unsigned int n = 0; n < pt_outputs.size(); n++) {
2393 point ppp_pt = pt_outputs[n].wp_scaled(pp);
2395 for (int d = 0; d < 3; d++) {
2396 if (ppp_pt[d] < output_min[n][d] || isnan(ppp_pt[d]))
2397 output_min[n][d] = ppp_pt[d];
2398 if (ppp_pt[d] > output_max[n][d] || isnan(ppp_pt[d]))
2399 output_max[n][d] = ppp_pt[d];
2405 * Generate any new desired spatial registers.
2409 * Inputs
2412 for (int f = 0; f < 2; f++) {
2415 * Low resolution
2418 if (frame_max[f][0] - frame_min[f][0] < 2
2419 && frame_max[f][1] - frame_min[f][1] < 2
2420 && camera_lowres[f] == 0) {
2421 spatial_info_map[st.get_node()];
2422 camera_lowres[f] = 1;
2426 * High resolution.
2429 if (frame_max[f][0] - frame_min[f][0] < 1
2430 && frame_max[f][1] - frame_min[f][1] < 1
2431 && camera_highres[f] == 0) {
2432 spatial_info_map[st.get_node()];
2433 camera_highres[f] = 1;
2438 * Outputs
2441 for (unsigned int n = 0; n < pt_outputs.size(); n++) {
2444 * Low resolution
2447 if (output_max[n][0] - output_min[n][0] < 2
2448 && output_max[n][1] - output_min[n][1] < 2
2449 && output_lowres[n] == 0) {
2450 spatial_info_map[st.get_node()];
2451 output_lowres[n] = 1;
2455 * High resolution.
if (output_max[n][0] - output_min[n][0] < 1
 && output_max[n][1] - output_min[n][1] < 1
 && output_highres[n] == 0) {
	spatial_info_map[st.get_node()];
	output_highres[n] = 1;
}
2467 * Check for completion
if (camera_highres[0]
 && camera_highres[1]
 && camera_lowres[0]
 && camera_lowres[1]
 && !count(output_lowres.begin(), output_lowres.end(), 0)
 && !count(output_highres.begin(), output_highres.end(), 0))
	return;
2479 * Check precision before analyzing space further.
2482 if (st.precision_wall()) {
2483 fprintf(stderr, "\n\n*** Error: reached subspace precision wall ***\n\n");
2484 assert(0);
2485 return;
2488 if (st.positive().includes(iw)) {
2489 st = st.positive();
2490 total_tsteps++;
2491 } else if (st.negative().includes(iw)) {
2492 st = st.negative();
2493 total_tsteps++;
2494 } else {
2495 fprintf(stderr, "failed iw = (%f, %f, %f)\n",
2496 iw[0], iw[1], iw[2]);
2497 assert(0);
2503 * Analyze space in a manner dependent on the score map.
2506 static void analyze_space_from_map(unsigned int f1, unsigned int f2, unsigned int i,
2507 unsigned int j, pt _pt1, pt _pt2, score_map _sm, std::vector<pt> pt_outputs) {
2509 int accumulated_ambiguity = 0;
2510 int max_acc_amb = pairwise_ambiguity;
2512 for(score_map::iterator smi = _sm.begin(); smi != _sm.end(); smi++) {
2514 point iw = smi->second.iw;
2515 point ip = smi->second.ip;
2516 // point is = smi->second.is;
2518 if (accumulated_ambiguity++ >= max_acc_amb)
2519 break;
2521 total_ambiguity++;
2524 * Attempt to refine space around the intersection point.
2527 refine_space(iw, _pt1, _pt2, pt_outputs);
2531 static void refine_space_for_output(pt _pt, space::traverse st = space::traverse::root()) {
2532 if (!spatial_info_map.count(st.get_node())) {
2533 if (st.get_node()->positive)
2534 refine_space_for_output(_pt, st.positive());
2535 if (st.get_node()->negative)
2536 refine_space_for_output(_pt, st.negative());
2537 return;
2540 point bb[2];
2541 _pt.get_view_local_bb(st, bb);
2543 if (bb[0].xy().lengthtosq(bb[1].xy()) < 2)
2544 return;
2546 if (!_pt.scaled_in_bounds(bb[0]) || !_pt.scaled_in_bounds(bb[1]))
2547 return;
2549 spatial_info_map[st.positive().get_node()];
2550 spatial_info_map[st.negative().get_node()];
2552 refine_space_for_output(_pt, st.positive());
2553 refine_space_for_output(_pt, st.negative());
2556 static void refine_space_for_output(const char *d_out[], const char *v_out[],
2557 std::map<const char *, pt> *d3_depth_pt,
2558 std::map<const char *, pt> *d3_output_pt) {
2560 for (unsigned int f = 0; f < d2::image_rw::count(); f++)
2561 if (d_out[f] || v_out[f])
2562 refine_space_for_output(d3::align::projective(f));
2564 for (std::map<const char *, pt>::iterator i = d3_depth_pt->begin(); i != d3_depth_pt->end(); i++)
2565 refine_space_for_output(i->second);
2567 for (std::map<const char *, pt>::iterator i = d3_output_pt->begin(); i != d3_output_pt->end(); i++)
2568 refine_space_for_output(i->second);
2572 * Make pt list.
2574 static std::vector<pt> make_pt_list(const char *d_out[], const char *v_out[],
2575 std::map<const char *, pt> *d3_depth_pt,
2576 std::map<const char *, pt> *d3_output_pt) {
2578 std::vector<pt> result;
2580 for (unsigned int n = 0; n < d2::image_rw::count(); n++) {
2581 if (d_out[n] || v_out[n]) {
2582 result.push_back(align::projective(n));
2586 for (std::map<const char *, pt>::iterator i = d3_depth_pt->begin(); i != d3_depth_pt->end(); i++) {
2587 result.push_back(i->second);
2590 for (std::map<const char *, pt>::iterator i = d3_output_pt->begin(); i != d3_output_pt->end(); i++) {
2591 result.push_back(i->second);
2594 return result;
2599 * Initialize space and identify regions of interest for the adaptive
2600 * subspace model.
2602 static void make_space(const char *d_out[], const char *v_out[],
2603 std::map<const char *, pt> *d3_depth_pt,
2604 std::map<const char *, pt> *d3_output_pt) {
2606 fprintf(stderr, "Subdividing 3D space");
2608 std::vector<pt> pt_outputs = make_pt_list(d_out, v_out, d3_depth_pt, d3_output_pt);
2611 * Initialize root space.
2614 space::init_root();
2617 * Subdivide space to resolve intensity matches between pairs
2618 * of frames.
2621 for (unsigned int f1 = 0; f1 < d2::image_rw::count(); f1++) {
2623 if (!d_out[f1] && !v_out[f1] && !d3_depth_pt->size()
2624 && !d3_output_pt->size() && strcmp(pairwise_comparisons, "all"))
2625 continue;
2627 std::vector<const d2::image *> if1;
2629 if1.push_back(d2::image_rw::copy(f1, "3D reference image"));
2630 assert(if1.back());
2632 ale_pos decimation_index = input_decimation_exponent;
while (decimation_index > 0
    && if1.back()->height() > 2
    && if1.back()->width() > 2) {
2637 if1.push_back(if1.back()->scale_by_half("3D, reduced LOD"));
2638 assert(if1.back());
2640 decimation_index -= 1;
2643 if (decimation_index > 0) {
2644 fprintf(stderr, "Error: --di argument is too large.\n");
2645 exit(1);
2649 for (unsigned int f2 = 0; f2 < d2::image_rw::count(); f2++) {
2651 if (f1 == f2)
2652 continue;
2654 std::vector<const d2::image *> if2;
2656 if2.push_back(d2::image_rw::copy(f2, "3D reference image"));
2657 assert(if2.back());
2659 ale_pos decimation_index = input_decimation_exponent;
while (decimation_index > 0
    && if2.back()->height() > 2
    && if2.back()->width() > 2) {
2664 if2.push_back(if2.back()->scale_by_half("3D, reduced LOD"));
2665 assert(if2.back());
2667 decimation_index -= 1;
2670 if (decimation_index > 0) {
2671 fprintf(stderr, "Error: --di argument is too large.\n");
2672 exit(1);
2675 pt _pt1 = align::projective(f1);
2676 pt _pt2 = align::projective(f2);
2678 _pt1.scale(1 / _pt1.scale_2d() / pow(2, ceil(input_decimation_exponent)));
2679 _pt2.scale(1 / _pt2.scale_2d() / pow(2, ceil(input_decimation_exponent)));
2682 * Iterate over all points in the primary frame.
for (unsigned int i = 0; i < if1.back()->height(); i++)
for (unsigned int j = 0; j < if1.back()->width(); j++) {
2688 total_pixels++;
2691 * Generate a map from scores to 3D points for
2692 * various depths in f1.
2695 score_map _sm = p2f_score_map(i, j, if1, if2, pt_outputs);
2698 * Analyze space in a manner dependent on the score map.
2701 analyze_space_from_map(f1, f2, i, j, _pt1, _pt2, _sm, pt_outputs);
for (unsigned int d = 0; d < if2.size(); d++)
	delete if2[d];
for (unsigned int d = 0; d < if1.size(); d++)
	delete if1[d];
2709 * This is expensive.
2712 // refine_space_for_output(d_out, v_out, d3_depth_pt, d3_output_pt);
2714 fprintf(stderr, ".\n");
2719 * Update spatial information structures.
2721 * XXX: the name of this function is horribly misleading. There isn't
2722 * even a 'search depth' any longer, since there is no longer any
2723 * bounded DFS occurring.
2725 static void reduce_cost_to_search_depth(d2::exposure *exp_out, int inc_bit) {
2728 * Subspace model
2731 for (unsigned int i = 0; i < ou_iterations; i++)
2732 spatial_info_update();
2735 #if 0
2737 * Describe a scene to a renderer
2739 static void describe(render *r) {
2741 #endif
2744 #endif