Add clip plane checks to d3::scene subspace candidate selection code.
1 // Copyright 2003, 2004, 2005 David Hilvert <dhilvert@auricle.dyndns.org>,
2 // <dhilvert@ugcs.caltech.edu>
4 /* This file is part of the Anti-Lamenessing Engine.
6 The Anti-Lamenessing Engine is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 The Anti-Lamenessing Engine is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with the Anti-Lamenessing Engine; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * d3/scene.h: Representation of a 3D scene.
25 #ifndef __scene_h__
26 #define __scene_h__
28 #include "point.h"
31 * View angle multiplier.
33 * Setting this to a value larger than one can be useful for debugging.
36 #define VIEW_ANGLE_MULTIPLIER 1
38 class scene {
41 * Clipping planes
43 static ale_pos front_clip;
44 static ale_pos rear_clip;
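/*
 * The clip depths are set through fc() and rc() below and are converted in
 * init_from_d2(); after conversion, a point with depth z = p[2] is kept when
 * rear_clip <= z <= front_clip (cf. solve_point_pair() and
 * completely_clipped()).  A rear clip value of zero is treated as infinity.
 */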
47 * Decimation exponents for geometry
49 static int primary_decimation_upper;
50 static int input_decimation_lower;
51 static int output_decimation_preferred;
54 * Output clipping
56 static int output_clip;
59 * Model files
61 static const char *load_model_name;
62 static const char *save_model_name;
65 * Occupancy attenuation
68 static double occ_att;
71 * Normalization of output by weight
74 static int normalize_weights;
77 * Falloff exponent
80 static double falloff_exponent;
83 * Third-camera error multiplier
85 static double tc_multiplier;
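/*
 * A value of zero disables the third-camera contributions in
 * solve_point_pair(); it also causes input frames to be opened and closed
 * per-frame rather than keeping all levels of detail resident (see
 * init_from_d2(), spatial_info_update(), and make_space()).
 */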
88 * Occupancy update iterations
90 static unsigned int ou_iterations;
93 * Pairwise ambiguity
95 static unsigned int pairwise_ambiguity;
98 * Pairwise comparisons
100 static const char *pairwise_comparisons;
103 * 3D Post-exclusion
105 static int d3px_count;
106 static double *d3px_parameters;
109 * Nearness threshold
111 static const ale_real nearness;
114 * Encounter threshold for defined pixels.
116 static double encounter_threshold;
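/*
 * Pixels whose accumulated encounter weight falls below this threshold are
 * marked undefined (NaN) in rendered views and depth maps (see view() and
 * depth()).
 */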
119 * Structure to hold input frame information at levels of
120 * detail between full detail and full decimation.
122 class lod_image {
123 unsigned int f;
124 unsigned int entries;
125 std::vector<const d2::image *> im;
126 std::vector<pt> transformation;
128 public:
130 * Constructor
132 lod_image(unsigned int _f) {
134 pt _pt;
136 f = _f;
137 im.push_back(d2::image_rw::copy(f, "3D reference image"));
138 assert(im.back());
139 entries = 1;
140 _pt = d3::align::projective(f);
141 _pt.scale(1 / _pt.scale_2d());
142 transformation.push_back(_pt);
144 while (im.back()->height() > 4
145 && im.back()->width() > 4) {
147 im.push_back(im.back()->scale_by_half("3D, reduced LOD"));
148 assert(im.back());
150 _pt.scale(1 / _pt.scale_2d() / pow(2, entries));
151 transformation.push_back(_pt);
153 entries += 1;
158 * Get the number of scales
160 unsigned int count() {
161 return entries;
165 * Get an image.
167 const d2::image *get_image(unsigned int i) {
168 assert(i >= 0);
169 assert(i < entries);
170 return im[i];
173 int in_bounds(d2::point p) {
174 return im[0]->in_bounds(p);
178 * Get a 'trilinear' color. We currently don't do interpolation
179 * between levels of detail; hence, it's discontinuous in tl_coord.
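*
* tl_coord selects the level of detail: level n addresses im[n], the
* reference image downscaled by a factor of 2^n, so the full-resolution
* point p is divided by 2^n before sampling.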
181 d2::pixel get_tl(d2::point p, ale_pos tl_coord) {
183 assert(in_bounds(p));
185 tl_coord = round(tl_coord);
187 if (tl_coord >= entries)
188 tl_coord = entries - 1;
189 if (tl_coord < 0)
190 tl_coord = 0;
192 p = p / (ale_pos) pow(2, tl_coord);
194 unsigned int itlc = (unsigned int) tl_coord;
196 if (p[0] > im[itlc]->height() - 1)
197 p[0] = im[itlc]->height() - 1;
198 if (p[1] > im[itlc]->width() - 1)
199 p[1] = im[itlc]->width() - 1;
201 assert(p[0] >= 0);
202 assert(p[1] >= 0);
204 return im[itlc]->get_bl(p);
207 d2::pixel get_max_diff(d2::point p, ale_pos tl_coord) {
208 assert(in_bounds(p));
210 tl_coord = round(tl_coord);
212 if (tl_coord >= entries)
213 tl_coord = entries - 1;
214 if (tl_coord < 0)
215 tl_coord = 0;
217 p = p / (ale_pos) pow(2, tl_coord);
219 unsigned int itlc = (unsigned int) tl_coord;
221 if (p[0] > im[itlc]->height() - 1)
222 p[0] = im[itlc]->height() - 1;
223 if (p[1] > im[itlc]->width() - 1)
224 p[1] = im[itlc]->width() - 1;
226 assert(p[0] >= 0);
227 assert(p[1] >= 0);
229 return im[itlc]->get_max_diff(p);
233 * Get the transformation
235 pt get_t(unsigned int i) {
236 assert(i >= 0);
237 assert(i < entries);
238 return transformation[i];
242 * Get the camera origin in world coordinates
244 point origin() {
245 return transformation[0].origin();
249 * Destructor
251 ~lod_image() {
252 for (unsigned int i = 0; i < entries; i++) {
253 delete im[i];
259 * Structure to hold weight information for reference images.
261 class ref_weights {
262 unsigned int f;
263 std::vector<d2::image *> weights;
264 pt transformation;
265 int tc_low;
266 int tc_high;
267 int initialized;
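/*
 * Each entry of 'weights' is an image holding per-pixel weights for one
 * trilinear coordinate (resolution level), indexed from tc_low (the finest
 * stored level) up to tc_high.  Within a weight image, channel 0 encodes:
 *
 *	> 0	a weight defined at this level
 *	  0	no weight at this level (consult the coarser level)
 *	 -1	the weight has been pushed down to the finer level
 *
 * (cf. increase_resolution(), add_partial(), and get_weight()).
 */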
269 void set_image(d2::image *im, ale_real value) {
270 for (unsigned int i = 0; i < im->height(); i++)
271 for (unsigned int j = 0; j < im->width(); j++)
272 im->pix(i, j) = d2::pixel(value, value, value);
275 d2::image *make_image(ale_pos sf, ale_real init_value = 0) {
276 d2::image *result = new d2::image_ale_real(
277 (unsigned int) ceil(transformation.unscaled_height() * sf),
278 (unsigned int) ceil(transformation.unscaled_width() * sf), 3);
280 if (init_value != 0)
281 set_image(result, init_value);
283 return result;
286 public:
289 * Explicit weight subtree
291 struct subtree {
292 ale_real node_value;
293 subtree *children[2][2];
295 subtree(ale_real nv, subtree *a, subtree *b, subtree *c, subtree *d) {
296 node_value = nv;
297 children[0][0] = a;
298 children[0][1] = b;
299 children[1][0] = c;
300 children[1][1] = d;
303 ~subtree() {
304 for (int i = 0; i < 2; i++)
305 for (int j = 0; j < 2; j++)
306 delete children[i][j];
311 * Constructor
313 ref_weights(unsigned int _f) {
314 f = _f;
315 transformation = d3::align::projective(f);
316 initialized = 0;
320 * Check spatial bounds.
322 int in_spatial_bounds(point p) {
323 if (p[0] < 0)
324 return 0;
325 if (p[1] < 0)
326 return 0;
327 if (p[0] > transformation.unscaled_height() - 1)
328 return 0;
329 if (p[1] > transformation.unscaled_width() - 1)
330 return 0;
331 if (p[2] >= 0)
332 return 0;
334 return 1;
337 int in_spatial_bounds(const space::traverse &t) {
338 point p = transformation.wp_unscaled(t.get_centroid());
339 return in_spatial_bounds(p);
343 * Increase resolution to the given level.
345 void increase_resolution(int tc, unsigned int i, unsigned int j) {
346 d2::image *im = weights[tc - tc_low];
347 assert(im);
348 assert(i <= im->height() - 1);
349 assert(j <= im->width() - 1);
352 * Check for the cases known to have no lower level of detail.
355 if (im->pix(i, j)[0] == -1)
356 return;
358 if (tc == tc_high)
359 return;
361 increase_resolution(tc + 1, i / 2, j / 2);
364 * Load the lower-level image structure.
367 d2::image *iim = weights[tc + 1 - tc_low];
369 assert(iim);
370 assert(i / 2 <= iim->height() - 1);
371 assert(j / 2 <= iim->width() - 1);
374 * Check for the case where no lower level of detail is
375 * available.
378 if (iim->pix(i / 2, j / 2)[0] == -1)
379 return;
382 * Spread out the lower level of detail among (uninitialized)
383 * peer values.
386 for (unsigned int ii = (i / 2) * 2; ii <= (i / 2) * 2 + 1; ii++)
387 for (unsigned int jj = (j / 2) * 2; jj <= (j / 2) * 2 + 1; jj++) {
388 assert(ii <= im->height() - 1);
389 assert(jj <= im->width() - 1);
390 assert(im->pix(ii, jj)[0] == 0);
392 im->pix(ii, jj) = iim->pix(i / 2, j / 2);
396 * Set the lower level of detail to point here.
399 iim->pix(i / 2, j / 2)[0] = -1;
403 * Add weights to positive higher-resolution pixels where
404 * found when their current values match the given subtree
405 * values; set negative pixels to zero and return 0 if no
406 * positive higher-resolution pixels are found.
408 int add_partial(int tc, unsigned int i, unsigned int j, ale_real weight, subtree *st) {
409 d2::image *im = weights[tc - tc_low];
410 assert(im);
411 assert(i <= im->height() - 1);
412 assert(j <= im->width() - 1);
415 * Check for positive values.
418 if (im->pix(i, j)[0] > 0) {
419 if (st && st->node_value == im->pix(i, j)[0])
420 im->pix(i, j)[0] += weight;
421 return 1;
425 * Handle the case where there are no higher levels of detail.
428 if (tc == tc_low) {
429 assert(im->pix(i, j)[0] == 0);
430 return 0;
434 * Handle the case where higher levels of detail are available.
437 int success[2][2];
439 for (int ii = 0; ii < 2; ii++)
440 for (int jj = 0; jj < 2; jj++)
441 success[ii][jj] = add_partial(tc - 1, i * 2 + ii, j * 2 + jj, weight,
442 st ? st->children[ii][jj] : NULL);
444 if (!success[0][0]
445 && !success[0][1]
446 && !success[1][0]
447 && !success[1][1]) {
448 im->pix(i, j)[0] = 0;
449 return 0;
452 d2::image *iim = weights[tc - 1 - tc_low];
453 assert(iim);
455 for (int ii = 0; ii < 2; ii++)
456 for (int jj = 0; jj < 2; jj++)
457 if (success[ii][jj] == 0) {
458 assert(i * 2 + ii < iim->height());
459 assert(j * 2 + jj < iim->width());
461 iim->pix(i * 2 + ii, j * 2 + jj)[0] = weight;
464 im->pix(i, j)[0] = -1;
468 * Add weight.
470 void add_weight(int tc, unsigned int i, unsigned int j, ale_real weight, subtree *st) {
471 d2::image *im = weights[tc - tc_low];
472 assert(im);
473 assert(i <= im->height() - 1);
474 assert(j <= im->width() - 1);
477 * Increase resolution, if necessary
480 increase_resolution(tc, i, j);
483 * Attempt to add the weight at levels of detail
484 * where weight is defined.
487 if (add_partial(tc, i, j, weight, st))
488 return;
491 * If no weights are defined at any level of detail,
492 * then set the weight here.
495 im->pix(i, j)[0] = weight;
498 void add_weight(int tc, d2::point p, ale_real weight, subtree *st) {
500 p *= pow(2, -tc);
502 unsigned int i = (unsigned int) floor(p[0]);
503 unsigned int j = (unsigned int) floor(p[1]);
505 add_weight(tc, i, j, weight, st);
508 void add_weight(const space::traverse &t, ale_real weight, subtree *st) {
510 if (weight == 0)
511 return;
513 ale_pos tc = transformation.trilinear_coordinate(t);
514 point p = transformation.wp_unscaled(t.get_centroid());
515 assert(in_spatial_bounds(p));
517 tc = round(tc);
520 * Establish a reasonable (?) upper bound on resolution.
523 if (tc < input_decimation_lower) {
524 weight /= pow(4, (input_decimation_lower - tc));
525 tc = input_decimation_lower;
529 * Initialize, if necessary.
532 if (!initialized) {
533 tc_low = tc_high = (int) tc;
535 ale_real sf = pow(2, -tc);
537 weights.push_back(make_image(sf));
initialized = 1;
541 * Check resolution bounds
544 assert (tc_low <= tc_high);
547 * Generate additional levels of detail, if necessary.
550 while (tc < tc_low) {
551 tc_low--;
553 ale_real sf = pow(2, -tc_low);
555 weights.insert(weights.begin(), make_image(sf));
558 while (tc > tc_high) {
559 tc_high++;
561 ale_real sf = pow(2, -tc_high);
563 weights.push_back(make_image(sf, -1));
566 add_weight((int) tc, p.xy(), weight, st);
570 * Get weight
572 ale_real get_weight(int tc, unsigned int i, unsigned int j) {
573 d2::image *im = weights[tc - tc_low];
574 assert(im);
575 assert(i < im->height());
576 assert(j < im->width());
578 if (im->pix(i, j)[0] == -1) {
579 return (get_weight(tc - 1, i * 2 + 0, j * 2 + 0)
580 + get_weight(tc - 1, i * 2 + 1, j * 2 + 0)
581 + get_weight(tc - 1, i * 2 + 1, j * 2 + 1)
582 + get_weight(tc - 1, i * 2 + 0, j * 2 + 1)) / 4;
585 if (im->pix(i, j)[0] == 0) {
586 return get_weight(tc + 1, i / 2, j / 2);
589 return im->pix(i, j)[0];
592 ale_real get_weight(int tc, d2::point p) {
594 p *= pow(2, -tc);
596 unsigned int i = (unsigned int) floor(p[0]);
597 unsigned int j = (unsigned int) floor(p[1]);
599 return get_weight(tc, i, j);
602 ale_real get_weight(const space::traverse &t) {
603 ale_pos tc = transformation.trilinear_coordinate(t);
604 point p = transformation.wp_unscaled(t.get_centroid());
605 assert(in_spatial_bounds(p));
607 if (!initialized)
608 return 0;
610 tc = round(tc);
612 if (tc < tc_low) {
613 tc = tc_low;
616 if (tc <= tc_high) {
617 return get_weight((int) tc, p.xy());
620 assert(tc > tc_high);
622 int multiplier = (int) pow(2, (tc - tc_high));
623 ale_real result = 0;
625 for (int i = 0; i < multiplier; i++)
626 for (int j = 0; j < multiplier; j++) {
627 result += get_weight(tc_high,
628 (unsigned int) floor(p[0]) * multiplier + i,
629 (unsigned int) floor(p[1]) * multiplier + j);
632 return result;
636 * Check whether a subtree is simple.
638 int is_simple(subtree *s) {
639 assert (s);
641 if (s->node_value == -1
642 && s->children[0][0] == NULL
643 && s->children[0][1] == NULL
644 && s->children[1][0] == NULL
645 && s->children[1][1] == NULL)
646 return 1;
648 return 0;
652 * Get a weight subtree.
654 subtree *get_subtree(int tc, unsigned int i, unsigned int j) {
657 * tc > tc_high is handled recursively.
660 if (tc > tc_high) {
661 subtree *result = new subtree(-1,
662 get_subtree(tc - 1, i * 2 + 0, j * 2 + 0),
663 get_subtree(tc - 1, i * 2 + 0, j * 2 + 1),
664 get_subtree(tc - 1, i * 2 + 1, j * 2 + 0),
665 get_subtree(tc - 1, i * 2 + 1, j * 2 + 1));
667 if (is_simple(result)) {
668 delete result;
669 return NULL;
672 return result;
675 assert(tc >= tc_low);
676 assert(weights[tc - tc_low]);
678 d2::image *im = weights[tc - tc_low];
681 * Rectangular images will, in general, have
682 * out-of-bounds tree sections. Handle this case.
685 if (i >= im->height())
686 return NULL;
687 if (j >= im->width())
688 return NULL;
691 * -1 weights are handled recursively
694 if (im->pix(i, j)[0] == -1) {
695 subtree *result = new subtree(-1,
696 get_subtree(tc - 1, i * 2 + 0, j * 2 + 0),
697 get_subtree(tc - 1, i * 2 + 0, j * 2 + 1),
698 get_subtree(tc - 1, i * 2 + 1, j * 2 + 0),
699 get_subtree(tc - 1, i * 2 + 1, j * 2 + 1));
701 if (is_simple(result)) {
702 im->pix(i, j)[0] = 0;
703 delete result;
704 return NULL;
707 return result;
711 * Zero weights have NULL subtrees.
714 if (im->pix(i, j)[0] == 0)
715 return NULL;
718 * Handle the remaining case.
721 return new subtree(im->pix(i, j)[0], NULL, NULL, NULL, NULL);
724 subtree *get_subtree(int tc, d2::point p) {
725 p *= pow(2, -tc);
727 unsigned int i = (unsigned int) floor(p[0]);
728 unsigned int j = (unsigned int) floor(p[1]);
730 return get_subtree(tc, i, j);
733 subtree *get_subtree(const space::traverse &t) {
734 ale_pos tc = transformation.trilinear_coordinate(t);
735 point p = transformation.wp_unscaled(t.get_centroid());
736 assert(in_spatial_bounds(p));
738 if (!initialized)
739 return NULL;
741 if (tc < input_decimation_lower)
742 tc = input_decimation_lower;
744 tc = round(tc);
746 if (tc < tc_low)
747 return NULL;
749 return get_subtree((int) tc, p.xy());
753 * Destructor
755 ~ref_weights() {
756 for (unsigned int i = 0; i < weights.size(); i++) {
757 delete weights[i];
763 * Resolution check.
765 static int resolution_ok(pt transformation, ale_pos tc) {
767 if (pow(2, tc) > transformation.unscaled_height()
768 || pow(2, tc) > transformation.unscaled_width())
769 return 0;
771 if (tc < input_decimation_lower)
772 return 0;
774 return 1;
778 * Structure to hold input frame information at all levels of detail.
780 class lod_images {
783 * All images.
786 std::vector<lod_image *> images;
788 public:
790 lod_images() {
791 images.resize(d2::image_rw::count(), NULL);
794 void open(unsigned int f) {
795 assert (images[f] == NULL);
797 if (images[f] == NULL)
798 images[f] = new lod_image(f);
801 void open_all() {
802 for (unsigned int f = 0; f < d2::image_rw::count(); f++)
803 open(f);
806 lod_image *get(unsigned int f) {
807 assert (images[f] != NULL);
808 return images[f];
811 void close(unsigned int f) {
812 assert (images[f] != NULL);
813 delete images[f];
814 images[f] = NULL;
817 void close_all() {
818 for (unsigned int f = 0; f < d2::image_rw::count(); f++)
819 close(f);
822 ~lod_images() {
823 close_all();
828 * All levels-of-detail
831 static struct lod_images *al;
834 * Data structure for storing best encountered subspace candidates.
836 class candidates {
837 std::vector<std::vector<std::pair<ale_pos, ale_real> > > levels;
838 int image_index;
839 unsigned int height;
840 unsigned int width;
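/*
 * levels[l - input_decimation_lower] holds, for each pixel (i, j) of the
 * frame decimated by 2^l, pairwise_ambiguity slots of (depth, score) pairs;
 * add_candidate() keeps the best-scoring depths and generate_subspaces()
 * creates spatial regions for them.  A depth of zero marks an empty slot.
 */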
843 * Point p is in world coordinates.
845 void generate_subspace(point iw, ale_pos diagonal) {
847 space::traverse st = space::traverse::root();
849 if (!st.includes(iw)) {
850 assert(0);
851 return;
854 int highres = 0;
855 int lowres = 0;
858 * Loop until resolutions of interest have been generated.
861 for(;;) {
863 ale_pos current_diagonal = (st.get_max() - st.get_min()).norm();
865 assert(!isnan(current_diagonal));
868 * Generate any new desired spatial registers.
872 * Inputs
875 for (int f = 0; f < 2; f++) {
878 * Low resolution
881 if (current_diagonal < 2 * diagonal
882 && lowres == 0) {
883 spatial_info_map[st.get_node()];
884 lowres = 1;
888 * High resolution.
891 if (current_diagonal < diagonal
892 && highres == 0) {
893 spatial_info_map[st.get_node()];
894 highres = 1;
899 * Check for completion
902 if (highres && lowres)
903 return;
906 * Check precision before analyzing space further.
909 if (st.precision_wall()) {
910 fprintf(stderr, "\n\n*** Error: reached subspace precision wall ***\n\n");
911 assert(0);
912 return;
915 if (st.positive().includes(iw)) {
916 st = st.positive();
917 total_tsteps++;
918 } else if (st.negative().includes(iw)) {
919 st = st.negative();
920 total_tsteps++;
921 } else {
922 fprintf(stderr, "failed iw = (%f, %f, %f)\n",
923 iw[0], iw[1], iw[2]);
924 assert(0);
929 public:
930 candidates(int f) {
932 image_index = f;
933 height = (unsigned int) al->get(f)->get_t(0).unscaled_height();
934 width = (unsigned int) al->get(f)->get_t(0).unscaled_width();
937 * Is this necessary?
940 levels.resize(primary_decimation_upper - input_decimation_lower + 1);
941 for (int l = input_decimation_lower; l <= primary_decimation_upper; l++) {
/* Allocate this level's slots before they are indexed below. */
levels[l - input_decimation_lower].resize((unsigned int) floor(height / pow(2, l)) * width * pairwise_ambiguity);
942 for (unsigned int i = 0; i < (unsigned int) floor(height / pow(2, l)); i++)
943 for (unsigned int j = 0; j < (unsigned int) floor(width / pow(2, l)); j++)
944 for (unsigned int k = 0; k < pairwise_ambiguity; k++) {
945 levels[l - input_decimation_lower][i * width * pairwise_ambiguity + j * pairwise_ambiguity + k] =
946 std::pair<ale_pos, ale_real>(0, 0);
952 * Point p is expected to be in local projective coordinates.
955 void add_candidate(point p, int tc, ale_real score) {
956 assert(tc <= primary_decimation_upper);
957 assert(tc >= input_decimation_lower);
958 assert(p[2] < 0);
959 assert(score >= 0);
961 int i = (unsigned int) floor(p[0] / pow(2, tc));
962 int j = (unsigned int) floor(p[1] / pow(2, tc));
964 assert(i < floor(height / pow(2, tc)));
965 assert(j < floor(width / pow(2, tc)));
967 for (unsigned int k = 0; k < pairwise_ambiguity; k++) {
968 std::pair<ale_pos, ale_real> *pk =
969 &(levels[tc - input_decimation_lower][i * width * pairwise_ambiguity + j * pairwise_ambiguity + k]);
971 if (pk->first != 0 && score >= pk->second)
972 continue;
974 ale_pos tp = pk->first;
975 ale_real tr = pk->second;
977 pk->first = p[2];
978 pk->second = score;
980 p[2] = tp;
981 score = tr;
986 * Generate subspaces for candidates.
989 void generate_subspaces() {
990 for (int l = input_decimation_lower; l <= primary_decimation_upper; l++) {
991 for (unsigned int i = 0; i < (unsigned int) floor(height / pow(2, l)); i++)
992 for (unsigned int j = 0; j < (unsigned int) floor(width / pow(2, l)); j++)
993 for (unsigned int k = 0; k < pairwise_ambiguity; k++) {
994 std::pair<ale_pos, ale_real> *pk =
995 &(levels[l - input_decimation_lower]
996 [i * width * pairwise_ambiguity + j * pairwise_ambiguity + k]);
998 if (pk->first == 0)
999 continue;
1001 ale_pos si = i * pow(2, l) + ((l > 0) ? pow(2, l - 1) : 0);
1002 ale_pos sj = j * pow(2, l) + ((l > 0) ? pow(2, l - 1) : 0);
1004 point p = al->get(image_index)->get_t(0).pw_unscaled(point(si, sj, pk->first));
1006 generate_subspace(p,
1007 al->get(image_index)->get_t(0).diagonal_distance_3d(pk->first, l));
1014 * List for calculating weighted median.
1016 class wml {
1017 ale_real *data;
1018 unsigned int size;
1019 unsigned int used;
1021 ale_real &_w(unsigned int i) {
1022 assert(i <= used);
1023 return data[i * 2];
1026 ale_real &_d(unsigned int i) {
1027 assert(i <= used);
1028 return data[i * 2 + 1];
1031 void increase_capacity() {
1033 if (size > 0)
1034 size *= 2;
1035 else
1036 size = 1;
1038 data = (ale_real *) realloc(data, sizeof(ale_real) * 2 * (size * 1));
1040 assert(data);
1041 assert (size > used);
1043 if (!data) {
1044 fprintf(stderr, "Unable to allocate %d bytes of memory\n",
1045 sizeof(ale_real) * 2 * (size * 1));
1046 exit(1);
1050 void insert_weight(unsigned int i, ale_real v, ale_real w) {
1051 assert(used < size);
1052 assert(used >= i);
1053 for (unsigned int j = used; j > i; j--) {
1054 _w(j) = _w(j - 1);
1055 _d(j) = _d(j - 1);
1058 _w(i) = w;
1059 _d(i) = v;
1061 used++;
1064 public:
1066 unsigned int get_size() {
1067 return size;
1070 unsigned int get_used() {
1071 return used;
1074 void print_info() {
1075 fprintf(stderr, "[st %p sz %u el", this, size);
1076 for (unsigned int i = 0; i < used; i++)
1077 fprintf(stderr, " (%f, %f)", _d(i), _w(i));
1078 fprintf(stderr, "]\n");
1081 void clear() {
1082 used = 0;
1085 void insert_weight(ale_real v, ale_real w) {
1086 for (unsigned int i = 0; i < used; i++) {
1087 if (_d(i) == v) {
1088 _w(i) += w;
1089 return;
1091 if (_d(i) > v) {
1092 if (used == size)
1093 increase_capacity();
1094 insert_weight(i, v, w);
1095 return;
1098 if (used == size)
1099 increase_capacity();
1100 insert_weight(used, v, w);
1104 * Finds the median at half-weight, or between half-weight
1105 * and zero-weight, depending on the attenuation value.
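*
* For example, with weights (1, 2, 1) attached to ascending values and
* attenuation 0, the midpoint is half the total weight (2), which is crossed
* within the second entry, so the second value is returned; with attenuation
* 0.5 the midpoint drops to 1, which is met exactly at the first entry, so
* the mean of the first two values is returned.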
1108 ale_real find_median(double attenuation = 0) {
1110 assert(attenuation >= 0);
1111 assert(attenuation <= 1);
1113 ale_real zero1 = 0;
1114 ale_real zero2 = 0;
1115 ale_real undefined = zero1 / zero2;
1117 ale_accum weight_sum = 0;
1119 for (unsigned int i = 0; i < used; i++)
1120 weight_sum += _w(i);
1122 // if (weight_sum == 0)
1123 // return undefined;
1125 if (used == 0 || used == 1)
1126 return undefined;
1128 if (weight_sum == 0) {
1129 ale_accum data_sum = 0;
1130 for (unsigned int i = 0; i < used; i++)
1131 data_sum += _d(i);
1132 return data_sum / used;
1136 ale_accum midpoint = weight_sum * (0.5 - 0.5 * attenuation);
1138 ale_accum weight_sum_2 = 0;
1140 for (unsigned int i = 0; i < used && weight_sum_2 < midpoint; i++) {
1141 weight_sum_2 += _w(i);
1143 if (weight_sum_2 > midpoint) {
1144 return _d(i);
1145 } else if (weight_sum_2 == midpoint) {
1146 assert (i + 1 < used);
1147 return (_d(i) + _d(i + 1)) / 2;
1151 return undefined;
1154 wml(int initial_size = 0) {
1156 // if (initial_size == 0) {
1157 // initial_size = (int) (d2::image_rw::count() * 1.5);
1158 // }
1160 size = initial_size;
1161 used = 0;
1163 if (size > 0) {
1164 data = (ale_real *) malloc(size * sizeof(ale_real) * 2);
1165 assert(data);
1166 } else {
1167 data = NULL;
1172 * copy constructor. This is required to avoid undesired frees.
1175 wml(const wml &w) {
1176 size = w.size;
1177 used = w.used;
1178 data = (ale_real *) malloc(size * sizeof(ale_real) * 2);
1179 assert(data);
1181 memcpy(data, w.data, size * sizeof(ale_real) * 2);
1184 ~wml() {
1185 free(data);
1190 * Class for information regarding spatial regions of interest.
1192 * This class is configured for convenience in cases where sampling is
1193 * performed using an approximation of the fine:box:1,triangle:2 chain.
1194 * In this case, the *_1 variables would store the fine data and the
1195 * *_2 variables would store the coarse data. Other uses are also
1196 * possible.
1199 class spatial_info {
1201 * Map channel value --> weight.
1203 wml color_weights_1[3];
1204 wml color_weights_2[3];
1207 * Current color.
1209 d2::pixel color;
1212 * Map occupancy value --> weight.
1214 wml occupancy_weights_1;
1215 wml occupancy_weights_2;
1218 * Current occupancy value.
1220 ale_real occupancy;
1223 * pocc/socc density
1226 unsigned int pocc_density;
1227 unsigned int socc_density;
1230 * Insert a weight into a list.
1232 void insert_weight(wml *m, ale_real v, ale_real w) {
1233 m->insert_weight(v, w);
1237 * Find the median of a weighted list. Uses NaN for undefined.
1239 ale_real find_median(wml *m, double attenuation = 0) {
1240 return m->find_median(attenuation);
1243 public:
1245 * Constructor.
1247 spatial_info() {
1248 color = d2::pixel::zero();
1249 occupancy = 0;
1250 pocc_density = 0;
1251 socc_density = 0;
1255 * Accumulate color; primary data set.
1257 void accumulate_color_1(int f, d2::pixel color, d2::pixel weight) {
1258 for (int k = 0; k < 3; k++)
1259 insert_weight(&color_weights_1[k], color[k], weight[k]);
1263 * Accumulate color; secondary data set.
1265 void accumulate_color_2(d2::pixel color, d2::pixel weight) {
1266 for (int k = 0; k < 3; k++)
1267 insert_weight(&color_weights_2[k], color[k], weight[k]);
1271 * Accumulate occupancy; primary data set.
1273 void accumulate_occupancy_1(int f, ale_real occupancy, ale_real weight) {
1274 insert_weight(&occupancy_weights_1, occupancy, weight);
1278 * Accumulate occupancy; secondary data set.
1280 void accumulate_occupancy_2(ale_real occupancy, ale_real weight) {
1281 insert_weight(&occupancy_weights_2, occupancy, weight);
1283 if (occupancy == 0 || occupancy_weights_2.get_size() < 96)
1284 return;
1286 // fprintf(stderr, "%p updated socc with: %f %f\n", this, occupancy, weight);
1287 // occupancy_weights_2.print_info();
1291 * Update color (and clear accumulation structures).
1293 void update_color() {
1294 for (int d = 0; d < 3; d++) {
1295 ale_real c = find_median(&color_weights_1[d]);
1296 if (isnan(c))
1297 c = find_median(&color_weights_2[d]);
1298 if (isnan(c))
1299 c = 0;
1301 color[d] = c;
1303 color_weights_1[d].clear();
1304 color_weights_2[d].clear();
1309 * Update occupancy (and clear accumulation structures).
1311 void update_occupancy() {
1312 ale_real o = find_median(&occupancy_weights_1, occ_att);
1313 if (isnan(o))
1314 o = find_median(&occupancy_weights_2, occ_att);
1315 if (isnan(o))
1316 o = 0;
1318 occupancy = o;
1320 pocc_density = occupancy_weights_1.get_used();
1321 socc_density = occupancy_weights_2.get_used();
1323 occupancy_weights_1.clear();
1324 occupancy_weights_2.clear();
1329 * Get current color.
1331 d2::pixel get_color() {
1332 return color;
1336 * Get current occupancy.
1338 ale_real get_occupancy() {
1339 return occupancy;
1343 * Get primary color density.
1346 unsigned int get_pocc_density() {
1347 return pocc_density;
1350 unsigned int get_socc_density() {
1351 return socc_density;
1356 * Map spatial regions of interest to spatial info structures. XXX:
1357 * This may get very poor cache behavior in comparison with, say, an
1358 * array. Unfortunately, there is no immediately obvious array
1359 * representation. If some kind of array representation were adopted,
1360 * it would probably cluster regions of similar depth from the
1361 * perspective of the typical camera. In particular, for a
1362 * stereoscopic view, depth ordering for two random points tends to be
1363 * similar between cameras, I think. Unfortunately, it is never
1364 * identical for all points (unless cameras are co-located). One
1365 * possible approach would be to order based on, say, camera 0's idea
1366 * of depth.
1369 typedef std::map<struct space::node *, spatial_info> spatial_info_map_t;
1371 static spatial_info_map_t spatial_info_map;
1373 public:
1376 * Debugging variables.
1379 static unsigned long total_ambiguity;
1380 static unsigned long total_pixels;
1381 static unsigned long total_divisions;
1382 static unsigned long total_tsteps;
1385 * Member functions
1388 static void et(double et_parameter) {
1389 encounter_threshold = et_parameter;
1392 static void load_model(const char *name) {
1393 load_model_name = name;
1396 static void save_model(const char *name) {
1397 save_model_name = name;
1400 static void fc(ale_pos fc) {
1401 front_clip = fc;
1404 static void di_upper(ale_pos _dgi) {
1405 primary_decimation_upper = (int) round(_dgi);
1408 static void do_try(ale_pos _dgo) {
1409 output_decimation_preferred = (int) round(_dgo);
1412 static void di_lower(ale_pos _idiv) {
1413 input_decimation_lower = (int) round(_idiv);
1416 static void oc() {
1417 output_clip = 1;
1420 static void no_oc() {
1421 output_clip = 0;
1424 static void rc(ale_pos rc) {
1425 rear_clip = rc;
1429 * Initialize 3D scene from 2D scene, using 2D and 3D alignment
1430 * information.
1432 static void init_from_d2() {
1435 * Rear clip value of 0 is converted to infinity.
1438 if (rear_clip == 0) {
1439 ale_pos one = +1;
1440 ale_pos zero = +0;
1442 rear_clip = one / zero;
1443 assert(isinf(rear_clip) == +1);
1447 * Scale and translate clipping plane depths.
1450 ale_pos cp_scalar = d3::align::projective(0).wc(point(0, 0, 0))[2];
1452 front_clip = front_clip * cp_scalar - cp_scalar;
1453 rear_clip = rear_clip * cp_scalar - cp_scalar;
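/*
 * With cp_scalar equal to the depth of the world origin as seen from camera
 * 0, a user-supplied clip value v maps to (v - 1) * cp_scalar: v = 1
 * corresponds to depth 0 (the camera plane) and v = 2 to the depth of the
 * world origin.  (This assumes the convention, asserted elsewhere in this
 * file, that visible points have negative depth.)
 */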
1457 * Allocate image structures.
1460 al = new lod_images;
1462 if (tc_multiplier != 0) {
1463 al->open_all();
1468 * Perform spatial_info updating on a given subspace, for given
1469 * parameters.
1471 static void subspace_info_update(space::iterate si, int f, ref_weights *weights) {
1472 while(!si.done()) {
1474 space::traverse st = si.get();
1477 * Skip spaces with no color information.
1479 * XXX: This could be more efficient, perhaps.
1482 if (spatial_info_map.count(st.get_node()) == 0) {
1483 si.next();
1484 continue;
1488 * Get in-bounds centroid, if one exists.
1491 point p = al->get(f)->get_t(0).centroid(st);
1493 if (!p.defined()) {
1494 si.next();
1495 continue;
1499 * Get information on the subspace.
1502 spatial_info *sn = &spatial_info_map[st.get_node()];
1503 d2::pixel color = sn->get_color();
1504 ale_real occupancy = sn->get_occupancy();
1507 * Store current weight so we can later check for
1508 * modification by higher-resolution subspaces.
1511 ref_weights::subtree *tree = weights->get_subtree(st);
1514 * Check for higher resolution subspaces, and
1515 * update the space iterator.
1518 if (st.get_node()->positive
1519 || st.get_node()->negative) {
1522 * Cleave space for the higher-resolution pass,
1523 * skipping the current space, since we will
1524 * process that later.
1527 space::iterate cleaved_space = si.cleave();
1529 cleaved_space.next();
1531 subspace_info_update(cleaved_space, f, weights);
1533 } else {
1534 si.next();
1538 * Add new data on the subspace and update weights.
1541 ale_pos tc = al->get(f)->get_t(0).trilinear_coordinate(st);
1542 d2::pixel pcolor = al->get(f)->get_tl(p.xy(), tc);
1543 d2::pixel colordiff = (color - pcolor) * (ale_real) 256;
1545 if (falloff_exponent != 0) {
1546 d2::pixel max_diff = al->get(f)->get_max_diff(p.xy(), tc) * (ale_real) 256;
1548 for (int k = 0; k < 3; k++)
1549 if (max_diff[k] > 1)
1550 colordiff[k] /= pow(max_diff[k], falloff_exponent);
1554 * Determine the probability of
1555 * encounter, divided by the occupancy.
1558 d2::pixel encounter = d2::pixel(1, 1, 1) * (1 - weights->get_weight(st));
1561 * Update weights
1564 weights->add_weight(st, (encounter * occupancy)[0], tree);
1567 * Delete the subtree, if necessary.
1570 delete tree;
1573 * Check for cases in which the subspace should not be
1574 * updated.
1577 if (!resolution_ok(al->get(f)->get_t(0), tc))
1578 return;
1581 * Update subspace.
1584 sn->accumulate_color_1(f, pcolor, encounter);
1585 d2::pixel channel_occ = pexp(-colordiff * colordiff);
1587 ale_accum occ = channel_occ[0];
1589 for (int k = 1; k < 3; k++)
1590 if (channel_occ[k] < occ)
1591 occ = channel_occ[k];
1593 sn->accumulate_occupancy_1(f, occ, encounter[0]);
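/*
 * Note on the update above: colordiff is the (scaled) difference between the
 * current color estimate and the sampled input color, optionally attenuated
 * by local contrast via falloff_exponent; the per-channel occupancy evidence
 * is exp(-colordiff^2), of which the minimum channel is accumulated.  The
 * 'encounter' factor discounts evidence by the weight already accumulated
 * along this line of sight, presumably an estimate of occlusion by subspaces
 * processed earlier.
 */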
1599 * Run a single iteration of the spatial_info update cycle.
1601 static void spatial_info_update() {
1603 * Iterate through each frame.
1605 for (unsigned int f = 0; f < d2::image_rw::count(); f++) {
1608 * Open the frame and transformation.
1611 if (tc_multiplier == 0)
1612 al->open(f);
1615 * Allocate weights data structure for storing encounter
1616 * probabilities.
1619 ref_weights *weights = new ref_weights(f);
1622 * Call subspace_info_update for the root space.
1625 subspace_info_update(space::iterate(al->get(f)->origin()), f, weights);
1628 * Free weights.
1631 delete weights;
1634 * Close the frame and transformation.
1637 if (tc_multiplier == 0)
1638 al->close(f);
1642 * Update all spatial_info structures.
1644 for (spatial_info_map_t::iterator i = spatial_info_map.begin(); i != spatial_info_map.end(); i++) {
1645 i->second.update_color();
1646 i->second.update_occupancy();
1648 // d2::pixel color = i->second.get_color();
1650 // fprintf(stderr, "space p=%p updated to c=[%f %f %f] o=%f\n",
1651 // i->first, color[0], color[1], color[2],
1652 // i->second.get_occupancy());
1657 * Support function for view() and depth().
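*
* The 'type' argument selects what is rendered: 0 color view, 1 weighted
* depth, 2 ambiguity measure, 3 closeness measure, 4 contribution density,
* 5 occupancy, 6 a (depth, xres, yres) triple, 7 an (xoff, yoff, 0) triple.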
1660 static void view_recurse(int type, d2::image *im, d2::image *weights, space::iterate si, pt _pt) {
1661 while (!si.done()) {
1662 space::traverse st = si.get();
1665 * XXX: This could be more efficient, perhaps.
1668 if (spatial_info_map.count(st.get_node()) == 0) {
1669 si.next();
1670 continue;
1673 spatial_info sn = spatial_info_map[st.get_node()];
1676 * Get information on the subspace.
1679 d2::pixel color = sn.get_color();
1680 // d2::pixel color = d2::pixel(1, 1, 1) * (double) (((unsigned int) (st.get_node()) / sizeof(space)) % 65535);
1681 ale_real occupancy = sn.get_occupancy();
1684 * Determine the view-local bounding box for the
1685 * subspace.
1688 point bb[2];
1690 _pt.get_view_local_bb(st, bb);
1692 point min = bb[0];
1693 point max = bb[1];
1696 * Data structure to check modification of weights by
1697 * higher-resolution subspaces.
1700 std::queue<d2::pixel> weight_queue;
1703 * Check for higher resolution subspaces, and
1704 * update the space iterator.
1707 if (st.get_node()->positive
1708 || st.get_node()->negative) {
1711 * Store information about current weights,
1712 * so we will know which areas have been
1713 * covered by higher-resolution subspaces.
1716 for (int i = (int) ceil(min[0]); i <= (int) floor(max[0]); i++)
1717 for (int j = (int) ceil(min[1]); j <= (int) floor(max[1]); j++)
1718 weight_queue.push(weights->get_pixel(i, j));
1721 * Cleave space for the higher-resolution pass,
1722 * skipping the current space, since we will
1723 * process that afterward.
1726 space::iterate cleaved_space = si.cleave();
1728 cleaved_space.next();
1730 view_recurse(type, im, weights, cleaved_space, _pt);
1732 } else {
1733 si.next();
1738 * Iterate over pixels in the bounding box, finding
1739 * pixels that intersect the subspace. XXX: assume
1740 * for now that all pixels in the bounding box
1741 * intersect the subspace.
1744 for (int i = (int) ceil(min[0]); i <= (int) floor(max[0]); i++)
1745 for (int j = (int) ceil(min[1]); j <= (int) floor(max[1]); j++) {
1748 * Check for higher-resolution updates.
1751 if (weight_queue.size()) {
1752 if (weight_queue.front() != weights->get_pixel(i, j)) {
1753 weight_queue.pop();
1754 continue;
1756 weight_queue.pop();
1760 * Determine the probability of encounter.
1763 d2::pixel encounter = (d2::pixel(1, 1, 1) - weights->get_pixel(i, j)) * occupancy;
1766 * Update images.
1769 if (type == 0) {
1772 * Color view
1775 weights->pix(i, j) += encounter;
1776 im->pix(i, j) += encounter * color;
1778 } else if (type == 1) {
1781 * Weighted (transparent) depth display
1784 ale_pos depth_value = _pt.wp_scaled(st.get_min())[2];
1785 weights->pix(i, j) += encounter;
1786 im->pix(i, j) += encounter * depth_value;
1788 } else if (type == 2) {
1791 * Ambiguity (ambivalence) measure.
1794 weights->pix(i, j) = d2::pixel(1, 1, 1);
1795 im->pix(i, j) += 0.1 * d2::pixel(1, 1, 1);
1797 } else if (type == 3) {
1800 * Closeness measure.
1803 ale_pos depth_value = _pt.wp_scaled(st.get_min())[2];
1804 if (weights->pix(i, j)[0] == 0) {
1805 weights->pix(i, j) = d2::pixel(1, 1, 1);
1806 im->pix(i, j) = d2::pixel(1, 1, 1) * depth_value;
1807 } else if (im->pix(i, j)[2] < depth_value) {
1808 im->pix(i, j) = d2::pixel(1, 1, 1) * depth_value;
1809 } else {
1810 continue;
1813 } else if (type == 4) {
1816 * Weighted (transparent) contribution display
1819 ale_pos contribution_value = sn.get_pocc_density() + sn.get_socc_density();
1820 weights->pix(i, j) += encounter;
1821 im->pix(i, j) += encounter * contribution_value;
1823 } else if (type == 5) {
1826 * Weighted (transparent) occupancy display
1829 ale_pos contribution_value = occupancy;
1830 weights->pix(i, j) += encounter;
1831 im->pix(i, j) += encounter * contribution_value;
1833 } else if (type == 6) {
1836 * (Depth, xres, yres) triple
1839 ale_pos depth_value = _pt.wp_scaled(st.get_min())[2];
1840 weights->pix(i, j)[0] += encounter[0];
1841 if (weights->pix(i, j)[1] < encounter[0]) {
1842 weights->pix(i, j)[1] = encounter[0];
1843 im->pix(i, j)[0] = weights->pix(i, j)[1] * depth_value;
1844 im->pix(i, j)[1] = max[0] - min[0];
1845 im->pix(i, j)[2] = max[1] - min[1];
1848 } else if (type == 7) {
1851 * (xoff, yoff, 0) triple
1854 weights->pix(i, j)[0] += encounter[0];
1855 if (weights->pix(i, j)[1] < encounter[0]) {
1856 weights->pix(i, j)[1] = encounter[0];
1857 im->pix(i, j)[0] = i - min[0];
1858 im->pix(i, j)[1] = j - min[1];
1859 im->pix(i, j)[2] = 0;
1862 } else
1863 assert(0);
1869 * Generate a depth image from a specified view.
1871 static const d2::image *depth(pt _pt, int n = -1) {
1872 assert ((unsigned int) n < d2::image_rw::count() || n < 0);
1874 if (n >= 0) {
1875 assert((int) floor(d2::align::of(n).scaled_height())
1876 == (int) floor(_pt.scaled_height()));
1877 assert((int) floor(d2::align::of(n).scaled_width())
1878 == (int) floor(_pt.scaled_width()));
1881 d2::image *im1 = new d2::image_ale_real((int) floor(_pt.scaled_height()),
1882 (int) floor(_pt.scaled_width()), 3);
1884 d2::image *im2 = new d2::image_ale_real((int) floor(_pt.scaled_height()),
1885 (int) floor(_pt.scaled_width()), 3);
1887 d2::image *im3 = new d2::image_ale_real((int) floor(_pt.scaled_height()),
1888 (int) floor(_pt.scaled_width()), 3);
1890 _pt.view_angle(_pt.view_angle() * VIEW_ANGLE_MULTIPLIER);
1893 * Use adaptive subspace data.
1896 d2::image *weights = new d2::image_ale_real((int) floor(_pt.scaled_height()),
1897 (int) floor(_pt.scaled_width()), 3);
1900 * Iterate through subspaces.
1903 space::iterate si(_pt.origin());
1905 view_recurse(6, im1, weights, si, _pt);
1907 delete weights;
1908 weights = new d2::image_ale_real((int) floor(_pt.scaled_height()),
1909 (int) floor(_pt.scaled_width()), 3);
1911 view_recurse(7, im2, weights, si, _pt);
1914 * Normalize depths by weights
1917 if (normalize_weights)
1918 for (unsigned int i = 0; i < im1->height(); i++)
1919 for (unsigned int j = 0; j < im1->width(); j++)
1920 im1->pix(i, j)[0] /= weights->pix(i, j)[1];
1923 for (unsigned int i = 0; i < im1->height(); i++)
1924 for (unsigned int j = 0; j < im1->width(); j++) {
1927 * Handle interpolation.
1930 d2::point x;
1931 d2::point blx;
1932 d2::point res(im1->pix(i, j)[1], im1->pix(i, j)[2]);
1934 for (int d = 0; d < 2; d++) {
1936 if (im2->pix(i, j)[d] < res[d] / 2)
1937 x[d] = (ale_pos) (d?j:i) - res[d] / 2 - im2->pix(i, j)[d];
1938 else
1939 x[d] = (ale_pos) (d?j:i) + res[d] / 2 - im2->pix(i, j)[d];
1941 blx[d] = 1 - ((d?j:i) - x[d]) / res[d];
1944 ale_real depth_val = 0;
1945 ale_real depth_weight = 0;
1947 for (int ii = 0; ii < 2; ii++)
1948 for (int jj = 0; jj < 2; jj++) {
1949 d2::point p = x + d2::point(ii, jj) * res;
1950 if (im1->in_bounds(p)) {
1952 ale_real d = im1->get_bl(p)[0];
1954 if (isnan(d))
1955 continue;
1957 ale_real w = ((ii ? (1 - blx[0]) : blx[0]) * (jj ? (1 - blx[1]) : blx[1]));
1958 depth_weight += w;
1959 depth_val += w * d;
1963 ale_real depth = depth_val / depth_weight;
1966 * Handle exclusions and encounter thresholds
1969 point w = _pt.pw_scaled(point(i, j, depth));
1971 if (weights->pix(i, j)[0] < encounter_threshold || excluded(w)) {
1972 im3->pix(i, j) = d2::pixel::zero() / d2::pixel::zero();
1973 } else {
1974 im3->pix(i, j) = d2::pixel(1, 1, 1) * depth;
1978 delete weights;
1979 delete im1;
1980 delete im2;
1982 return im3;
1985 static const d2::image *depth(unsigned int n) {
1987 assert (n < d2::image_rw::count());
1989 pt _pt = align::projective(n);
1991 return depth(_pt, n);
1995 * Generate an image from a specified view.
1997 static const d2::image *view(pt _pt, int n = -1) {
1998 assert ((unsigned int) n < d2::image_rw::count() || n < 0);
2000 if (n >= 0) {
2001 assert((int) floor(d2::align::of(n).scaled_height())
2002 == (int) floor(_pt.scaled_height()));
2003 assert((int) floor(d2::align::of(n).scaled_width())
2004 == (int) floor(_pt.scaled_width()));
2007 const d2::image *depths = NULL;
2009 if (d3px_count > 0)
2010 depths = depth(_pt, n);
2012 d2::image *im = new d2::image_ale_real((int) floor(_pt.scaled_height()),
2013 (int) floor(_pt.scaled_width()), 3);
2015 _pt.view_angle(_pt.view_angle() * VIEW_ANGLE_MULTIPLIER);
2018 * Use adaptive subspace data.
2021 d2::image *weights = new d2::image_ale_real((int) floor(_pt.scaled_height()),
2022 (int) floor(_pt.scaled_width()), 3);
2025 * Iterate through subspaces.
2028 space::iterate si(_pt.origin());
2030 view_recurse(0, im, weights, si, _pt);
2032 for (unsigned int i = 0; i < im->height(); i++)
2033 for (unsigned int j = 0; j < im->width(); j++) {
2034 if (weights->pix(i, j).min_norm() < encounter_threshold
2035 || (d3px_count > 0 && isnan(depths->pix(i, j)[0]))) {
2036 im->pix(i, j) = d2::pixel::zero() / d2::pixel::zero();
2037 weights->pix(i, j) = d2::pixel::zero();
2038 } else if (normalize_weights)
2039 im->pix(i, j) /= weights->pix(i, j);
2042 if (d3px_count > 0)
2043 delete depths;
2045 delete weights;
2047 return im;
2050 static void tcem(double _tcem) {
2051 tc_multiplier = _tcem;
2054 static void oui(unsigned int _oui) {
2055 ou_iterations = _oui;
2058 static void pa(unsigned int _pa) {
2059 pairwise_ambiguity = _pa;
2062 static void pc(const char *_pc) {
2063 pairwise_comparisons = _pc;
2066 static void d3px(int _d3px_count, double *_d3px_parameters) {
2067 d3px_count = _d3px_count;
2068 d3px_parameters = _d3px_parameters;
2071 static void fx(double _fx) {
2072 falloff_exponent = _fx;
2075 static void nw() {
2076 normalize_weights = 1;
2079 static void no_nw() {
2080 normalize_weights = 0;
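/*
 * Check whether a point falls inside one of the 3D post-exclusion regions.
 * Each region occupies six consecutive entries of d3px_parameters, giving
 * inclusive bounds (x-min, x-max, y-min, y-max, z-min, z-max) for the point
 * passed in.
 */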
2083 static int excluded(point p) {
2084 for (int n = 0; n < d3px_count; n++) {
2085 double *region = d3px_parameters + (6 * n);
2086 if (p[0] >= region[0]
2087 && p[0] <= region[1]
2088 && p[1] >= region[2]
2089 && p[1] <= region[3]
2090 && p[2] >= region[4]
2091 && p[2] <= region[5])
2092 return 1;
2095 return 0;
2098 static const d2::image *view(unsigned int n) {
2100 assert (n < d2::image_rw::count());
2102 pt _pt = align::projective(n);
2104 return view(_pt, n);
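/*
 * Usage sketch (hypothetical caller; namespace qualification omitted):
 * render aligned frame 0 and obtain its depth map once the model has been
 * built.  Both functions heap-allocate the returned image here.
 *
 *	const d2::image *im = scene::view(0);	// color rendering
 *	const d2::image *d  = scene::depth(0);	// matching depth map
 *	// ... inspect or write the images ...
 *	delete im;
 *	delete d;
 */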
2108 * Add specified control points to the model
2110 static void add_control_points() {
2113 typedef struct {point iw; point ip, is;} analytic;
2114 typedef std::multimap<ale_real,analytic> score_map;
2115 typedef std::pair<ale_real,analytic> score_map_element;
2117 static void solve_point_pair(unsigned int f1, unsigned int f2, int i, int j, ale_pos ii, ale_pos jj,
2118 ale_real *normalized_score, analytic *_a,
2119 const pt &_pt1, const pt &_pt2, const d2::image *if1, const d2::image *if2) {
2121 *normalized_score = 0;
2122 *normalized_score /= *normalized_score;
2123 assert(isnan(*normalized_score));
2126 * Create an orthonormal basis to
2127 * determine line intersection.
2130 point bp0 = _pt1.pw_scaled(point(i, j, 0));
2131 point bp1 = _pt1.pw_scaled(point(i, j, 10));
2132 point bp2 = _pt2.pw_scaled(point(ii, jj, 0));
2134 point b0 = (bp1 - bp0).normalize();
2135 point b1n = bp2 - bp0;
2136 point b1 = (b1n - b1n.dproduct(b0) * b0).normalize();
2137 point b2 = point(0, 0, 0).xproduct(b0, b1).normalize(); // Should already have norm=1
2140 * Select a fourth point to define a second line.
2143 point p3 = _pt2.pw_scaled(point(ii, jj, 10));
2146 * Representation in the new basis.
2149 d2::point nbp0 = d2::point(bp0.dproduct(b0), bp0.dproduct(b1));
2150 // d2::point nbp1 = d2::point(bp1.dproduct(b0), bp1.dproduct(b1));
2151 d2::point nbp2 = d2::point(bp2.dproduct(b0), bp2.dproduct(b1));
2152 d2::point np3 = d2::point( p3.dproduct(b0), p3.dproduct(b1));
2155 * Determine intersection of line
2156 * (nbp0, nbp1), which is parallel to
2157 * b0, with line (nbp2, np3).
2161 * XXX: a stronger check would be
2162 * better here, e.g., involving the
2163 * ratio (np3[0] - nbp2[0]) / (np3[1] -
2164 * nbp2[1]). Also, acceptance of these
2165 * cases is probably better than
2166 * rejection.
2169 if (np3[1] - nbp2[1] == 0)
2170 return;
2172 d2::point intersection = d2::point(nbp2[0]
2173 + (nbp0[1] - nbp2[1]) * (np3[0] - nbp2[0]) / (np3[1] - nbp2[1]),
2174 nbp0[1]);
2176 ale_pos b2_offset = b2.dproduct(bp0);
2179 * Map the intersection back to the world
2180 * basis.
2183 point iw = intersection[0] * b0 + intersection[1] * b1 + b2_offset * b2;
2186 * Reject intersection points behind a
2187 * camera.
2190 point icp = _pt1.wc(iw);
2191 point ics = _pt2.wc(iw);
2194 if (icp[2] >= 0 || ics[2] >= 0)
2195 return;
2198 * Reject clipping plane violations.
2201 if (iw[2] > front_clip
2202 || iw[2] < rear_clip)
2203 return;
2206 * Score the point.
2209 point ip = _pt1.wp_scaled(iw);
2211 point is = _pt2.wp_scaled(iw);
2213 _a->iw = iw;
2214 _a->ip = ip;
2215 _a->is = is;
2218 * Calculate score from color match. Assume for now
2219 * that the transformation can be approximated locally
2220 * with a translation.
2223 ale_pos score = 0;
2224 ale_pos divisor = 0;
2225 ale_pos l1_multiplier = 0.125;
2227 if (if1->in_bounds(ip.xy())
2228 && if2->in_bounds(is.xy())) {
2229 divisor += 1 - l1_multiplier;
2230 score += (1 - l1_multiplier)
2231 * (if1->get_bl(ip.xy()) - if2->get_bl(is.xy())).normsq();
2234 for (int iii = -1; iii <= 1; iii++)
2235 for (int jjj = -1; jjj <= 1; jjj++) {
2236 d2::point t(iii, jjj);
2238 if (!if1->in_bounds(ip.xy() + t)
2239 || !if2->in_bounds(is.xy() + t))
2240 continue;
2242 divisor += l1_multiplier;
2243 score += l1_multiplier
2244 * (if1->get_bl(ip.xy() + t) - if2->get_bl(is.xy() + t)).normsq();
2249 * Include third-camera contributions in the score.
2252 if (tc_multiplier != 0)
2253 for (unsigned int f = 0; f < d2::image_rw::count(); f++) {
2254 if (f == f1 || f == f2)
2255 continue;
2257 pt _ptf = align::projective(f);
2258 _ptf.scale(1 / _ptf.scale_2d() / pow(2, ceil(input_decimation_exponent)));
2260 point p = _ptf.wp_scaled(iw);
2262 if (!cl->reference[f]->in_bounds(p.xy())
2263 || !if2->in_bounds(ip.xy()))
2264 continue;
2266 divisor += tc_multiplier;
2267 score += tc_multiplier
2268 * (if1->get_bl(ip.xy()) - cl->reference[f]->get_bl(p.xy())).normsq();
2271 *normalized_score = score / divisor;
2275 * Generate a map from scores to 3D points for various depths at point (i, j) in f1.
2277 static score_map p2f_score_map(unsigned int i, unsigned int j, lod_image *if1, lod_image *if2,
2278 std::vector<pt> pt_outputs) {
2280 score_map result;
2283 * Map two depths to the secondary frame.
2286 point p1 = _pt2.wp_scaled(_pt1.pw_scaled(point(i, j, 1000)));
2287 point p2 = _pt2.wp_scaled(_pt1.pw_scaled(point(i, j, -1000)));
2290 * For cases where the mapped points define a
2291 * line and where points on the line fall
2292 * within the defined area of the frame,
2293 * determine the starting point for inspection.
2294 * In other cases, continue to the next pixel.
2297 ale_pos diff_i = p2[0] - p1[0];
2298 ale_pos diff_j = p2[1] - p1[1];
2299 ale_pos slope = diff_j / diff_i;
2301 if (isnan(slope)) {
2302 fprintf(stderr, "%d->%d (%d, %d) has undefined slope\n",
2303 f1, f2, i, j);
2304 return result;
2308 * Make absurdly large/small slopes either infinity, negative infinity, or zero.
2311 if (fabs(slope) > if2->width() * 100) {
2312 double zero = 0;
2313 double one = 1;
2314 double inf = one / zero;
2315 slope = inf;
2316 } else if (slope < 1 / (double) if2->height() / 100
2317 && slope > -1/ (double) if2->height() / 100) {
2318 slope = 0;
2321 ale_pos top_intersect = p1[1] - p1[0] * slope;
2322 ale_pos lef_intersect = p1[0] - p1[1] / slope;
2323 ale_pos rig_intersect = p1[0] - (p1[1] - if2->width() + 2) / slope;
2324 ale_pos sp_i, sp_j;
2326 if (slope == 0) {
2327 sp_i = lef_intersect;
2328 sp_j = 0;
2329 } else if (finite(slope) && top_intersect >= 0 && top_intersect < if2->width() - 1) {
2330 sp_i = 0;
2331 sp_j = top_intersect;
2332 } else if (slope > 0 && lef_intersect >= 0 && lef_intersect <= if2->height() - 1) {
2333 sp_i = lef_intersect;
2334 sp_j = 0;
2335 } else if (slope < 0 && rig_intersect >= 0 && rig_intersect <= if2->height() - 1) {
2336 sp_i = rig_intersect;
2337 sp_j = if2->width() - 2;
2338 } else {
2339 return result;
2343 * Determine increment values for examining
2344 * point, ensuring that incr_i is always
2345 * positive.
2348 ale_pos incr_i, incr_j;
2350 if (fabs(diff_i) > fabs(diff_j)) {
2351 incr_i = 1;
2352 incr_j = slope;
2353 } else if (slope > 0) {
2354 incr_i = 1 / slope;
2355 incr_j = 1;
2356 } else {
2357 incr_i = -1 / slope;
2358 incr_j = -1;
2362 * Examine regions near the projected line.
2365 for (ale_pos ii = sp_i, jj = sp_j;
2366 ii <= if2->height() - 1 && jj <= if2->width() - 1 && ii >= 0 && jj >= 0;
2367 ii += incr_i, jj += incr_j) {
2369 ale_real normalized_score;
2370 analytic _a;
2372 solve_point_pair(f1, f2, i, j, ii, jj, &normalized_score, &_a, _pt1, _pt2, if1, if2);
2375 * Reject points with undefined score.
2378 if (!finite(normalized_score))
2379 continue;
2382 * Add the point to the score map.
2385 result.insert(score_map_element(normalized_score, _a));
2389 * Iterate through regions and add new locations with sub-pixel resolution
2391 int accumulated_passes = 0;
2392 int max_acc_passes = pairwise_ambiguity;
2393 for (score_map::iterator smi = result.begin(); smi != result.end(); smi++) {
2394 point is = smi->second.is;
2396 if (accumulated_passes++ >= max_acc_passes)
2397 break;
2399 for (ale_pos epsilon = -0.5; epsilon <= 0.5; epsilon += 0.125) {
2401 if (fabs(epsilon) < 0.001)
2402 continue;
2404 ale_real normalized_score;
2405 analytic _a;
2407 solve_point_pair(f1, f2, i, j, is[0] + epsilon * incr_i, is[1] + epsilon * incr_j,
2408 &normalized_score, &_a, _pt1, _pt2, if1, if2);
2410 if (!finite(normalized_score))
2411 continue;
2413 result.insert(score_map_element(normalized_score, _a));
2417 return result;
2421 * Make pt list.
2423 static std::vector<pt> make_pt_list(const char *d_out[], const char *v_out[],
2424 std::map<const char *, pt> *d3_depth_pt,
2425 std::map<const char *, pt> *d3_output_pt) {
2427 std::vector<pt> result;
2429 for (unsigned int n = 0; n < d2::image_rw::count(); n++) {
2430 if (d_out[n] || v_out[n]) {
2431 result.push_back(align::projective(n));
2435 for (std::map<const char *, pt>::iterator i = d3_depth_pt->begin(); i != d3_depth_pt->end(); i++) {
2436 result.push_back(i->second);
2439 for (std::map<const char *, pt>::iterator i = d3_output_pt->begin(); i != d3_output_pt->end(); i++) {
2440 result.push_back(i->second);
2443 return result;
2447 * Check whether a cell is visible from a given viewpoint. This function
2448 * is guaranteed to return 1 when a cell is visible, but it is not guaranteed
2449 * to return 0 when a cell is invisible.
2451 static int pt_might_be_visible(const pt &viewpoint, point min, point max) {
2453 point cell[2] = {min, max};
2456 * Cycle through all vertices of the cell to check certain
2457 * properties.
2459 int pos[3] = {0, 0, 0};
2460 int neg[3] = {0, 0, 0};
2461 for (int i = 0; i < 2; i++)
2462 for (int j = 0; j < 2; j++)
2463 for (int k = 0; k < 2; k++) {
2464 point p = viewpoint.wp_unscaled(point(cell[i][0], cell[j][1], cell[k][2]));
2465 if (viewpoint.unscaled_in_bounds(p))
2466 return 1;
2468 for (int d = 0; d < 3; d++)
2469 if (p[d] >= 0)
2470 pos[d] = 1;
2472 if (p[0] <= viewpoint.unscaled_height() - 1)
2473 neg[0] = 1;
2475 if (p[1] <= viewpoint.unscaled_width() - 1)
2476 neg[1] = 1;
2478 if (p[2] <= 0)
2479 neg[2] = 1;
2482 if (!neg[2])
2483 return 0;
2485 if (pos[2] && neg[2])
2486 return 1;
2488 if (!pos[0]
2489 || !neg[0]
2490 || !pos[1]
2491 || !neg[1])
2492 return 0;
2494 return 1;
2498 * Check whether a cell is output-visible.
2500 static int output_might_be_visible(const std::vector<pt> &pt_outputs, point min, point max) {
2501 for (unsigned int n = 0; n < pt_outputs.size(); n++)
2502 if (pt_might_be_visible(pt_outputs[n], min, max))
2503 return 1;
2504 return 0;
2508 * Check whether a cell is input-visible.
2510 static int input_might_be_visible(unsigned int f, point min, point max) {
2511 return pt_might_be_visible(align::projective(f), min, max);
2515 * Check lower-bound resolution constraints
2517 static int exceeds_resolution_lower_bounds(unsigned int f1, unsigned int f2,
2518 point min, point max, const std::vector<pt> &pt_outputs) {
2519 assert(0);
2523 * Try the candidate nearest to the specified cell.
2525 static void try_nearest_candidate(unsigned int f1, unsigned int f2, candidates *c, point min, point max) {
2526 assert(0);
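/*
 * Check whether a cell is completely clipped, i.e., whether its entire depth
 * range lies in front of the front clipping plane or behind the rear
 * clipping plane.  Such cells are rejected by find_candidates() before any
 * visibility or resolution tests.
 */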
2529 static int completely_clipped(point min, point max) {
2530 return (min[2] > front_clip
2531 || max[2] < rear_clip);
2535 * Find candidates for subspace creation.
2537 static void find_candidates(unsigned int f1, unsigned int f2, candidates *c, point min, point max,
2538 const std::vector<pt> &pt_outputs) {
2540 if (completely_clipped(min, max))
2541 return;
2543 if (!input_might_be_visible(f1, min, max)
2544 || !input_might_be_visible(f2, min, max))
2545 return;
2547 if (output_clip && !output_might_be_visible(pt_outputs, min, max))
2548 return;
2550 if (exceeds_resolution_lower_bounds(f1, f2, min, max, pt_outputs)) {
2551 try_nearest_candidate(f1, f2, c, min, max);
2552 return;
2555 point new_cells[2][2];
2557 space::traverse::get_next_cells(min, max, new_cells);
2559 find_candidates(f1, f2, c, new_cells[0][0], new_cells[0][1], pt_outputs);
2560 find_candidates(f1, f2, c, new_cells[1][0], new_cells[1][1], pt_outputs);
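/*
 * find_candidates() thus proceeds by rejecting cells that are completely
 * clipped, not visible in both input frames, or (when output clipping is
 * enabled) not visible in any output view; cells meeting the resolution
 * lower bound contribute their nearest candidate, and all remaining cells
 * are subdivided into two child cells and searched recursively.
 */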
2564 * Initialize space and identify regions of interest for the adaptive
2565 * subspace model.
2567 static void make_space(const char *d_out[], const char *v_out[],
2568 std::map<const char *, pt> *d3_depth_pt,
2569 std::map<const char *, pt> *d3_output_pt) {
2571 fprintf(stderr, "Subdividing 3D space");
2573 std::vector<pt> pt_outputs = make_pt_list(d_out, v_out, d3_depth_pt, d3_output_pt);
2576 * Initialize root space.
2579 space::init_root();
2582 * Subdivide space to resolve intensity matches between pairs
2583 * of frames.
2586 for (unsigned int f1 = 0; f1 < d2::image_rw::count(); f1++) {
2588 if (tc_multiplier == 0)
2589 al->open(f1);
2591 for (unsigned int f2 = 0; f2 < d2::image_rw::count(); f2++) {
2593 if (f1 == f2)
2594 continue;
2596 if (tc_multiplier == 0)
2597 al->open(f2);
2599 candidates *c = new candidates(f1);
2601 find_candidates(f1, f2, c, point::neginf(), point::posinf(), pt_outputs);
2603 c->generate_subspaces();
2605 delete c;
2608 fprintf(stderr, ".\n");
2613 * Update spatial information structures.
2615 * XXX: the name of this function is horribly misleading. There isn't
2616 * even a 'search depth' any longer, since there is no longer any
2617 * bounded DFS occurring.
2619 static void reduce_cost_to_search_depth(d2::exposure *exp_out, int inc_bit) {
2622 * Subspace model
2625 for (unsigned int i = 0; i < ou_iterations; i++)
2626 spatial_info_update();
2629 #if 0
2631 * Describe a scene to a renderer
2633 static void describe(render *r) {
2635 #endif
2638 #endif