Add code for generating new levels of detail to d3::scene::ref_weights.
[Ale.git] / d3 / scene.h
blob74775bc63a9747e3805cf0cea07dedddd8f218cc
1 // Copyright 2003, 2004, 2005 David Hilvert <dhilvert@auricle.dyndns.org>,
2 // <dhilvert@ugcs.caltech.edu>
4 /* This file is part of the Anti-Lamenessing Engine.
6 The Anti-Lamenessing Engine is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 The Anti-Lamenessing Engine is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with the Anti-Lamenessing Engine; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * d3/scene.h: Representation of a 3D scene.
25 #ifndef __scene_h__
26 #define __scene_h__
28 #include "point.h"
31 * View angle multiplier.
33 * Setting this to a value larger than one can be useful for debugging.
36 #define VIEW_ANGLE_MULTIPLIER 1
38 class scene {
41 * Clipping planes
43 static ale_pos front_clip;
44 static ale_pos rear_clip;
47 * Decimation exponents for geometry
49 static ale_pos primary_decimation_upper;
50 static ale_pos input_decimation_lower;
51 static ale_pos output_decimation_preferred;
54 * Input resolution divisor
56 static ale_pos input_resolution_divisor;
59 * Output clipping
61 static int output_clip;
64 * Model files
66 static const char *load_model_name;
67 static const char *save_model_name;
70 * Occupancy attenuation
73 static double occ_att;
76 * Normalization of output by weight
79 static int normalize_weights;
82 * Falloff exponent
85 static double falloff_exponent;
88 * Third-camera error multiplier
90 static double tc_multiplier;
93 * Occupancy update iterations
95 static unsigned int ou_iterations;
98 * Pairwise ambiguity
100 static unsigned int pairwise_ambiguity;
103 * Pairwise comparisons
105 static const char *pairwise_comparisons;
108 * 3D Post-exclusion
110 static int d3px_count;
111 static double *d3px_parameters;
114 * Nearness threshold
116 static const ale_real nearness;
119 * Encounter threshold for defined pixels.
121 static double encounter_threshold;
124 * Structure to hold input frame information at levels of
125 * detail between full detail and full decimation.
127 class lod_image {
128 unsigned int f;
129 unsigned int entries;
130 std::vector<const d2::image *> im;
131 std::vector<pt> transformation;
133 public:
135 * Constructor
137 lod_image(unsigned int _f) {
139 pt _pt;
141 f = _f;
142 im.push_back(d2::image_rw::copy(f, "3D reference image"));
143 assert(im.back());
144 entries = 1;
145 _pt = d3::align::projective(f);
146 _pt.scale(1 / _pt.scale_2d());
147 transformation.push_back(_pt);
149 while (im.back()->height() > 4
150 && im.back()->width() > 4) {
152 im.push_back(im.back()->scale_by_half("3D, reduced LOD"));
153 assert(im.back());
155 _pt.scale(1 / _pt.scale_2d() / pow(2, entries));
156 transformation.push_back(_pt);
158 entries += 1;
163 * Get the number of scales
165 unsigned int count() {
166 return entries;
170 * Get an image.
172 const d2::image *get_image(unsigned int i) {
173 assert(i >= 0);
174 assert(i < entries);
175 return im[i];
179 int in_bounds(d2::point p) {
180 return im[0]->in_bounds(p);
184 * Get a 'trilinear' color. We currently don't do interpolation
185 * between levels of detail; hence, it's discontinuous in tl_coord.
187 d2::pixel get_tl(d2::point p, ale_pos tl_coord) {
189 assert(in_bounds(p));
191 tl_coord = round(tl_coord);
193 if (tl_coord >= entries)
194 tl_coord = entries;
195 if (tl_coord < 0)
196 tl_coord = 0;
198 p = p / (ale_pos) pow(2, tl_coord);
200 unsigned int itlc = (unsigned int) tl_coord;
202 if (p[0] > im[itlc]->height() - 1)
203 p[0] = im[itlc]->height() - 1;
204 if (p[1] > im[itlc]->width() - 1)
205 p[1] = im[itlc]->width() - 1;
207 assert(p[0] >= 0);
208 assert(p[1] >= 0);
210 return im[itlc]->get_bl(p);
214 * Get the transformation
216 pt get_t(unsigned int i) {
217 assert(i >= 0);
218 assert(i < entries);
219 return transformation[i];
223 * Get the camera origin in world coordinates
225 point origin() {
226 return transformation[0].cw(point(0, 0, 0));
230 * Destructor
232 ~lod_image() {
233 for (unsigned int i = 0; i < entries; i++) {
234 delete im[i];
240 * Structure to hold weight information for reference images.
242 class ref_weights {
243 unsigned int f;
244 std::vector<d2::image *> weights;
245 pt transformation;
246 int tc_low;
247 int tc_high;
248 int initialized;
250 void set_image(d2::image *im, ale_real value) {
251 for (unsigned int i = 0; i < im->height(); i++)
252 for (unsigned int j = 0; i < im->width(); j++)
253 im->pix(i, j) = d2::pixel(value, value, value);
256 d2::image *make_image(ale_pos sf, ale_real init_value = 0) {
257 d2::image *result = new d2::image_ale_real(
258 (unsigned int) ceil(transformation.unscaled_height() * sf),
259 (unsigned int) ceil(transformation.unscaled_width() * sf), 3);
261 if (init_value != 0)
262 set_image(result, init_value);
264 return result;
267 public:
269 * Constructor
271 ref_weights(unsigned int _f) {
272 f = _f;
273 transformation = d3::align::projective(f);
274 initialized = 0;
278 * Check spatial bounds.
280 int in_spatial_bounds(d2::point p) {
281 if (p[0] < 0)
282 return 0;
283 if (p[1] < 0)
284 return 0;
285 if (p[0] > transformation.unscaled_height() - 1)
286 return 0;
287 if (p[1] > transformation.unscaled_width() - 1)
288 return 0;
289 if (p[2] >= 0)
290 return 0;
292 return 1;
295 int in_spatial_bounds(const space::traverse &t) {
296 d2::point p = transformation.wp_unscaled(t.get_centroid()).xy();
297 return in_spatial_bounds(p);
301 * Add weight.
303 void add_weight(const space::traverse &t, ale_real weight) {
304 ale_pos tc = transformation.trilinear_coordinate(t);
305 d2::point p = transformation.wp_unscaled(t.get_centroid()).xy();
306 assert(in_spatial_bounds(p));
308 tc = round(tc);
311 * Establish a reasonable (?) upper bound on resolution.
314 if (tc < input_decimation_lower)
315 tc = input_decimation_lower;
318 * Initialize, if necessary.
321 if (!initialized) {
322 tc_low = tc_high = tc;
324 ale_real sf = pow(2, -tc);
326 weights[0] = make_image(sf);
329 assert (tc_low <= tc_high);
332 * Generate additional levels of detail, if necessary.
335 while (tc < tc_low) {
336 tc_low--;
338 ale_real sf = pow(2, -tc_low);
340 weights.insert(weights.begin(), make_image(sf));
343 while (tc > tc_high) {
344 tc_high++;
346 ale_real sf = pow(2, -tc_high);
348 weights.push_back(make_image(sf, -1));
353 * Get weight
355 ale_real get_weight(const space::traverse &t) {
356 ale_pos tc = transformation.trilinear_coordinate(t);
357 d2::point p = transformation.wp_unscaled(t.get_centroid()).xy();
358 assert(in_spatial_bounds(p));
360 if (!initialized)
361 return 0;
365 * Destructor
367 ~ref_weights() {
368 for (unsigned int i = 0; i < weights.size(); i++) {
369 delete weights[i];
375 * Resolution check.
377 int resolution_ok(transformation t, const space::traverse &t) {
378 ale_pos tc = transformation.trilinear_coordinate(t);
379 assert(0);
383 * Structure to hold input frame information at all levels of detail.
385 class lod_images {
388 * All images.
391 std::vector<lod_image *> images;
393 public:
395 lod_images() {
396 images.resize(d2::image_rw::count(), NULL);
399 void open(unsigned int f) {
400 assert (images[f] = NULL);
402 if (images[f] == NULL)
403 images[f] = new lod_image(f);
406 void open_all() {
407 for (unsigned int f = 0; f < d2::image_rw::count(); f++)
408 open(f);
411 lod_image *get(unsigned int f) {
412 assert (images[f] != NULL);
413 return images[f];
416 void close(unsigned int f) {
417 assert (images[f] != NULL);
418 delete images[f];
419 images[f] = NULL;
422 void close_all() {
423 for (unsigned int f = 0; f < d2::image_rw::count(); f++)
424 close(f);
427 ~lod_images() {
428 close_all();
433 * All levels-of-detail
436 static struct lod_images *al;
439 * List for calculating weighted median.
441 class wml {
442 ale_real *data;
443 unsigned int size;
444 unsigned int used;
446 ale_real &_w(unsigned int i) {
447 assert(i <= used);
448 return data[i * 2];
451 ale_real &_d(unsigned int i) {
452 assert(i <= used);
453 return data[i * 2 + 1];
456 void increase_capacity() {
458 if (size > 0)
459 size *= 2;
460 else
461 size = 1;
463 data = (ale_real *) realloc(data, sizeof(ale_real) * 2 * (size * 1));
465 assert(data);
466 assert (size > used);
468 if (!data) {
469 fprintf(stderr, "Unable to allocate %d bytes of memory\n",
470 sizeof(ale_real) * 2 * (size * 1));
471 exit(1);
475 void insert_weight(unsigned int i, ale_real v, ale_real w) {
476 assert(used < size);
477 assert(used >= i);
478 for (unsigned int j = used; j > i; j--) {
479 _w(j) = _w(j - 1);
480 _d(j) = _d(j - 1);
483 _w(i) = w;
484 _d(i) = v;
486 used++;
489 public:
491 unsigned int get_size() {
492 return size;
495 unsigned int get_used() {
496 return used;
499 void print_info() {
500 fprintf(stderr, "[st %p sz %u el", this, size);
501 for (unsigned int i = 0; i < used; i++)
502 fprintf(stderr, " (%f, %f)", _d(i), _w(i));
503 fprintf(stderr, "]\n");
506 void clear() {
507 used = 0;
510 void insert_weight(ale_real v, ale_real w) {
511 for (unsigned int i = 0; i < used; i++) {
512 if (_d(i) == v) {
513 _w(i) += w;
514 return;
516 if (_d(i) > v) {
517 if (used == size)
518 increase_capacity();
519 insert_weight(i, v, w);
520 return;
523 if (used == size)
524 increase_capacity();
525 insert_weight(used, v, w);
529 * Finds the median at half-weight, or between half-weight
530 * and zero-weight, depending on the attenuation value.
533 ale_real find_median(double attenuation = 0) {
535 assert(attenuation >= 0);
536 assert(attenuation <= 1);
538 ale_real zero1 = 0;
539 ale_real zero2 = 0;
540 ale_real undefined = zero1 / zero2;
542 ale_accum weight_sum = 0;
544 for (unsigned int i = 0; i < used; i++)
545 weight_sum += _w(i);
547 // if (weight_sum == 0)
548 // return undefined;
550 if (used == 0 || used == 1)
551 return undefined;
553 if (weight_sum == 0) {
554 ale_accum data_sum = 0;
555 for (unsigned int i = 0; i < used; i++)
556 data_sum += _d(i);
557 return data_sum / used;
561 ale_accum midpoint = weight_sum * (0.5 - 0.5 * attenuation);
563 ale_accum weight_sum_2 = 0;
565 for (unsigned int i = 0; i < used && weight_sum_2 < midpoint; i++) {
566 weight_sum_2 += _w(i);
568 if (weight_sum_2 > midpoint) {
569 return _d(i);
570 } else if (weight_sum_2 == midpoint) {
571 assert (i + 1 < used);
572 return (_d(i) + _d(i + 1)) / 2;
576 return undefined;
579 wml(int initial_size = 0) {
581 // if (initial_size == 0) {
582 // initial_size = (int) (d2::image_rw::count() * 1.5);
583 // }
585 size = initial_size;
586 used = 0;
588 if (size > 0) {
589 data = (ale_real *) malloc(size * sizeof(ale_real) * 2);
590 assert(data);
591 } else {
592 data = NULL;
597 * copy constructor. This is required to avoid undesired frees.
600 wml(const wml &w) {
601 size = w.size;
602 used = w.used;
603 data = (ale_real *) malloc(size * sizeof(ale_real) * 2);
604 assert(data);
606 memcpy(data, w.data, size * sizeof(ale_real) * 2);
609 ~wml() {
610 free(data);
615 * Class for information regarding spatial regions of interest.
617 * This class is configured for convenience in cases where sampling is
618 * performed using an approximation of the fine:box:1,triangle:2 chain.
619 * In this case, the *_1 variables would store the fine data and the
620 * *_2 variables would store the coarse data. Other uses are also
621 * possible.
624 class spatial_info {
626 * Map channel value --> weight.
628 wml color_weights_1[3];
629 wml color_weights_2[3];
632 * Current color.
634 d2::pixel color;
637 * Map occupancy value --> weight.
639 wml occupancy_weights_1;
640 wml occupancy_weights_2;
643 * Current occupancy value.
645 ale_real occupancy;
648 * pocc/socc density
651 unsigned int pocc_density;
652 unsigned int socc_density;
655 * Insert a weight into a list.
657 void insert_weight(wml *m, ale_real v, ale_real w) {
658 m->insert_weight(v, w);
662 * Find the median of a weighted list. Uses NaN for undefined.
664 ale_real find_median(wml *m, double attenuation = 0) {
665 return m->find_median(attenuation);
668 public:
670 * Constructor.
672 spatial_info() {
673 color = d2::pixel::zero();
674 occupancy = 0;
675 pocc_density = 0;
676 socc_density = 0;
680 * Accumulate color; primary data set.
682 void accumulate_color_1(int f, int i, int j, d2::pixel color, d2::pixel weight) {
683 for (int k = 0; k < 3; k++)
684 insert_weight(&color_weights_1[k], color[k], weight[k]);
688 * Accumulate color; secondary data set.
690 void accumulate_color_2(d2::pixel color, d2::pixel weight) {
691 for (int k = 0; k < 3; k++)
692 insert_weight(&color_weights_2[k], color[k], weight[k]);
696 * Accumulate occupancy; primary data set.
698 void accumulate_occupancy_1(int f, int i, int j, ale_real occupancy, ale_real weight) {
699 insert_weight(&occupancy_weights_1, occupancy, weight);
703 * Accumulate occupancy; secondary data set.
705 void accumulate_occupancy_2(ale_real occupancy, ale_real weight) {
706 insert_weight(&occupancy_weights_2, occupancy, weight);
708 if (occupancy == 0 || occupancy_weights_2.get_size() < 96)
709 return;
711 // fprintf(stderr, "%p updated socc with: %f %f\n", this, occupancy, weight);
712 // occupancy_weights_2.print_info();
716 * Update color (and clear accumulation structures).
718 void update_color() {
719 for (int d = 0; d < 3; d++) {
720 ale_real c = find_median(&color_weights_1[d]);
721 if (isnan(c))
722 c = find_median(&color_weights_2[d]);
723 if (isnan(c))
724 c = 0;
726 color[d] = c;
728 color_weights_1[d].clear();
729 color_weights_2[d].clear();
734 * Update occupancy (and clear accumulation structures).
736 void update_occupancy() {
737 ale_real o = find_median(&occupancy_weights_1, occ_att);
738 if (isnan(o))
739 o = find_median(&occupancy_weights_2, occ_att);
740 if (isnan(o))
741 o = 0;
743 occupancy = o;
745 pocc_density = occupancy_weights_1.get_used();
746 socc_density = occupancy_weights_2.get_used();
748 occupancy_weights_1.clear();
749 occupancy_weights_2.clear();
754 * Get current color.
756 d2::pixel get_color() {
757 return color;
761 * Get current occupancy.
763 ale_real get_occupancy() {
764 return occupancy;
768 * Get primary color density.
771 unsigned int get_pocc_density() {
772 return pocc_density;
775 unsigned int get_socc_density() {
776 return socc_density;
781 * Map spatial regions of interest to spatial info structures. XXX:
782 * This may get very poor cache behavior in comparison with, say, an
783 * array. Unfortunately, there is no immediately obvious array
784 * representation. If some kind of array representation were adopted,
785 * it would probably cluster regions of similar depth from the
786 * perspective of the typical camera. In particular, for a
787 * stereoscopic view, depth ordering for two random points tends to be
788 * similar between cameras, I think. Unfortunately, it is never
789 * identical for all points (unless cameras are co-located). One
790 * possible approach would be to order based on, say, camera 0's idea
791 * of depth.
794 typedef std::map<struct space::node *, spatial_info> spatial_info_map_t;
796 static spatial_info_map_t spatial_info_map;
798 public:
801 * Debugging variables.
804 static unsigned long total_ambiguity;
805 static unsigned long total_pixels;
806 static unsigned long total_divisions;
807 static unsigned long total_tsteps;
/*
 * Member functions
 */

/* Set the encounter threshold for defined pixels. */
static void et(double et_parameter) {
	encounter_threshold = et_parameter;
}

/* Set the filename from which a model is loaded. */
static void load_model(const char *name) {
	load_model_name = name;
}

/* Set the filename to which the model is saved. */
static void save_model(const char *name) {
	save_model_name = name;
}

/* Set the front clipping plane depth. */
static void fc(ale_pos fc) {
	front_clip = fc;
}

/* Set the upper decimation exponent for primary geometry. */
static void di_upper(ale_pos _dgi) {
	primary_decimation_upper = _dgi;
}

/* Set the preferred output decimation exponent. */
static void do_try(ale_pos _dgo) {
	output_decimation_preferred = _dgo;
}

/* Set the lower decimation exponent for input. */
static void di_lower(ale_pos _idiv) {
	input_decimation_lower = _idiv;
}

/* Enable output clipping. */
static void oc() {
	output_clip = 1;
}

/* Disable output clipping. */
static void no_oc() {
	output_clip = 0;
}

/* Set the rear clipping plane depth (0 means infinity; see init_from_d2). */
static void rc(ale_pos rc) {
	rear_clip = rc;
}
/*
 * Initialize 3D scene from 2D scene, using 2D and 3D alignment
 * information.
 */
static void init_from_d2() {

	/*
	 * Rear clip value of 0 is converted to infinity.
	 */
	if (rear_clip == 0) {
		ale_pos one = +1;
		ale_pos zero = +0;

		/*
		 * 1/0 yields +inf under IEEE 754; written via variables
		 * to avoid a compile-time division-by-zero diagnostic.
		 */
		rear_clip = one / zero;
		assert(isinf(rear_clip) == +1);
	}

	/*
	 * Scale and translate clipping plane depths.
	 */
	ale_pos cp_scalar = d3::align::projective(0).wc(point(0, 0, 0))[2];

	front_clip = front_clip * cp_scalar - cp_scalar;
	rear_clip = rear_clip * cp_scalar - cp_scalar;

	/*
	 * Allocate image structures.
	 */
	al = new lod_images;

	if (tc_multiplier != 0) {
		al->open_all();
	}
}
/*
 * Perform spatial_info updating on a given subspace, for given
 * parameters.
 *
 * si       iterator over the subspaces to process
 * f        frame index
 * weights  per-pixel accumulated encounter weights (updated in place)
 * im       reference image for frame f
 * _pt      view transformation for frame f
 *
 * Recurses into higher-resolution subspaces before accumulating data
 * for the current one.
 */
static void subspace_info_update(space::iterate si, int f, d2::image *weights, const d2::image *im, pt _pt) {

	while(!si.done()) {

		space::traverse st = si.get();

		/*
		 * XXX: This could be more efficient, perhaps.
		 */
		if (spatial_info_map.count(st.get_node()) == 0) {
			si.next();
			continue;
		}

		spatial_info *sn = &spatial_info_map[st.get_node()];

		/*
		 * Get information on the subspace.
		 */
		d2::pixel color = sn->get_color();
		ale_real occupancy = sn->get_occupancy();

		/*
		 * Determine the view-local bounding box for the
		 * subspace.
		 */
		point bb[2];

		_pt.get_view_local_bb(st, bb);

		point min = bb[0];
		point max = bb[1];

		/*
		 * Use the center of the bounding box to grab interpolation data.
		 */
		d2::point interp((min[0] + max[0]) / 2, (min[1] + max[1]) / 2);

#if 0
		/*
		 * For interpolation points, ensure that the
		 * bounding box area is at least 0.25.  XXX: Why?
		 * Remove this constraint.
		 *
		 * XXX: Is interpolation useful for anything, given
		 * that we're using spatial info registers at multiple
		 * resolutions?
		 */
		if (/* (max[0] - min[0]) * (max[1] - min[1]) > 0.25
		    && */ max[0] > min[0]
		    && max[1] > min[1]) {
			d2::pixel encounter = (d2::pixel(1, 1, 1) - weights->get_bl(interp));
			d2::pixel pcolor = im->get_bl(interp);
			d2::pixel colordiff = (color - pcolor) * (ale_real) 256;

			if (falloff_exponent != 0) {
				d2::pixel max_diff = im->get_max_diff(interp) * (ale_real) 256;

				for (int k = 0; k < 3; k++)
					if (max_diff[k] > 1)
						colordiff[k] /= pow(max_diff[k], falloff_exponent);
			}

			/*
			 * XXX: the best approach is probably to use 3 separate occupancy
			 * data sets, just as there are 3 separate color data sets.
			 */
			d2::pixel channel_occ = pexp(-colordiff * colordiff);

			ale_accum occ = channel_occ[0];

			for (int k = 1; k < 3; k++)
				if (channel_occ[k] < occ)
					occ = channel_occ[k];

			sn->accumulate_occupancy_2(occ, encounter[0]);
#if 0
			for (int k = 0; k < 3; k++)
				sn->accumulate_occupancy_2(channel_occ[k], encounter[k]);
#endif
		}
#endif

		/*
		 * Data structure to check modification of weights by
		 * higher-resolution subspaces.
		 */
		std::queue<d2::pixel> weight_queue;

		/*
		 * Check for higher resolution subspaces, and
		 * update the space iterator.
		 */
		if (st.get_node()->positive
		 || st.get_node()->negative) {

			/*
			 * Store information about current weights,
			 * so we will know which areas have been
			 * covered by higher-resolution subspaces.
			 */
			for (int i = (int) ceil(min[0]); i <= (int) floor(max[0]); i++)
			for (int j = (int) ceil(min[1]); j <= (int) floor(max[1]); j++) {
				if (i < 0 || j < 0)
					continue;
				weight_queue.push(weights->get_pixel(i, j));
			}

			/*
			 * Cleave space for the higher-resolution pass,
			 * skipping the current space, since we will
			 * process that later.
			 */
			space::iterate cleaved_space = si.cleave();

			cleaved_space.next();

			subspace_info_update(cleaved_space, f, weights, im, _pt);

		} else {
			si.next();
		}

		/*
		 * Iterate over pixels in the bounding box,
		 * adding new data to the subspace.  XXX:
		 * assume for now that all pixels in the
		 * bounding box intersect the subspace.
		 *
		 * NOTE(review): only the lower bound (i, j >= 0) is
		 * checked here; the upper bound is presumably clipped
		 * upstream by get_view_local_bb — verify.
		 */
		for (int i = (int) ceil(min[0]); i <= (int) floor(max[0]); i++)
		for (int j = (int) ceil(min[1]); j <= (int) floor(max[1]); j++) {

			if (i < 0 || j < 0)
				continue;

			d2::pixel pcolor = im->get_pixel(i, j);
			d2::pixel colordiff = (color - pcolor) * (ale_real) 256;

			if (falloff_exponent != 0) {
				d2::pixel max_diff = im->get_max_diff(interp) * (ale_real) 256;

				for (int k = 0; k < 3; k++)
					if (max_diff[k] > 1)
						colordiff[k] /= pow(max_diff[k], falloff_exponent);
			}

			/*
			 * Determine the probability of
			 * encounter, divided by the occupancy.
			 */
			d2::pixel encounter = (d2::pixel(1, 1, 1) - weights->get_pixel(i, j));

			/*
			 * Check for higher-resolution modifications.
			 */
			int high_res_mod = 0;

			if (weight_queue.size()) {
				if (weight_queue.front() != weights->get_pixel(i, j)) {
					high_res_mod = 1;
					encounter = d2::pixel(1, 1, 1) - weight_queue.front();
				}
				weight_queue.pop();
			}

			/*
			 * Update subspace.
			 */
			sn->accumulate_color_1(f, i, j, pcolor, encounter);
			d2::pixel channel_occ = pexp(-colordiff * colordiff);

			ale_accum occ = channel_occ[0];

			for (int k = 1; k < 3; k++)
				if (channel_occ[k] < occ)
					occ = channel_occ[k];

			sn->accumulate_occupancy_1(f, i, j, occ, encounter[0]);

			/*
			 * If weights have not been updated by
			 * higher-resolution cells, then update
			 * weights at the current resolution.
			 */
			if (!high_res_mod)
				weights->pix(i, j) += encounter * occupancy;
		}
	}
}
1122 * Run a single iteration of the spatial_info update cycle.
1124 static void spatial_info_update() {
1126 * Iterate through each frame.
1128 for (unsigned int f = 0; f < d2::image_rw::count(); f++) {
1131 * Open the frame and transformation.
1134 if (tc_multiplier == 0)
1135 al->open(f);
1138 * Allocate an image for storing encounter probabilities.
1140 d2::image *weights = new d2::image_ale_real((int) floor(_pt.scaled_height()),
1141 (int) floor(_pt.scaled_width()), 3);
1143 assert(weights);
1146 * Call subspace_info_update for the root space.
1149 subspace_info_update(space::iterate(al->get(f)->origin()), f, weights, _pt);
1151 delete weights;
1153 if (tc_multiplier == 0)
1154 al->close(f);
1158 * Update all spatial_info structures.
1160 for (spatial_info_map_t::iterator i = spatial_info_map.begin(); i != spatial_info_map.end(); i++) {
1161 i->second.update_color();
1162 i->second.update_occupancy();
1164 // d2::pixel color = i->second.get_color();
1166 // fprintf(stderr, "space p=%p updated to c=[%f %f %f] o=%f\n",
1167 // i->first, color[0], color[1], color[2],
1168 // i->second.get_occupancy());
1173 * Support function for view() and depth().
1176 static const void view_recurse(int type, d2::image *im, d2::image *weights, space::iterate si, pt _pt) {
1177 while (!si.done()) {
1178 space::traverse st = si.get();
1181 * XXX: This could be more efficient, perhaps.
1184 if (spatial_info_map.count(st.get_node()) == 0) {
1185 si.next();
1186 continue;
1189 spatial_info sn = spatial_info_map[st.get_node()];
1192 * Get information on the subspace.
1195 d2::pixel color = sn.get_color();
1196 // d2::pixel color = d2::pixel(1, 1, 1) * (double) (((unsigned int) (st.get_node()) / sizeof(space)) % 65535);
1197 ale_real occupancy = sn.get_occupancy();
1200 * Determine the view-local bounding box for the
1201 * subspace.
1204 point bb[2];
1206 _pt.get_view_local_bb(st, bb);
1208 point min = bb[0];
1209 point max = bb[1];
1212 * Data structure to check modification of weights by
1213 * higher-resolution subspaces.
1216 std::queue<d2::pixel> weight_queue;
1219 * Check for higher resolution subspaces, and
1220 * update the space iterator.
1223 if (st.get_node()->positive
1224 || st.get_node()->negative) {
1227 * Store information about current weights,
1228 * so we will know which areas have been
1229 * covered by higher-resolution subspaces.
1232 for (int i = (int) ceil(min[0]); i <= (int) floor(max[0]); i++)
1233 for (int j = (int) ceil(min[1]); j <= (int) floor(max[1]); j++)
1234 weight_queue.push(weights->get_pixel(i, j));
1237 * Cleave space for the higher-resolution pass,
1238 * skipping the current space, since we will
1239 * process that afterward.
1242 space::iterate cleaved_space = si.cleave();
1244 cleaved_space.next();
1246 view_recurse(type, im, weights, cleaved_space, _pt);
1248 } else {
1249 si.next();
1254 * Iterate over pixels in the bounding box, finding
1255 * pixels that intersect the subspace. XXX: assume
1256 * for now that all pixels in the bounding box
1257 * intersect the subspace.
1260 for (int i = (int) ceil(min[0]); i <= (int) floor(max[0]); i++)
1261 for (int j = (int) ceil(min[1]); j <= (int) floor(max[1]); j++) {
1264 * Check for higher-resolution updates.
1267 if (weight_queue.size()) {
1268 if (weight_queue.front() != weights->get_pixel(i, j)) {
1269 weight_queue.pop();
1270 continue;
1272 weight_queue.pop();
1276 * Determine the probability of encounter.
1279 d2::pixel encounter = (d2::pixel(1, 1, 1) - weights->get_pixel(i, j)) * occupancy;
1282 * Update images.
1285 if (type == 0) {
1288 * Color view
1291 weights->pix(i, j) += encounter;
1292 im->pix(i, j) += encounter * color;
1294 } else if (type == 1) {
1297 * Weighted (transparent) depth display
1300 ale_pos depth_value = _pt.wp_scaled(st.get_min())[2];
1301 weights->pix(i, j) += encounter;
1302 im->pix(i, j) += encounter * depth_value;
1304 } else if (type == 2) {
1307 * Ambiguity (ambivalence) measure.
1310 weights->pix(i, j) = d2::pixel(1, 1, 1);
1311 im->pix(i, j) += 0.1 * d2::pixel(1, 1, 1);
1313 } else if (type == 3) {
1316 * Closeness measure.
1319 ale_pos depth_value = _pt.wp_scaled(st.get_min())[2];
1320 if (weights->pix(i, j)[0] == 0) {
1321 weights->pix(i, j) = d2::pixel(1, 1, 1);
1322 im->pix(i, j) = d2::pixel(1, 1, 1) * depth_value;
1323 } else if (im->pix(i, j)[2] < depth_value) {
1324 im->pix(i, j) = d2::pixel(1, 1, 1) * depth_value;
1325 } else {
1326 continue;
1329 } else if (type == 4) {
1332 * Weighted (transparent) contribution display
1335 ale_pos contribution_value = sn.get_pocc_density() + sn.get_socc_density();
1336 weights->pix(i, j) += encounter;
1337 im->pix(i, j) += encounter * contribution_value;
1339 } else if (type == 5) {
1342 * Weighted (transparent) occupancy display
1345 ale_pos contribution_value = occupancy;
1346 weights->pix(i, j) += encounter;
1347 im->pix(i, j) += encounter * contribution_value;
1349 } else if (type == 6) {
1352 * (Depth, xres, yres) triple
1355 ale_pos depth_value = _pt.wp_scaled(st.get_min())[2];
1356 weights->pix(i, j)[0] += encounter[0];
1357 if (weights->pix(i, j)[1] < encounter[0]) {
1358 weights->pix(i, j)[1] = encounter[0];
1359 im->pix(i, j)[0] = weights->pix(i, j)[1] * depth_value;
1360 im->pix(i, j)[1] = max[0] - min[0];
1361 im->pix(i, j)[2] = max[1] - min[1];
1364 } else if (type == 7) {
1367 * (xoff, yoff, 0) triple
1370 weights->pix(i, j)[0] += encounter[0];
1371 if (weights->pix(i, j)[1] < encounter[0]) {
1372 weights->pix(i, j)[1] = encounter[0];
1373 im->pix(i, j)[0] = i - min[0];
1374 im->pix(i, j)[1] = j - min[1];
1375 im->pix(i, j)[2] = 0;
1378 } else
1379 assert(0);
/*
 * Generate a depth image from a specified view.
 *
 * _pt  view transformation
 * n    input frame index, or -1 when the view does not correspond to
 *      an input frame (then no dimension cross-check is performed)
 *
 * Returns a newly allocated image (caller owns it) whose channels all
 * hold the interpolated depth; undefined pixels are set to NaN.
 */
static const d2::image *depth(pt _pt, int n = -1) {
	assert ((unsigned int) n < d2::image_rw::count() || n < 0);

	if (n >= 0) {
		assert((int) floor(d2::align::of(n).scaled_height())
		    == (int) floor(_pt.scaled_height()));
		assert((int) floor(d2::align::of(n).scaled_width())
		    == (int) floor(_pt.scaled_width()));
	}

	/* im1 receives (depth, xres, yres); im2 receives (xoff, yoff, 0);
	 * im3 is the interpolated result. */
	d2::image *im1 = new d2::image_ale_real((int) floor(_pt.scaled_height()),
			(int) floor(_pt.scaled_width()), 3);

	d2::image *im2 = new d2::image_ale_real((int) floor(_pt.scaled_height()),
			(int) floor(_pt.scaled_width()), 3);

	d2::image *im3 = new d2::image_ale_real((int) floor(_pt.scaled_height()),
			(int) floor(_pt.scaled_width()), 3);

	_pt.view_angle(_pt.view_angle() * VIEW_ANGLE_MULTIPLIER);

	/*
	 * Use adaptive subspace data.
	 */
	d2::image *weights = new d2::image_ale_real((int) floor(_pt.scaled_height()),
			(int) floor(_pt.scaled_width()), 3);

	/*
	 * Iterate through subspaces.
	 */
	space::iterate si(_pt);

	view_recurse(6, im1, weights, si, _pt);

	delete weights;
	weights = new d2::image_ale_real((int) floor(_pt.scaled_height()),
			(int) floor(_pt.scaled_width()), 3);

	view_recurse(7, im2, weights, si, _pt);

	/*
	 * Normalize depths by weights
	 */
	if (normalize_weights)
		for (unsigned int i = 0; i < im1->height(); i++)
		for (unsigned int j = 0; j < im1->width(); j++)
			im1->pix(i, j)[0] /= weights->pix(i, j)[1];

	for (unsigned int i = 0; i < im1->height(); i++)
	for (unsigned int j = 0; j < im1->width(); j++) {

		/*
		 * Handle interpolation.  x is the lower-left sample
		 * position; blx holds the bilinear weights.
		 */
		d2::point x;
		d2::point blx;
		d2::point res(im1->pix(i, j)[1], im1->pix(i, j)[2]);

		for (int d = 0; d < 2; d++) {

			if (im2->pix(i, j)[d] < res[d] / 2)
				x[d] = (ale_pos) (d?j:i) - res[d] / 2 - im2->pix(i, j)[d];
			else
				x[d] = (ale_pos) (d?j:i) + res[d] / 2 - im2->pix(i, j)[d];

			blx[d] = 1 - ((d?j:i) - x[d]) / res[d];
		}

		/* Bilinear combination of the four surrounding depth
		 * samples, skipping NaN (undefined) samples. */
		ale_real depth_val = 0;
		ale_real depth_weight = 0;

		for (int ii = 0; ii < 2; ii++)
		for (int jj = 0; jj < 2; jj++) {
			d2::point p = x + d2::point(ii, jj) * res;
			if (im1->in_bounds(p)) {

				ale_real d = im1->get_bl(p)[0];

				if (isnan(d))
					continue;

				ale_real w = ((ii ? (1 - blx[0]) : blx[0]) * (jj ? (1 - blx[1]) : blx[1]));
				depth_weight += w;
				depth_val += w * d;
			}
		}

		/* NaN when depth_weight == 0, marking the pixel undefined. */
		ale_real depth = depth_val / depth_weight;

		/*
		 * Handle exclusions and encounter thresholds
		 */
		point w = _pt.pw_scaled(point(i, j, depth));

		if (weights->pix(i, j)[0] < encounter_threshold || excluded(w)) {
			/* 0/0 generates NaN, the undefined marker. */
			im3->pix(i, j) = d2::pixel::zero() / d2::pixel::zero();
		} else {
			im3->pix(i, j) = d2::pixel(1, 1, 1) * depth;
		}
	}

	delete weights;
	delete im1;
	delete im2;

	return im3;
}
1501 static const d2::image *depth(unsigned int n) {
1503 assert (n < d2::image_rw::count());
1505 pt _pt = align::projective(n);
1507 return depth(_pt, n);
/*
 * Generate an image from a specified view.
 *
 * _pt  view transformation
 * n    input frame index, or -1 when the view does not correspond to
 *      an input frame (then no dimension cross-check is performed)
 *
 * Returns a newly allocated image (caller owns it); pixels below the
 * encounter threshold, or excluded by 3D post-exclusion, are NaN.
 */
static const d2::image *view(pt _pt, int n = -1) {
	assert ((unsigned int) n < d2::image_rw::count() || n < 0);

	if (n >= 0) {
		assert((int) floor(d2::align::of(n).scaled_height())
		    == (int) floor(_pt.scaled_height()));
		assert((int) floor(d2::align::of(n).scaled_width())
		    == (int) floor(_pt.scaled_width()));
	}

	/* Depth map is needed only when 3D post-exclusion is active. */
	const d2::image *depths = NULL;

	if (d3px_count > 0)
		depths = depth(_pt, n);

	d2::image *im = new d2::image_ale_real((int) floor(_pt.scaled_height()),
			(int) floor(_pt.scaled_width()), 3);

	_pt.view_angle(_pt.view_angle() * VIEW_ANGLE_MULTIPLIER);

	/*
	 * Use adaptive subspace data.
	 */
	d2::image *weights = new d2::image_ale_real((int) floor(_pt.scaled_height()),
			(int) floor(_pt.scaled_width()), 3);

	/*
	 * Iterate through subspaces.
	 */
	space::iterate si(_pt);

	view_recurse(0, im, weights, si, _pt);

	for (unsigned int i = 0; i < im->height(); i++)
	for (unsigned int j = 0; j < im->width(); j++) {
		if (weights->pix(i, j).min_norm() < encounter_threshold
		 || (d3px_count > 0 && isnan(depths->pix(i, j)[0]))) {
			/* 0/0 generates NaN, marking the pixel undefined. */
			im->pix(i, j) = d2::pixel::zero() / d2::pixel::zero();
			weights->pix(i, j) = d2::pixel::zero();
		} else if (normalize_weights)
			im->pix(i, j) /= weights->pix(i, j);
	}

	if (d3px_count > 0)
		delete depths;

	delete weights;

	return im;
}
1566 static void tcem(double _tcem) {
1567 tc_multiplier = _tcem;
1570 static void oui(unsigned int _oui) {
1571 ou_iterations = _oui;
1574 static void pa(unsigned int _pa) {
1575 pairwise_ambiguity = _pa;
1578 static void pc(const char *_pc) {
1579 pairwise_comparisons = _pc;
1582 static void d3px(int _d3px_count, double *_d3px_parameters) {
1583 d3px_count = _d3px_count;
1584 d3px_parameters = _d3px_parameters;
1587 static void fx(double _fx) {
1588 falloff_exponent = _fx;
1591 static void nw() {
1592 normalize_weights = 1;
1595 static void no_nw() {
1596 normalize_weights = 0;
1599 static int excluded(point p) {
1600 for (int n = 0; n < d3px_count; n++) {
1601 double *region = d3px_parameters + (6 * n);
1602 if (p[0] >= region[0]
1603 && p[0] <= region[1]
1604 && p[1] >= region[2]
1605 && p[1] <= region[3]
1606 && p[2] >= region[4]
1607 && p[2] <= region[5])
1608 return 1;
1611 return 0;
1614 static const d2::image *view(unsigned int n) {
1616 assert (n < d2::image_rw::count());
1618 pt _pt = align::projective(n);
1620 return view(_pt, n);
1624 * Add specified control points to the model
1626 static void add_control_points() {
// NOTE(review): the body appears empty in this view -- control-point
// handling looks unimplemented here; confirm against the full file.
1629 typedef struct {point iw; point ip, is;} analytic;
// analytic: one candidate ray intersection -- iw is the point in world
// coordinates; ip and is are its projections into the primary and
// secondary frames, respectively (see solve_point_pair()).
1630 typedef std::multimap<ale_real,analytic> score_map;
// score_map: candidates keyed by normalized match score; multimap
// ordering means iteration visits lower (better) scores first.
1631 typedef std::pair<ale_real,analytic> score_map_element;
1633 static void solve_point_pair(unsigned int f1, unsigned int f2, int i, int j, ale_pos ii, ale_pos jj,
1634 ale_real *normalized_score, analytic *_a,
1635 const pt &_pt1, const pt &_pt2, const d2::image *if1, const d2::image *if2) {
1637 *normalized_score = 0;
1638 *normalized_score /= *normalized_score;
1639 assert(isnan(*normalized_score));
1642 * Create an orthonormal basis to
1643 * determine line intersection.
1646 point bp0 = _pt1.pw_scaled(point(i, j, 0));
1647 point bp1 = _pt1.pw_scaled(point(i, j, 10));
1648 point bp2 = _pt2.pw_scaled(point(ii, jj, 0));
1650 point b0 = (bp1 - bp0).normalize();
1651 point b1n = bp2 - bp0;
1652 point b1 = (b1n - b1n.dproduct(b0) * b0).normalize();
1653 point b2 = point(0, 0, 0).xproduct(b0, b1).normalize(); // Should already have norm=1
1656 * Select a fourth point to define a second line.
1659 point p3 = _pt2.pw_scaled(point(ii, jj, 10));
1662 * Representation in the new basis.
1665 d2::point nbp0 = d2::point(bp0.dproduct(b0), bp0.dproduct(b1));
1666 // d2::point nbp1 = d2::point(bp1.dproduct(b0), bp1.dproduct(b1));
1667 d2::point nbp2 = d2::point(bp2.dproduct(b0), bp2.dproduct(b1));
1668 d2::point np3 = d2::point( p3.dproduct(b0), p3.dproduct(b1));
1671 * Determine intersection of line
1672 * (nbp0, nbp1), which is parallel to
1673 * b0, with line (nbp2, np3).
1677 * XXX: a stronger check would be
1678 * better here, e.g., involving the
1679 * ratio (np3[0] - nbp2[0]) / (np3[1] -
1680 * nbp2[1]). Also, acceptance of these
1681 * cases is probably better than
1682 * rejection.
1685 if (np3[1] - nbp2[1] == 0)
1686 return;
1688 d2::point intersection = d2::point(nbp2[0]
1689 + (nbp0[1] - nbp2[1]) * (np3[0] - nbp2[0]) / (np3[1] - nbp2[1]),
1690 nbp0[1]);
1692 ale_pos b2_offset = b2.dproduct(bp0);
1695 * Map the intersection back to the world
1696 * basis.
1699 point iw = intersection[0] * b0 + intersection[1] * b1 + b2_offset * b2;
1702 * Reject intersection points behind a
1703 * camera.
1706 point icp = _pt1.wc(iw);
1707 point ics = _pt2.wc(iw);
1710 if (icp[2] >= 0 || ics[2] >= 0)
1711 return;
1714 * Reject clipping plane violations.
1717 if (iw[2] > front_clip
1718 || iw[2] < rear_clip)
1719 return;
1722 * Score the point.
1725 point ip = _pt1.wp_scaled(iw);
1727 point is = _pt2.wp_scaled(iw);
1729 _a->iw = iw;
1730 _a->ip = ip;
1731 _a->is = is;
1734 * Calculate score from color match. Assume for now
1735 * that the transformation can be approximated locally
1736 * with a translation.
1739 ale_pos score = 0;
1740 ale_pos divisor = 0;
1741 ale_pos l1_multiplier = 0.125;
1743 if (if1->in_bounds(ip.xy())
1744 && if2->in_bounds(is.xy())) {
1745 divisor += 1 - l1_multiplier;
1746 score += (1 - l1_multiplier)
1747 * (if1->get_bl(ip.xy()) - if2->get_bl(is.xy())).normsq();
1750 for (int iii = -1; iii <= 1; iii++)
1751 for (int jjj = -1; jjj <= 1; jjj++) {
1752 d2::point t(iii, jjj);
1754 if (!if1->in_bounds(ip.xy() + t)
1755 || !if2->in_bounds(is.xy() + t))
1756 continue;
1758 divisor += l1_multiplier;
1759 score += l1_multiplier
1760 * (if1->get_bl(ip.xy() + t) - if2->get_bl(is.xy() + t)).normsq();
1765 * Include third-camera contributions in the score.
1768 if (tc_multiplier != 0)
1769 for (unsigned int f = 0; f < d2::image_rw::count(); f++) {
1770 if (f == f1 || f == f2)
1771 continue;
1773 assert(cl);
1774 assert(cl->reference);
1775 assert(cl->reference[f]);
1777 pt _ptf = align::projective(f);
1778 _ptf.scale(1 / _ptf.scale_2d() / pow(2, ceil(input_decimation_exponent)));
1780 point p = _ptf.wp_scaled(iw);
1782 if (!cl->reference[f]->in_bounds(p.xy())
1783 || !if2->in_bounds(ip.xy()))
1784 continue;
1786 divisor += tc_multiplier;
1787 score += tc_multiplier
1788 * (if1->get_bl(ip.xy()) - cl->reference[f]->get_bl(p.xy())).normsq();
1791 *normalized_score = score / divisor;
1795 * Generate a map from scores to 3D points for various depths at point (i, j) in f1.
// NOTE(review): this function appears to be mid-refactor toward LOD
// images -- the parameters are now lod_image*, but the body still
// references f1, f2, _pt1 and _pt2, which are not declared in this
// view, and calls solve_point_pair(), which takes d2::image*.
// Confirm against the full file before modifying.
1797 static score_map p2f_score_map(unsigned int i, unsigned int j, lod_image *if1, lod_image *if2,
1798 std::vector<pt> pt_outputs) {
1800 score_map result;
1803 * Map two depths to the secondary frame.
// Project two extreme depths (+/-1000) of the primary-frame ray into
// the secondary frame; these two points define the epipolar line.
1806 point p1 = _pt2.wp_scaled(_pt1.pw_scaled(point(i, j, 1000)));
1807 point p2 = _pt2.wp_scaled(_pt1.pw_scaled(point(i, j, -1000)));
1810 * For cases where the mapped points define a
1811 * line and where points on the line fall
1812 * within the defined area of the frame,
1813 * determine the starting point for inspection.
1814 * In other cases, continue to the next pixel.
1817 ale_pos diff_i = p2[0] - p1[0];
1818 ale_pos diff_j = p2[1] - p1[1];
1819 ale_pos slope = diff_j / diff_i;
1821 if (isnan(slope)) {
1822 fprintf(stderr, "%d->%d (%d, %d) has undefined slope\n",
1823 f1, f2, i, j);
1824 return result;
1828 * Make absurdly large/small slopes either infinity, negative infinity, or zero.
1831 if (fabs(slope) > if2->width() * 100) {
1832 double zero = 0;
1833 double one = 1;
1834 double inf = one / zero;
1835 slope = inf;
1836 } else if (slope < 1 / (double) if2->height() / 100
1837 && slope > -1/ (double) if2->height() / 100) {
1838 slope = 0;
// Intersections of the epipolar line with the top, left and right
// borders of the secondary frame, used to pick a starting point.
1841 ale_pos top_intersect = p1[1] - p1[0] * slope;
1842 ale_pos lef_intersect = p1[0] - p1[1] / slope;
1843 ale_pos rig_intersect = p1[0] - (p1[1] - if2->width() + 2) / slope;
1844 ale_pos sp_i, sp_j;
1846 if (slope == 0) {
1847 sp_i = lef_intersect;
1848 sp_j = 0;
1849 } else if (finite(slope) && top_intersect >= 0 && top_intersect < if2->width() - 1) {
1850 sp_i = 0;
1851 sp_j = top_intersect;
1852 } else if (slope > 0 && lef_intersect >= 0 && lef_intersect <= if2->height() - 1) {
1853 sp_i = lef_intersect;
1854 sp_j = 0;
1855 } else if (slope < 0 && rig_intersect >= 0 && rig_intersect <= if2->height() - 1) {
1856 sp_i = rig_intersect;
1857 sp_j = if2->width() - 2;
1858 } else {
// Line does not cross the frame's defined area: no candidates.
1859 return result;
1863 * Determine increment values for examining
1864 * point, ensuring that incr_i is always
1865 * positive.
1868 ale_pos incr_i, incr_j;
1870 if (fabs(diff_i) > fabs(diff_j)) {
1871 incr_i = 1;
1872 incr_j = slope;
1873 } else if (slope > 0) {
1874 incr_i = 1 / slope;
1875 incr_j = 1;
1876 } else {
1877 incr_i = -1 / slope;
1878 incr_j = -1;
1882 * Examine regions near the projected line.
1885 for (ale_pos ii = sp_i, jj = sp_j;
1886 ii <= if2->height() - 1 && jj <= if2->width() - 1 && ii >= 0 && jj >= 0;
1887 ii += incr_i, jj += incr_j) {
1889 ale_real normalized_score;
1890 analytic _a;
1892 solve_point_pair(f1, f2, i, j, ii, jj, &normalized_score, &_a, _pt1, _pt2, if1, if2);
1895 * Reject points with undefined score.
1898 if (!finite(normalized_score))
1899 continue;
1902 * Add the point to the score map.
1905 result.insert(score_map_element(normalized_score, _a));
1909 * Iterate through regions and add new locations with sub-pixel resolution
// Resample around the best candidates (up to pairwise_ambiguity of
// them) at eighth-pixel offsets along the line direction.
1911 int accumulated_passes = 0;
1912 int max_acc_passes = pairwise_ambiguity;
1913 for (score_map::iterator smi = result.begin(); smi != result.end(); smi++) {
1914 point is = smi->second.is;
1916 if (accumulated_passes++ >= max_acc_passes)
1917 break;
1919 for (ale_pos epsilon = -0.5; epsilon <= 0.5; epsilon += 0.125) {
1921 if (fabs(epsilon) < 0.001)
1922 continue;
1924 ale_real normalized_score;
1925 analytic _a;
1927 solve_point_pair(f1, f2, i, j, is[0] + epsilon * incr_i, is[1] + epsilon * incr_j,
1928 &normalized_score, &_a, _pt1, _pt2, if1, if2);
1930 if (!finite(normalized_score))
1931 continue;
1933 result.insert(score_map_element(normalized_score, _a));
1937 return result;
1941 * Attempt to refine space around a point, to high and low resolutions
1942 * for two cameras, resulting in four resolutions in total.
1945 static void refine_space(point iw, pt _pt1, pt _pt2, std::vector<pt> pt_outputs) {
1947 space::traverse st = space::traverse::root();
1949 if (!st.includes(iw)) {
1950 assert(0);
1951 return;
1954 int camera_highres[2] = {0, 0};
1955 int camera_lowres[2] = {0, 0};
1957 std::vector<int> output_highres;
1958 std::vector<int> output_lowres;
1960 output_highres.resize(pt_outputs.size(), 0);
1961 output_lowres.resize(pt_outputs.size(), 0);
1964 * Loop until all resolutions of interest have been generated.
1967 for(;;) {
1969 point frame_min[2] = { point::posinf(), point::posinf() },
1970 frame_max[2] = { point::neginf(), point::neginf() };
1972 std::vector<point> output_max;
1973 std::vector<point> output_min;
1975 output_max.resize(pt_outputs.size(), point::posinf());
1976 output_min.resize(pt_outputs.size(), point::neginf());
1978 point p[2] = { st.get_min(), st.get_max() };
1981 * Cycle through the corner points bounding the
1982 * subspace to determine a bounding box.
1984 * NB: This code is not identical to
1985 * get_view_local_bb(), as it does not clip the
1986 * results.
1989 for (int ibit = 0; ibit < 2; ibit++)
1990 for (int jbit = 0; jbit < 2; jbit++)
1991 for (int kbit = 0; kbit < 2; kbit++) {
1992 point pp = point(p[ibit][0], p[jbit][1], p[kbit][2]);
1994 point ppp[2] = {_pt1.wp_scaled(pp), _pt2.wp_scaled(pp)};
1997 * Inputs
2000 for (int f = 0; f < 2; f++)
2001 for (int d = 0; d < 3; d++) {
2002 if (ppp[f][d] < frame_min[f][d] || isnan(ppp[f][d]))
2003 frame_min[f][d] = ppp[f][d];
2004 if (ppp[f][d] > frame_max[f][d] || isnan(ppp[f][d]))
2005 frame_max[f][d] = ppp[f][d];
2009 * Outputs
2012 for (unsigned int n = 0; n < pt_outputs.size(); n++) {
2014 point ppp_pt = pt_outputs[n].wp_scaled(pp);
2016 for (int d = 0; d < 3; d++) {
2017 if (ppp_pt[d] < output_min[n][d] || isnan(ppp_pt[d]))
2018 output_min[n][d] = ppp_pt[d];
2019 if (ppp_pt[d] > output_max[n][d] || isnan(ppp_pt[d]))
2020 output_max[n][d] = ppp_pt[d];
2026 * Generate any new desired spatial registers.
2030 * Inputs
2033 for (int f = 0; f < 2; f++) {
2036 * Low resolution
2039 if (frame_max[f][0] - frame_min[f][0] < 2
2040 && frame_max[f][1] - frame_min[f][1] < 2
2041 && camera_lowres[f] == 0) {
2042 spatial_info_map[st.get_node()];
2043 camera_lowres[f] = 1;
2047 * High resolution.
2050 if (frame_max[f][0] - frame_min[f][0] < 1
2051 && frame_max[f][1] - frame_min[f][1] < 1
2052 && camera_highres[f] == 0) {
2053 spatial_info_map[st.get_node()];
2054 camera_highres[f] = 1;
2059 * Outputs
2062 for (unsigned int n = 0; n < pt_outputs.size(); n++) {
2065 * Low resolution
2068 if (output_max[n][0] - output_min[n][0] < 2
2069 && output_max[n][1] - output_min[n][1] < 2
2070 && output_lowres[n] == 0) {
2071 spatial_info_map[st.get_node()];
2072 output_lowres[n] = 1;
2076 * High resolution.
2079 if (output_max[n][0] - output_min[n][0] < 1
2080 && output_max[n][1] - output_min[n][1] < 1
2081 && camera_highres[n] == 0) {
2082 spatial_info_map[st.get_node()];
2083 output_highres[n] = 1;
2088 * Check for completion
2091 if (camera_highres[0]
2092 && camera_highres[1]
2093 && camera_lowres[0]
2094 && camera_lowres[1]
2095 && !count(output_lowres.begin(), output_lowres.end(), 0)
2096 && !count(output_highres.begin(), output_highres.end(), 0));
2097 return;
2100 * Check precision before analyzing space further.
2103 if (st.precision_wall()) {
2104 fprintf(stderr, "\n\n*** Error: reached subspace precision wall ***\n\n");
2105 assert(0);
2106 return;
2109 if (st.positive().includes(iw)) {
2110 st = st.positive();
2111 total_tsteps++;
2112 } else if (st.negative().includes(iw)) {
2113 st = st.negative();
2114 total_tsteps++;
2115 } else {
2116 fprintf(stderr, "failed iw = (%f, %f, %f)\n",
2117 iw[0], iw[1], iw[2]);
2118 assert(0);
2124 * Analyze space in a manner dependent on the score map.
2127 static void analyze_space_from_map(unsigned int f1, unsigned int f2, unsigned int i,
2128 unsigned int j, pt _pt1, pt _pt2, score_map _sm, std::vector<pt> pt_outputs) {
2130 int accumulated_ambiguity = 0;
2131 int max_acc_amb = pairwise_ambiguity;
2133 for(score_map::iterator smi = _sm.begin(); smi != _sm.end(); smi++) {
2135 point iw = smi->second.iw;
2136 point ip = smi->second.ip;
2137 // point is = smi->second.is;
2139 if (accumulated_ambiguity++ >= max_acc_amb)
2140 break;
2142 total_ambiguity++;
2145 * Attempt to refine space around the intersection point.
2148 refine_space(iw, _pt1, _pt2, pt_outputs);
2152 static void refine_space_for_output(pt _pt, space::traverse st = space::traverse::root()) {
2153 if (!spatial_info_map.count(st.get_node())) {
2154 if (st.get_node()->positive)
2155 refine_space_for_output(_pt, st.positive());
2156 if (st.get_node()->negative)
2157 refine_space_for_output(_pt, st.negative());
2158 return;
2161 point bb[2];
2162 _pt.get_view_local_bb(st, bb);
2164 if (bb[0].xy().lengthtosq(bb[1].xy()) < 2)
2165 return;
2167 if (!_pt.scaled_in_bounds(bb[0]) || !_pt.scaled_in_bounds(bb[1]))
2168 return;
2170 spatial_info_map[st.positive().get_node()];
2171 spatial_info_map[st.negative().get_node()];
2173 refine_space_for_output(_pt, st.positive());
2174 refine_space_for_output(_pt, st.negative());
2177 static void refine_space_for_output(const char *d_out[], const char *v_out[],
2178 std::map<const char *, pt> *d3_depth_pt,
2179 std::map<const char *, pt> *d3_output_pt) {
2181 for (unsigned int f = 0; f < d2::image_rw::count(); f++)
2182 if (d_out[f] || v_out[f])
2183 refine_space_for_output(d3::align::projective(f));
2185 for (std::map<const char *, pt>::iterator i = d3_depth_pt->begin(); i != d3_depth_pt->end(); i++)
2186 refine_space_for_output(i->second);
2188 for (std::map<const char *, pt>::iterator i = d3_output_pt->begin(); i != d3_output_pt->end(); i++)
2189 refine_space_for_output(i->second);
2193 * Make pt list.
2195 static std::vector<pt> make_pt_list(const char *d_out[], const char *v_out[],
2196 std::map<const char *, pt> *d3_depth_pt,
2197 std::map<const char *, pt> *d3_output_pt) {
2199 std::vector<pt> result;
2201 for (unsigned int n = 0; n < d2::image_rw::count(); n++) {
2202 if (d_out[n] || v_out[n]) {
2203 result.push_back(align::projective(n));
2207 for (std::map<const char *, pt>::iterator i = d3_depth_pt->begin(); i != d3_depth_pt->end(); i++) {
2208 result.push_back(i->second);
2211 for (std::map<const char *, pt>::iterator i = d3_output_pt->begin(); i != d3_output_pt->end(); i++) {
2212 result.push_back(i->second);
2215 return result;
2220 * Initialize space and identify regions of interest for the adaptive
2221 * subspace model.
// NOTE(review): this function appears to be mid-refactor toward LOD
// image pyramids -- several expressions below dereference the
// std::vector objects if1/if2 as if they were image pointers, and
// p2f_score_map() is called with the vectors though its parameters
// are lod_image*.  Confirm intended types against the full file
// before modifying.
2223 static void make_space(const char *d_out[], const char *v_out[],
2224 std::map<const char *, pt> *d3_depth_pt,
2225 std::map<const char *, pt> *d3_output_pt) {
2227 fprintf(stderr, "Subdividing 3D space");
2229 std::vector<pt> pt_outputs = make_pt_list(d_out, v_out, d3_depth_pt, d3_output_pt);
2232 * Initialize root space.
2235 space::init_root();
2238 * Subdivide space to resolve intensity matches between pairs
2239 * of frames.
2242 for (unsigned int f1 = 0; f1 < d2::image_rw::count(); f1++) {
2244 if (!d_out[f1] && !v_out[f1] && !d3_depth_pt->size()
2245 && !d3_output_pt->size() && strcmp(pairwise_comparisons, "all"))
2246 continue;
// if1 holds an LOD pyramid for frame f1: full resolution at index 0,
// each subsequent entry halved.
2248 std::vector<const d2::image *> if1;
2250 if1.push_back(d2::image_rw::copy(f1, "3D reference image"));
2251 assert(if1.back());
2253 ale_pos decimation_index = input_decimation_exponent;
// NOTE(review): if1->height() dereferences the vector, not an image;
// presumably if1.back()->height() was intended -- confirm.
2254 while (decimation_index > 0
2255 && if1->height() > 2
2256 && if1->width() > 2) {
2258 if1.push_back(if1.back()->scale_by_half("3D, reduced LOD"));
2259 assert(if1.back());
2261 decimation_index -= 1;
2264 if (decimation_index > 0) {
2265 fprintf(stderr, "Error: --di argument is too large.\n");
2266 exit(1);
2270 for (unsigned int f2 = 0; f2 < d2::image_rw::count(); f2++) {
2272 if (f1 == f2)
2273 continue;
// Build the matching LOD pyramid for the secondary frame f2.
2275 std::vector<const d2::image *> if2;
2277 if2.push_back(d2::image_rw::copy(f2, "3D reference image"));
2278 assert(if2.back());
2280 ale_pos decimation_index = input_decimation_exponent;
// NOTE(review): same vector-dereference issue as the if1 loop above.
2281 while (decimation_index > 0
2282 && if2->height() > 2
2283 && if2->width() > 2) {
2285 if2.push_back(if2.back()->scale_by_half("3D, reduced LOD"));
2286 assert(if2.back());
2288 decimation_index -= 1;
2291 if (decimation_index > 0) {
2292 fprintf(stderr, "Error: --di argument is too large.\n");
2293 exit(1);
// Camera transformations rescaled to the decimated resolution.
2296 pt _pt1 = align::projective(f1);
2297 pt _pt2 = align::projective(f2);
2299 _pt1.scale(1 / _pt1.scale_2d() / pow(2, ceil(input_decimation_exponent)));
2300 _pt2.scale(1 / _pt2.scale_2d() / pow(2, ceil(input_decimation_exponent)));
2303 * Iterate over all points in the primary frame.
2306 for (unsigned int i = 0; i < if1->height(); i++)
2307 for (unsigned int j = 0; j < if1->width(); j++) {
2309 total_pixels++;
2312 * Generate a map from scores to 3D points for
2313 * various depths in f1.
// NOTE(review): p2f_score_map's declared parameters are lod_image*,
// but vectors are passed here -- mid-refactor, confirm.
2316 score_map _sm = p2f_score_map(i, j, if1, if2, pt_outputs);
2319 * Analyze space in a manner dependent on the score map.
2322 analyze_space_from_map(f1, f2, i, j, _pt1, _pt2, _sm, pt_outputs);
// NOTE(review): 'delete if2' / 'delete if1' target local std::vector
// objects; presumably the images pushed into them are what should be
// freed -- confirm against the full file.
2324 delete if2;
2326 delete if1;
2330 * This is expensive.
2333 // refine_space_for_output(d_out, v_out, d3_depth_pt, d3_output_pt);
2335 fprintf(stderr, ".\n");
2340 * Update spatial information structures.
2342 * XXX: the name of this function is horribly misleading. There isn't
2343 * even a 'search depth' any longer, since there is no longer any
2344 * bounded DFS occurring.
2346 static void reduce_cost_to_search_depth(d2::exposure *exp_out, int inc_bit) {
2349 * Subspace model
2352 for (unsigned int i = 0; i < ou_iterations; i++)
2353 spatial_info_update();
2356 #if 0
2358 * Describe a scene to a renderer
2360 static void describe(render *r) {
2362 #endif
2365 #endif