// Extraction artifact (gitweb blob header), preserved for provenance:
// commit "d2/align: Migrate code (indicated as broken by GCC) to Libale."
// [Ale.git] / d3 / scene.h
// blob 10833cc731534e46665975ec45d0c10a193a5350
1 // Copyright 2003, 2004, 2005, 2006 David Hilvert <dhilvert@auricle.dyndns.org>,
2 // <dhilvert@ugcs.caltech.edu>
4 /* This file is part of the Anti-Lamenessing Engine.
6 The Anti-Lamenessing Engine is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 The Anti-Lamenessing Engine is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with the Anti-Lamenessing Engine; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
22 * d3/scene.h: Representation of a 3D scene.
25 #ifndef __scene_h__
26 #define __scene_h__
28 #include "point.h"
31 * View angle multiplier.
33 * Setting this to a value larger than one can be useful for debugging.
36 #define VIEW_ANGLE_MULTIPLIER 1
38 class scene {
41 * Clipping planes
43 static ale_pos front_clip;
44 static ale_pos rear_clip;
47 * Decimation exponents for geometry
49 static int primary_decimation_upper;
50 static int input_decimation_lower;
51 static int output_decimation_preferred;
54 * Output clipping
56 static int output_clip;
59 * Model files
61 static const char *load_model_name;
62 static const char *save_model_name;
65 * Occupancy attenuation
68 static double occ_att;
71 * Normalization of output by weight
74 static int normalize_weights;
77 * Filtering data
80 static int use_filter;
81 static const char *d3chain_type;
84 * Falloff exponent
87 static ale_real falloff_exponent;
90 * Third-camera error multiplier
92 static double tc_multiplier;
95 * Occupancy update iterations
97 static unsigned int ou_iterations;
100 * Pairwise ambiguity
102 static unsigned int pairwise_ambiguity;
105 * Pairwise comparisons
107 static const char *pairwise_comparisons;
110 * 3D Post-exclusion
112 static int d3px_count;
113 static double *d3px_parameters;
116 * Nearness threshold
118 static const ale_real nearness;
121 * Encounter threshold for defined pixels.
123 static double encounter_threshold;
126 * Median calculation radii.
128 static double depth_median_radius;
129 static double diff_median_radius;
132 * Flag for subspace traversal.
134 static int subspace_traverse;
137 * Structure to hold input frame information at levels of
138 * detail between full detail and full decimation.
140 class lod_image {
141 unsigned int f;
142 unsigned int entries;
143 std::vector<const d2::image *> im;
144 std::vector<pt> transformation;
146 public:
148 * Constructor
150 lod_image(unsigned int _f) {
152 pt _pt;
154 f = _f;
155 im.push_back(d2::image_rw::copy(f, "3D reference image"));
156 assert(im.back());
157 entries = 1;
158 _pt = d3::align::projective(f);
159 _pt.scale(1 / _pt.scale_2d());
160 transformation.push_back(_pt);
162 while (im.back()->height() > 4
163 && im.back()->width() > 4) {
165 im.push_back(im.back()->scale_by_half("3D, reduced LOD"));
166 assert(im.back());
168 _pt.scale(1 / _pt.scale_2d() / pow((ale_pos) 2, entries));
169 transformation.push_back(_pt);
171 entries += 1;
176 * Get the number of scales
178 unsigned int count() {
179 return entries;
183 * Get an image.
185 const d2::image *get_image(unsigned int i) {
186 assert(i < entries);
187 return im[i];
190 int in_bounds(d2::point p) {
191 return im[0]->in_bounds(p);
195 * Get a 'trilinear' color. We currently don't do interpolation
196 * between levels of detail; hence, it's discontinuous in tl_coord.
198 d2::pixel get_tl(d2::point p, ale_pos tl_coord) {
200 assert(in_bounds(p));
202 tl_coord = round(tl_coord);
204 if (tl_coord >= entries)
205 tl_coord = entries;
206 if (tl_coord < 0)
207 tl_coord = 0;
209 p = p / (ale_pos) pow(2, tl_coord);
211 unsigned int itlc = (unsigned int) tl_coord;
213 if (p[0] > im[itlc]->height() - 1)
214 p[0] = im[itlc]->height() - 1;
215 if (p[1] > im[itlc]->width() - 1)
216 p[1] = im[itlc]->width() - 1;
218 assert(p[0] >= 0);
219 assert(p[1] >= 0);
221 return im[itlc]->get_bl(p);
224 d2::pixel get_max_diff(d2::point p, ale_pos tl_coord) {
225 assert(in_bounds(p));
227 tl_coord = round(tl_coord);
229 if (tl_coord >= entries)
230 tl_coord = entries;
231 if (tl_coord < 0)
232 tl_coord = 0;
234 p = p / (ale_pos) pow(2, tl_coord);
236 unsigned int itlc = (unsigned int) tl_coord;
238 if (p[0] > im[itlc]->height() - 1)
239 p[0] = im[itlc]->height() - 1;
240 if (p[1] > im[itlc]->width() - 1)
241 p[1] = im[itlc]->width() - 1;
243 assert(p[0] >= 0);
244 assert(p[1] >= 0);
246 return im[itlc]->get_max_diff(p);
250 * Get the transformation
252 pt get_t(unsigned int i) {
253 assert(i >= 0);
254 assert(i < entries);
255 return transformation[i];
259 * Get the camera origin in world coordinates
261 point origin() {
262 return transformation[0].origin();
266 * Destructor
268 ~lod_image() {
269 for (unsigned int i = 0; i < entries; i++) {
270 delete im[i];
276 * Structure to hold weight information for reference images.
278 class ref_weights {
279 unsigned int f;
280 std::vector<d2::image *> weights;
281 pt transformation;
282 int tc_low;
283 int tc_high;
284 int initialized;
286 void set_image(d2::image *im, ale_real value) {
287 assert(im);
288 for (unsigned int i = 0; i < im->height(); i++)
289 for (unsigned int j = 0; j < im->width(); j++)
290 im->set_pixel(i, j, d2::pixel(value, value, value));
293 d2::image *make_image(ale_pos sf, ale_real init_value = 0) {
294 d2::image *result = d2::new_image_ale_real(
295 (unsigned int) ceil(transformation.unscaled_height() * sf),
296 (unsigned int) ceil(transformation.unscaled_width() * sf), 3);
297 assert(result);
299 if (init_value != 0)
300 set_image(result, init_value);
302 return result;
305 public:
308 * Explicit weight subtree
310 struct subtree {
311 ale_real node_value;
312 subtree *children[2][2];
314 subtree(ale_real nv, subtree *a, subtree *b, subtree *c, subtree *d) {
315 node_value = nv;
316 children[0][0] = a;
317 children[0][1] = b;
318 children[1][0] = c;
319 children[1][1] = d;
322 ~subtree() {
323 for (int i = 0; i < 2; i++)
324 for (int j = 0; j < 2; j++)
325 delete children[i][j];
330 * Constructor
332 ref_weights(unsigned int _f) {
333 f = _f;
334 transformation = d3::align::projective(f);
335 initialized = 0;
339 * Check spatial bounds.
341 int in_spatial_bounds(point p) {
343 if (!p.defined())
344 return 0;
346 if (p[0] < 0)
347 return 0;
348 if (p[1] < 0)
349 return 0;
350 if (p[0] > transformation.unscaled_height() - 1)
351 return 0;
352 if (p[1] > transformation.unscaled_width() - 1)
353 return 0;
354 if (p[2] >= 0)
355 return 0;
357 return 1;
360 int in_spatial_bounds(const space::traverse &t) {
361 point p = transformation.centroid(t);
362 return in_spatial_bounds(p);
366 * Increase resolution to the given level.
368 void increase_resolution(int tc, unsigned int i, unsigned int j) {
369 d2::image *im = weights[tc - tc_low];
370 assert(im);
371 assert(i <= im->height() - 1);
372 assert(j <= im->width() - 1);
375 * Check for the cases known to have no lower level of detail.
378 if (im->get_chan(i, j, 0) == -1)
379 return;
381 if (tc == tc_high)
382 return;
384 increase_resolution(tc + 1, i / 2, j / 2);
387 * Load the lower-level image structure.
390 d2::image *iim = weights[tc + 1 - tc_low];
392 assert(iim);
393 assert(i / 2 <= iim->height() - 1);
394 assert(j / 2 <= iim->width() - 1);
397 * Check for the case where no lower level of detail is
398 * available.
401 if (iim->get_chan(i / 2, j / 2, 0) == -1)
402 return;
405 * Spread out the lower level of detail among (uninitialized)
406 * peer values.
409 for (unsigned int ii = (i / 2) * 2; ii < (i / 2) * 2 + 1; ii++)
410 for (unsigned int jj = (j / 2) * 2; jj < (j / 2) * 2 + 1; jj++) {
411 assert(ii <= im->height() - 1);
412 assert(jj <= im->width() - 1);
413 assert(im->get_chan(ii, jj, 0) == 0);
415 im->set_pixel(ii, jj, iim->get_pixel(i / 2, j / 2));
419 * Set the lower level of detail to point here.
422 iim->set_chan(i / 2, j / 2, 0, -1);
426 * Add weights to positive higher-resolution pixels where
427 * found when their current values match the given subtree
428 * values; set negative pixels to zero and return 0 if no
429 * positive higher-resolution pixels are found.
431 int add_partial(int tc, unsigned int i, unsigned int j, ale_real weight, subtree *st) {
432 d2::image *im = weights[tc - tc_low];
433 assert(im);
435 if (i == im->height() - 1
436 || j == im->width() - 1) {
437 return 1;
440 assert(i <= im->height() - 1);
441 assert(j <= im->width() - 1);
444 * Check for positive values.
447 if (im->get_chan(i, j, 0) > 0) {
448 if (st && st->node_value == im->get_pixel(i, j)[0])
449 im->set_chan(i, j, 0, (ale_real) im->get_chan(i, j, 0)
450 + weight * (1 - im->get_pixel(i, j)[0]));
451 return 1;
455 * Handle the case where there are no higher levels of detail.
458 if (tc == tc_low) {
459 if (im->get_chan(i, j, 0) != 0) {
460 fprintf(stderr, "failing assertion: im[%d]->pix(%d, %d)[0] == %g\n", tc, i, j,
461 (double) im->get_chan(i, j, 0));
463 assert(im->get_chan(i, j, 0) == 0);
464 return 0;
468 * Handle the case where higher levels of detail are available.
471 int success[2][2];
473 for (int ii = 0; ii < 2; ii++)
474 for (int jj = 0; jj < 2; jj++)
475 success[ii][jj] = add_partial(tc - 1, i * 2 + ii, j * 2 + jj, weight,
476 st ? st->children[ii][jj] : NULL);
478 if (!success[0][0]
479 && !success[0][1]
480 && !success[1][0]
481 && !success[1][1]) {
482 im->set_chan(i, j, 0, 0);
483 return 0;
486 d2::image *iim = weights[tc - 1 - tc_low];
487 assert(iim);
489 for (int ii = 0; ii < 2; ii++)
490 for (int jj = 0; jj < 2; jj++)
491 if (success[ii][jj] == 0) {
492 assert(i * 2 + ii < iim->height());
493 assert(j * 2 + jj < iim->width());
495 iim->set_chan(i * 2 + ii, j * 2 + jj, 0, weight);
498 im->set_chan(i, j, 0, -1);
500 return 1;
504 * Add weight.
506 void add_weight(int tc, unsigned int i, unsigned int j, ale_real weight, subtree *st) {
508 assert (weight >= 0);
510 d2::image *im = weights[tc - tc_low];
511 assert(im);
513 // fprintf(stderr, "[aw tc=%d i=%d j=%d imax=%d jmax=%d]\n",
514 // tc, i, j, im->height(), im->width());
516 assert(i <= im->height() - 1);
517 assert(j <= im->width() - 1);
520 * Increase resolution, if necessary
523 increase_resolution(tc, i, j);
526 * Attempt to add the weight at levels of detail
527 * where weight is defined.
530 if (add_partial(tc, i, j, weight, st))
531 return;
534 * If no weights are defined at any level of detail,
535 * then set the weight here.
538 im->set_chan(i, j, 0, weight);
541 void add_weight(int tc, d2::point p, ale_real weight, subtree *st) {
543 assert (weight >= 0);
545 p *= pow(2, -tc);
547 unsigned int i = (unsigned int) floor(p[0]);
548 unsigned int j = (unsigned int) floor(p[1]);
550 add_weight(tc, i, j, weight, st);
553 void add_weight(const space::traverse &t, ale_real weight, subtree *st) {
555 assert (weight >= 0);
557 if (weight == 0)
558 return;
560 ale_pos tc = transformation.trilinear_coordinate(t);
561 point p = transformation.centroid(t);
562 assert(in_spatial_bounds(p));
564 tc = round(tc);
567 * Establish a reasonable (?) upper bound on resolution.
570 if (tc < input_decimation_lower) {
571 weight /= pow(4, (input_decimation_lower - tc));
572 tc = input_decimation_lower;
576 * Initialize, if necessary.
579 if (!initialized) {
580 tc_low = tc_high = (int) tc;
582 ale_pos sf = pow(2, -tc);
584 weights.push_back(make_image(sf));
586 initialized = 1;
590 * Check resolution bounds
593 assert (tc_low <= tc_high);
596 * Generate additional levels of detail, if necessary.
599 while (tc < tc_low) {
600 tc_low--;
602 ale_pos sf = pow(2, -tc_low);
604 weights.insert(weights.begin(), make_image(sf));
607 while (tc > tc_high) {
608 tc_high++;
610 ale_pos sf = pow(2, -tc_high);
612 weights.push_back(make_image(sf, -1));
615 add_weight((int) tc, p.xy(), weight, st);
619 * Get weight
621 ale_real get_weight(int tc, unsigned int i, unsigned int j) {
623 // fprintf(stderr, "[gw tc=%d i=%u j=%u tclow=%d tchigh=%d]\n",
624 // tc, i, j, tc_low, tc_high);
626 if (tc < tc_low || !initialized)
627 return 0;
629 if (tc > tc_high) {
630 return (get_weight(tc - 1, i * 2 + 0, j * 2 + 0)
631 + get_weight(tc - 1, i * 2 + 1, j * 2 + 0)
632 + get_weight(tc - 1, i * 2 + 1, j * 2 + 1)
633 + get_weight(tc - 1, i * 2 + 0, j * 2 + 1)) / 4;
636 assert(weights.size() > (unsigned int) (tc - tc_low));
638 d2::image *im = weights[tc - tc_low];
639 assert(im);
641 if (i == im->height())
642 return 1;
643 if (j == im->width())
644 return 1;
646 assert(i < im->height());
647 assert(j < im->width());
649 if (im->get_chan(i, j, 0) == -1) {
650 return (get_weight(tc - 1, i * 2 + 0, j * 2 + 0)
651 + get_weight(tc - 1, i * 2 + 1, j * 2 + 0)
652 + get_weight(tc - 1, i * 2 + 1, j * 2 + 1)
653 + get_weight(tc - 1, i * 2 + 0, j * 2 + 1)) / 4;
656 if (im->get_chan(i, j, 0) == 0) {
657 if (tc == tc_high)
658 return 0;
659 if (weights[tc - tc_low + 1]->get_chan(i / 2, j / 2, 0) == -1)
660 return 0;
661 return get_weight(tc + 1, i / 2, j / 2);
664 return im->get_chan(i, j, 0);
667 ale_real get_weight(int tc, d2::point p) {
669 p *= pow(2, -tc);
671 unsigned int i = (unsigned int) floor(p[0]);
672 unsigned int j = (unsigned int) floor(p[1]);
674 return get_weight(tc, i, j);
677 ale_real get_weight(const space::traverse &t) {
678 ale_pos tc = transformation.trilinear_coordinate(t);
679 point p = transformation.centroid(t);
680 assert(in_spatial_bounds(p));
682 if (!initialized)
683 return 0;
685 tc = round(tc);
687 if (tc < tc_low) {
688 tc = tc_low;
691 return get_weight((int) tc, p.xy());
695 * Check whether a subtree is simple.
697 int is_simple(subtree *s) {
698 assert (s);
700 if (s->node_value == -1
701 && s->children[0][0] == NULL
702 && s->children[0][1] == NULL
703 && s->children[1][0] == NULL
704 && s->children[1][1] == NULL)
705 return 1;
707 return 0;
711 * Get a weight subtree.
713 subtree *get_subtree(int tc, unsigned int i, unsigned int j) {
716 * tc > tc_high is handled recursively.
719 if (tc > tc_high) {
720 subtree *result = new subtree(-1,
721 get_subtree(tc - 1, i * 2 + 0, j * 2 + 0),
722 get_subtree(tc - 1, i * 2 + 0, j * 2 + 1),
723 get_subtree(tc - 1, i * 2 + 1, j * 2 + 0),
724 get_subtree(tc - 1, i * 2 + 1, j * 2 + 1));
726 if (is_simple(result)) {
727 delete result;
728 return NULL;
731 return result;
734 assert(tc >= tc_low);
735 assert(weights[tc - tc_low]);
737 d2::image *im = weights[tc - tc_low];
740 * Rectangular images will, in general, have
741 * out-of-bounds tree sections. Handle this case.
744 if (i >= im->height())
745 return NULL;
746 if (j >= im->width())
747 return NULL;
750 * -1 weights are handled recursively
753 if (im->get_chan(i, j, 0) == -1) {
754 subtree *result = new subtree(-1,
755 get_subtree(tc - 1, i * 2 + 0, j * 2 + 0),
756 get_subtree(tc - 1, i * 2 + 0, j * 2 + 1),
757 get_subtree(tc - 1, i * 2 + 1, j * 2 + 0),
758 get_subtree(tc - 1, i * 2 + 1, j * 2 + 1));
760 if (is_simple(result)) {
761 im->set_chan(i, j, 0, 0);
762 delete result;
763 return NULL;
766 return result;
770 * Zero weights have NULL subtrees.
773 if (im->get_chan(i, j, 0) == 0)
774 return NULL;
777 * Handle the remaining case.
780 return new subtree(im->get_chan(i, j, 0), NULL, NULL, NULL, NULL);
783 subtree *get_subtree(int tc, d2::point p) {
784 p *= pow(2, -tc);
786 unsigned int i = (unsigned int) floor(p[0]);
787 unsigned int j = (unsigned int) floor(p[1]);
789 return get_subtree(tc, i, j);
792 subtree *get_subtree(const space::traverse &t) {
793 ale_pos tc = transformation.trilinear_coordinate(t);
794 point p = transformation.centroid(t);
795 assert(in_spatial_bounds(p));
797 if (!initialized)
798 return NULL;
800 if (tc < input_decimation_lower)
801 tc = input_decimation_lower;
803 tc = round(tc);
805 if (tc < tc_low)
806 return NULL;
808 return get_subtree((int) tc, p.xy());
812 * Destructor
814 ~ref_weights() {
815 for (unsigned int i = 0; i < weights.size(); i++) {
816 delete weights[i];
822 * Resolution check.
824 static int resolution_ok(pt transformation, ale_pos tc) {
826 if (pow(2, tc) > transformation.unscaled_height()
827 || pow(2, tc) > transformation.unscaled_width())
828 return 0;
830 if (tc < input_decimation_lower - 1.5)
831 return 0;
833 return 1;
837 * Structure to hold input frame information at all levels of detail.
839 class lod_images {
842 * All images.
845 std::vector<lod_image *> images;
847 public:
849 lod_images() {
850 images.resize(d2::image_rw::count(), NULL);
853 unsigned int count() {
854 return d2::image_rw::count();
857 void open(unsigned int f) {
858 assert (images[f] == NULL);
860 if (images[f] == NULL)
861 images[f] = new lod_image(f);
864 void open_all() {
865 for (unsigned int f = 0; f < d2::image_rw::count(); f++)
866 open(f);
869 lod_image *get(unsigned int f) {
870 assert (images[f] != NULL);
871 return images[f];
874 void close(unsigned int f) {
875 assert (images[f] != NULL);
876 delete images[f];
877 images[f] = NULL;
880 void close_all() {
881 for (unsigned int f = 0; f < d2::image_rw::count(); f++)
882 close(f);
885 ~lod_images() {
886 close_all();
891 * All levels-of-detail
894 static struct lod_images *al;
897 * Data structure for storing best encountered subspace candidates.
899 class candidates {
900 std::vector<std::vector<std::pair<ale_pos, ale_real> > > levels;
901 int image_index;
902 unsigned int height;
903 unsigned int width;
906 * Point p is in world coordinates.
908 void generate_subspace(point iw, ale_pos diagonal) {
910 // fprintf(stderr, "[gs iw=%f %f %f d=%f]\n",
911 // iw[0], iw[1], iw[2], diagonal);
913 space::traverse st = space::traverse::root();
915 if (!st.includes(iw)) {
916 assert(0);
917 return;
920 int highres = 0;
921 int lowres = 0;
924 * Loop until resolutions of interest have been generated.
927 for(;;) {
929 ale_pos current_diagonal = (st.get_max() - st.get_min()).norm();
931 assert(!isnan(current_diagonal));
934 * Generate any new desired spatial registers.
938 * Inputs
941 for (int f = 0; f < 2; f++) {
944 * Low resolution
947 if (current_diagonal < 2 * diagonal
948 && lowres == 0) {
949 if (spatial_info_map.find(st.get_node()) == spatial_info_map.end()) {
950 spatial_info_map[st.get_node()];
951 ui::get()->d3_increment_spaces();
953 lowres = 1;
957 * High resolution.
960 if (current_diagonal < diagonal
961 && highres == 0) {
962 if (spatial_info_map.find(st.get_node()) == spatial_info_map.end()) {
963 spatial_info_map[st.get_node()];
964 ui::get()->d3_increment_spaces();
966 highres = 1;
971 * Check for completion
974 if (highres && lowres)
975 return;
978 * Check precision before analyzing space further.
981 if (st.precision_wall()) {
982 fprintf(stderr, "\n\n*** Error: reached subspace precision wall ***\n\n");
983 assert(0);
984 return;
987 if (st.positive().includes(iw)) {
988 st = st.positive();
989 total_tsteps++;
990 } else if (st.negative().includes(iw)) {
991 st = st.negative();
992 total_tsteps++;
993 } else {
994 fprintf(stderr, "failed iw = (%f, %f, %f)\n",
995 (double) iw[0], (double) iw[1], (double) iw[2]);
996 assert(0);
1001 public:
1002 candidates(int f) {
1004 image_index = f;
1005 height = (unsigned int) al->get(f)->get_t(0).unscaled_height();
1006 width = (unsigned int) al->get(f)->get_t(0).unscaled_width();
1009 * Is this necessary?
1012 levels.resize(primary_decimation_upper - input_decimation_lower + 1);
1013 for (int l = input_decimation_lower; l <= primary_decimation_upper; l++) {
1014 levels[l - input_decimation_lower].resize((unsigned int) (floor(height / pow(2, l))
1015 * floor(width / pow(2, l))
1016 * pairwise_ambiguity),
1017 std::pair<ale_pos, ale_real>(0, 0));
1022 * Point p is expected to be in local projective coordinates.
1025 void add_candidate(point p, int tc, ale_pos score) {
1026 assert(tc <= primary_decimation_upper);
1027 assert(tc >= input_decimation_lower);
1028 assert(p[2] < 0);
1029 assert(score >= 0);
1031 int i = (unsigned int) floor(p[0] / (ale_pos) pow(2, tc));
1032 int j = (unsigned int) floor(p[1] / (ale_pos) pow(2, tc));
1034 int swidth = (int) floor(width / pow(2, tc));
1036 assert(j < swidth);
1037 assert(i < (int) floor(height / pow(2, tc)));
1039 for (unsigned int k = 0; k < pairwise_ambiguity; k++) {
1040 std::pair<ale_pos, ale_real> *pk =
1041 &(levels[tc - input_decimation_lower][i * swidth * pairwise_ambiguity + j * pairwise_ambiguity + k]);
1043 if (pk->first != 0 && score >= (ale_pos) pk->second)
1044 continue;
1046 if (i == 1 && j == 1 && tc == 4)
1047 fprintf(stderr, "[ac p2=%f score=%f]\n", (double) p[2], (double) score);
1049 ale_pos tp = pk->first;
1050 ale_real tr = pk->second;
1052 pk->first = p[2];
1053 pk->second = score;
1055 p[2] = tp;
1056 score = tr;
1058 if (p[2] == 0)
1059 break;
1064 * Generate subspaces for candidates.
1067 void generate_subspaces() {
1069 fprintf(stderr, "+");
1070 for (int l = input_decimation_lower; l <= primary_decimation_upper; l++) {
1071 unsigned int sheight = (unsigned int) floor(height / pow(2, l));
1072 unsigned int swidth = (unsigned int) floor(width / pow(2, l));
1074 for (unsigned int i = 0; i < sheight; i++)
1075 for (unsigned int j = 0; j < swidth; j++)
1076 for (unsigned int k = 0; k < pairwise_ambiguity; k++) {
1077 std::pair<ale_pos, ale_real> *pk =
1078 &(levels[l - input_decimation_lower]
1079 [i * swidth * pairwise_ambiguity + j * pairwise_ambiguity + k]);
1081 if (pk->first == 0) {
1082 fprintf(stderr, "o");
1083 continue;
1084 } else {
1085 fprintf(stderr, "|");
1088 ale_pos si = i * pow(2, l) + ((l > 0) ? pow(2, l - 1) : 0);
1089 ale_pos sj = j * pow(2, l) + ((l > 0) ? pow(2, l - 1) : 0);
1091 // fprintf(stderr, "[gss l=%d i=%d j=%d d=%g]\n", l, i, j, pk->first);
1093 point p = al->get(image_index)->get_t(0).pw_unscaled(point(si, sj, pk->first));
1095 generate_subspace(p,
1096 al->get(image_index)->get_t(0).diagonal_distance_3d(pk->first, l));
1103 * List for calculating weighted median.
1105 class wml {
1106 ale_real *data;
1107 unsigned int size;
1108 unsigned int used;
1110 ale_real &_w(unsigned int i) {
1111 assert(i <= used);
1112 return data[i * 2];
1115 ale_real &_d(unsigned int i) {
1116 assert(i <= used);
1117 return data[i * 2 + 1];
1120 void increase_capacity() {
1122 if (size > 0)
1123 size *= 2;
1124 else
1125 size = 1;
1127 data = (ale_real *) realloc(data, sizeof(ale_real) * 2 * (size * 1));
1129 assert(data);
1130 assert (size > used);
1132 if (!data) {
1133 fprintf(stderr, "Unable to allocate %d bytes of memory\n",
1134 sizeof(ale_real) * 2 * (size * 1));
1135 exit(1);
1139 void insert_weight(unsigned int i, ale_real v, ale_real w) {
1140 assert(used < size);
1141 assert(used >= i);
1142 for (unsigned int j = used; j > i; j--) {
1143 _w(j) = _w(j - 1);
1144 _d(j) = _d(j - 1);
1147 _w(i) = w;
1148 _d(i) = v;
1150 used++;
1153 public:
1155 unsigned int get_size() {
1156 return size;
1159 unsigned int get_used() {
1160 return used;
1163 void print_info() {
1164 fprintf(stderr, "[st %p sz %u el", this, size);
1165 for (unsigned int i = 0; i < used; i++)
1166 fprintf(stderr, " (%f, %f)", (double) _d(i), (double) _w(i));
1167 fprintf(stderr, "]\n");
1170 void clear() {
1171 used = 0;
1174 void insert_weight(ale_real v, ale_real w) {
1175 for (unsigned int i = 0; i < used; i++) {
1176 if (_d(i) == v) {
1177 _w(i) += w;
1178 return;
1180 if (_d(i) > v) {
1181 if (used == size)
1182 increase_capacity();
1183 insert_weight(i, v, w);
1184 return;
1187 if (used == size)
1188 increase_capacity();
1189 insert_weight(used, v, w);
1193 * Finds the median at half-weight, or between half-weight
1194 * and zero-weight, depending on the attenuation value.
1197 ale_real find_median(double attenuation = 0) {
1199 assert(attenuation >= 0);
1200 assert(attenuation <= 1);
1202 ale_real zero1 = 0;
1203 ale_real zero2 = 0;
1204 ale_real undefined = zero1 / zero2;
1206 ale_accum weight_sum = 0;
1208 for (unsigned int i = 0; i < used; i++)
1209 weight_sum += _w(i);
1211 // if (weight_sum == 0)
1212 // return undefined;
1214 if (used == 0 || used == 1)
1215 return undefined;
1217 if (weight_sum == 0) {
1218 ale_accum data_sum = 0;
1219 for (unsigned int i = 0; i < used; i++)
1220 data_sum += _d(i);
1221 return data_sum / (ale_accum) used;
1225 ale_accum midpoint = weight_sum * (ale_accum) (0.5 - 0.5 * attenuation);
1227 ale_accum weight_sum_2 = 0;
1229 for (unsigned int i = 0; i < used && weight_sum_2 < midpoint; i++) {
1230 weight_sum_2 += _w(i);
1232 if (weight_sum_2 > midpoint) {
1233 return _d(i);
1234 } else if (weight_sum_2 == midpoint) {
1235 assert (i + 1 < used);
1236 return (_d(i) + _d(i + 1)) / 2;
1240 return undefined;
1243 wml(int initial_size = 0) {
1245 // if (initial_size == 0) {
1246 // initial_size = (int) (d2::image_rw::count() * 1.5);
1247 // }
1249 size = initial_size;
1250 used = 0;
1252 if (size > 0) {
1253 data = (ale_real *) malloc(size * sizeof(ale_real) * 2);
1254 assert(data);
1255 } else {
1256 data = NULL;
1261 * copy constructor. This is required to avoid undesired frees.
1264 wml(const wml &w) {
1265 size = w.size;
1266 used = w.used;
1267 data = (ale_real *) malloc(size * sizeof(ale_real) * 2);
1268 assert(data);
1270 memcpy(data, w.data, size * sizeof(ale_real) * 2);
1273 ~wml() {
1274 free(data);
1279 * Class for information regarding spatial regions of interest.
1281 * This class is configured for convenience in cases where sampling is
1282 * performed using an approximation of the fine:box:1,triangle:2 chain.
1283 * In this case, the *_1 variables would store the fine data and the
1284 * *_2 variables would store the coarse data. Other uses are also
1285 * possible.
1288 class spatial_info {
1290 * Map channel value --> weight.
1292 wml color_weights_1[3];
1293 wml color_weights_2[3];
1296 * Current color.
1298 d2::pixel color;
1301 * Map occupancy value --> weight.
1303 wml occupancy_weights_1;
1304 wml occupancy_weights_2;
1307 * Current occupancy value.
1309 ale_real occupancy;
1312 * pocc/socc density
1315 unsigned int pocc_density;
1316 unsigned int socc_density;
1319 * Insert a weight into a list.
1321 void insert_weight(wml *m, ale_real v, ale_real w) {
1322 m->insert_weight(v, w);
1326 * Find the median of a weighted list. Uses NaN for undefined.
1328 ale_real find_median(wml *m, double attenuation = 0) {
1329 return m->find_median(attenuation);
1332 public:
1334 * Constructor.
1336 spatial_info() {
1337 color = d2::pixel::zero();
1338 occupancy = 0;
1339 pocc_density = 0;
1340 socc_density = 0;
1344 * Accumulate color; primary data set.
1346 void accumulate_color_1(int f, d2::pixel color, d2::pixel weight) {
1347 for (int k = 0; k < 3; k++)
1348 insert_weight(&color_weights_1[k], color[k], weight[k]);
1352 * Accumulate color; secondary data set.
1354 void accumulate_color_2(d2::pixel color, d2::pixel weight) {
1355 for (int k = 0; k < 3; k++)
1356 insert_weight(&color_weights_2[k], color[k], weight[k]);
1360 * Accumulate occupancy; primary data set.
1362 void accumulate_occupancy_1(int f, ale_real occupancy, ale_real weight) {
1363 insert_weight(&occupancy_weights_1, occupancy, weight);
1367 * Accumulate occupancy; secondary data set.
1369 void accumulate_occupancy_2(ale_real occupancy, ale_real weight) {
1370 insert_weight(&occupancy_weights_2, occupancy, weight);
1372 if (occupancy == 0 || occupancy_weights_2.get_size() < 96)
1373 return;
1375 // fprintf(stderr, "%p updated socc with: %f %f\n", this, occupancy, weight);
1376 // occupancy_weights_2.print_info();
1380 * Update color (and clear accumulation structures).
1382 void update_color() {
1383 for (int d = 0; d < 3; d++) {
1384 ale_real c = find_median(&color_weights_1[d]);
1385 if (isnan(c))
1386 c = find_median(&color_weights_2[d]);
1387 if (isnan(c))
1388 c = 0;
1390 color[d] = c;
1392 color_weights_1[d].clear();
1393 color_weights_2[d].clear();
1398 * Update occupancy (and clear accumulation structures).
1400 void update_occupancy() {
1401 ale_real o = find_median(&occupancy_weights_1, occ_att);
1402 if (isnan(o))
1403 o = find_median(&occupancy_weights_2, occ_att);
1404 if (isnan(o))
1405 o = 0;
1407 occupancy = o;
1409 pocc_density = occupancy_weights_1.get_used();
1410 socc_density = occupancy_weights_2.get_used();
1412 occupancy_weights_1.clear();
1413 occupancy_weights_2.clear();
1418 * Get current color.
1420 d2::pixel get_color() {
1421 return color;
1425 * Get current occupancy.
1427 ale_real get_occupancy() {
1428 assert (finite(occupancy));
1429 return occupancy;
1433 * Get primary color density.
1436 unsigned int get_pocc_density() {
1437 return pocc_density;
1440 unsigned int get_socc_density() {
1441 return socc_density;
1446 * Map spatial regions of interest to spatial info structures. XXX:
1447 * This may get very poor cache behavior in comparison with, say, an
1448 * array. Unfortunately, there is no immediately obvious array
1449 * representation. If some kind of array representation were adopted,
1450 * it would probably cluster regions of similar depth from the
1451 * perspective of the typical camera. In particular, for a
1452 * stereoscopic view, depth ordering for two random points tends to be
1453 * similar between cameras, I think. Unfortunately, it is never
1454 * identical for all points (unless cameras are co-located). One
1455 * possible approach would be to order based on, say, camera 0's idea
1456 * of depth.
1459 #if !defined(HASH_MAP_GNU) && !defined(HASH_MAP_STD)
1460 typedef std::map<struct space::node *, spatial_info> spatial_info_map_t;
1461 #elif defined(HASH_MAP_GNU)
1462 struct node_hash
1464 size_t operator()(struct space::node *n) const
1466 return __gnu_cxx::hash<long>()((long) n);
1469 typedef __gnu_cxx::hash_map<struct space::node *, spatial_info, node_hash > spatial_info_map_t;
1470 #elif defined(HASH_MAP_STD)
1471 typedef std::hash_map<struct space::node *, spatial_info> spatial_info_map_t;
1472 #endif
1474 static spatial_info_map_t spatial_info_map;
1476 public:
1479 * Debugging variables.
1482 static unsigned long total_ambiguity;
1483 static unsigned long total_pixels;
1484 static unsigned long total_divisions;
1485 static unsigned long total_tsteps;
1488 * Member functions
1491 static void et(double et_parameter) {
1492 encounter_threshold = et_parameter;
1495 static void dmr(double dmr_parameter) {
1496 depth_median_radius = dmr_parameter;
1499 static void fmr(double fmr_parameter) {
1500 diff_median_radius = fmr_parameter;
1503 static void load_model(const char *name) {
1504 load_model_name = name;
1507 static void save_model(const char *name) {
1508 save_model_name = name;
1511 static void fc(ale_pos fc) {
1512 front_clip = fc;
1515 static void di_upper(ale_pos _dgi) {
1516 primary_decimation_upper = (int) round(_dgi);
1519 static void do_try(ale_pos _dgo) {
1520 output_decimation_preferred = (int) round(_dgo);
1523 static void di_lower(ale_pos _idiv) {
1524 input_decimation_lower = (int) round(_idiv);
1527 static void oc() {
1528 output_clip = 1;
1531 static void no_oc() {
1532 output_clip = 0;
1535 static void rc(ale_pos rc) {
1536 rear_clip = rc;
1540 * Initialize 3D scene from 2D scene, using 2D and 3D alignment
1541 * information.
1543 static void init_from_d2() {
1546 * Rear clip value of 0 is converted to infinity.
1549 if (rear_clip == 0) {
1550 ale_pos one = +1;
1551 ale_pos zero = +0;
1553 rear_clip = one / zero;
1554 assert(isinf(rear_clip) && rear_clip > 0);
1558 * Scale and translate clipping plane depths.
1561 ale_pos cp_scalar = d3::align::projective(0).wc(point(0, 0, 0))[2];
1563 front_clip = front_clip * cp_scalar - cp_scalar;
1564 rear_clip = rear_clip * cp_scalar - cp_scalar;
1567 * Allocate image structures.
1570 al = new lod_images;
1572 if (tc_multiplier != 0) {
1573 al->open_all();
	/*
	 * Perform spatial_info updating on a given subspace, for given
	 * parameters.
	 *
	 * si       iterator over the subspace hierarchy to update
	 * f        frame index whose color data is accumulated
	 * weights  per-frame encounter-probability accumulator
	 *
	 * Recurses into higher-resolution subspaces before accumulating
	 * data for the current space, so that coarse spaces see weight
	 * contributions from their refinements.
	 */
	static void subspace_info_update(space::iterate si, int f, ref_weights *weights) {

		while(!si.done()) {

			space::traverse st = si.get();

			/*
			 * Reject out-of-bounds spaces.
			 */
			if (!weights->in_spatial_bounds(st)) {
				si.next();
				continue;
			}

			/*
			 * Skip spaces with no color information.
			 *
			 * XXX: This could be more efficient, perhaps.
			 */
			if (spatial_info_map.count(st.get_node()) == 0) {
				si.next();
				continue;
			}

			ui::get()->d3_increment_space_num();

			/*
			 * Get in-bounds centroid, if one exists.
			 */
			point p = al->get(f)->get_t(0).centroid(st);

			if (!p.defined()) {
				si.next();
				continue;
			}

			/*
			 * Get information on the subspace.
			 */
			spatial_info *sn = &spatial_info_map[st.get_node()];
			d2::pixel color = sn->get_color();
			ale_real occupancy = sn->get_occupancy();

			/*
			 * Store current weight so we can later check for
			 * modification by higher-resolution subspaces.
			 */
			ref_weights::subtree *tree = weights->get_subtree(st);

			/*
			 * Check for higher resolution subspaces, and
			 * update the space iterator.
			 */
			if (st.get_node()->positive
			 || st.get_node()->negative) {

				/*
				 * Cleave space for the higher-resolution pass,
				 * skipping the current space, since we will
				 * process that later.
				 */
				space::iterate cleaved_space = si.cleave();

				cleaved_space.next();

				subspace_info_update(cleaved_space, f, weights);

			} else {
				si.next();
			}

			/*
			 * Add new data on the subspace and update weights.
			 * Color differences are scaled by 256 before being
			 * squared into an occupancy estimate below.
			 */
			ale_pos tc = al->get(f)->get_t(0).trilinear_coordinate(st);
			d2::pixel pcolor = al->get(f)->get_tl(p.xy(), tc);
			d2::pixel colordiff = (color - pcolor) * (ale_real) 256;

			if (falloff_exponent != 0) {
				d2::pixel max_diff = al->get(f)->get_max_diff(p.xy(), tc) * (ale_real) 256;

				for (int k = 0; k < 3; k++)
					if (max_diff[k] > 1)
						colordiff[k] /= pow(max_diff[k], falloff_exponent);
			}

			/*
			 * Determine the probability of encounter.  Note that
			 * this reads the weight BEFORE the add_weight() call
			 * below, so the encounter probability reflects only
			 * weight contributed by higher-resolution subspaces.
			 */
			d2::pixel encounter = d2::pixel(1, 1, 1) * (1 - weights->get_weight(st));

			/*
			 * Update weights
			 */
			weights->add_weight(st, occupancy, tree);

			/*
			 * Delete the subtree, if necessary.
			 */
			delete tree;

			/*
			 * Check for cases in which the subspace should not be
			 * updated.
			 */
			if (!resolution_ok(al->get(f)->get_t(0), tc))
				continue;

			if (d2::render::is_excluded_f(p.xy(), f))
				continue;

			/*
			 * Update subspace.  Per-channel occupancy is the
			 * minimum over channels of exp(-colordiff^2).
			 */
			sn->accumulate_color_1(f, pcolor, encounter);
			d2::pixel channel_occ = pexp(-colordiff * colordiff);

			ale_real occ = channel_occ[0];

			for (int k = 1; k < 3; k++)
				if (channel_occ[k] < occ)
					occ = channel_occ[k];

			sn->accumulate_occupancy_1(f, occ, encounter[0]);
		}
	}
	/*
	 * Run a single iteration of the spatial_info update cycle:
	 * accumulate data from every frame, then commit the accumulated
	 * color and occupancy values into the spatial_info map.
	 */
	static void spatial_info_update() {

		/*
		 * Iterate through each frame.
		 */
		for (unsigned int f = 0; f < d2::image_rw::count(); f++) {

			ui::get()->d3_occupancy_status(f);

			/*
			 * Open the frame and transformation.  (When
			 * tc_multiplier is nonzero, init_from_d2() has
			 * already opened all frames.)
			 */
			if (tc_multiplier == 0)
				al->open(f);

			/*
			 * Allocate weights data structure for storing encounter
			 * probabilities.
			 */
			ref_weights *weights = new ref_weights(f);

			/*
			 * Call subspace_info_update for the root space.
			 */
			subspace_info_update(space::iterate(al->get(f)->origin()), f, weights);

			/*
			 * Free weights.
			 */
			delete weights;

			/*
			 * Close the frame and transformation.
			 */
			if (tc_multiplier == 0)
				al->close(f);
		}

		/*
		 * Update all spatial_info structures.
		 */
		for (spatial_info_map_t::iterator i = spatial_info_map.begin(); i != spatial_info_map.end(); i++) {
			i->second.update_color();
			i->second.update_occupancy();

//			d2::pixel color = i->second.get_color();

//			fprintf(stderr, "space p=%p updated to c=[%f %f %f] o=%f\n",
//					i->first, color[0], color[1], color[2],
//					i->second.get_occupancy());
		}
	}
	/*
	 * Support function for view() and depth().  This function
	 * always performs exclusion.
	 *
	 * type selects what is rendered into 'im' (see the branch
	 * comments below); 'weights' accumulates per-pixel encounter
	 * weights.  When 'prune' is set, rendering is restricted to the
	 * rectangle [pl, ph] and output coordinates are offset by pl.
	 */
	static const void view_recurse(int type, d2::image *im, d2::image *weights, space::iterate si, pt _pt,
			int prune = 0, d2::point pl = d2::point(0, 0), d2::point ph = d2::point(0, 0)) {
		while (!si.done()) {
			space::traverse st = si.get();

			/*
			 * Remove excluded regions.
			 */
			if (excluded(st)) {
				si.cleave();
				continue;
			}

			/*
			 * Prune.
			 */
			if (prune && !_pt.check_inclusion_scaled(st, pl, ph)) {
				si.cleave();
				continue;
			}

			/*
			 * XXX: This could be more efficient, perhaps.
			 */
			if (spatial_info_map.count(st.get_node()) == 0) {
				si.next();
				continue;
			}

			ui::get()->d3_increment_space_num();

			spatial_info sn = spatial_info_map[st.get_node()];

			/*
			 * Get information on the subspace.
			 */
			d2::pixel color = sn.get_color();
			// d2::pixel color = d2::pixel(1, 1, 1) * (double) (((unsigned int) (st.get_node()) / sizeof(space)) % 65535);
			ale_real occupancy = sn.get_occupancy();

			/*
			 * Determine the view-local bounding box for the
			 * subspace.
			 */
			point bb[2];

			_pt.get_view_local_bb_scaled(st, bb);

			point min = bb[0];
			point max = bb[1];

			/*
			 * When pruning, clamp the bounding box to the
			 * render rectangle and shift it into the pruned
			 * image's coordinate system.
			 */
			if (prune) {
				if (min[0] > ph[0]
				 || min[1] > ph[1]
				 || max[0] < pl[0]
				 || max[1] < pl[1]) {
					si.next();
					continue;
				}

				if (min[0] < pl[0])
					min[0] = pl[0];
				if (min[1] < pl[1])
					min[1] = pl[1];
				if (max[0] > ph[0])
					max[0] = ph[0];
				if (max[1] > ph[1])
					max[1] = ph[1];

				min[0] -= pl[0];
				min[1] -= pl[1];
				max[0] -= pl[0];
				max[1] -= pl[1];
			}

			/*
			 * Data structure to check modification of weights by
			 * higher-resolution subspaces.
			 */
			std::queue<d2::pixel> weight_queue;

			/*
			 * Check for higher resolution subspaces, and
			 * update the space iterator.
			 */
			if (st.get_node()->positive
			 || st.get_node()->negative) {

				/*
				 * Store information about current weights,
				 * so we will know which areas have been
				 * covered by higher-resolution subspaces.
				 */
				for (int i = (int) ceil(min[0]); i <= (int) floor(max[0]); i++)
				for (int j = (int) ceil(min[1]); j <= (int) floor(max[1]); j++)
					weight_queue.push(weights->get_pixel(i, j));

				/*
				 * Cleave space for the higher-resolution pass,
				 * skipping the current space, since we will
				 * process that afterward.
				 */
				space::iterate cleaved_space = si.cleave();

				cleaved_space.next();

				view_recurse(type, im, weights, cleaved_space, _pt, prune, pl, ph);

			} else {
				si.next();
			}

			/*
			 * Iterate over pixels in the bounding box, finding
			 * pixels that intersect the subspace.  XXX: assume
			 * for now that all pixels in the bounding box
			 * intersect the subspace.
			 */
			for (int i = (int) ceil(min[0]); i <= (int) floor(max[0]); i++)
			for (int j = (int) ceil(min[1]); j <= (int) floor(max[1]); j++) {

				/*
				 * Check for higher-resolution updates: a
				 * changed weight means the pixel was covered
				 * at higher resolution, so skip it here.
				 */
				if (weight_queue.size()) {
					if (weight_queue.front() != weights->get_pixel(i, j)) {
						weight_queue.pop();
						continue;
					}
					weight_queue.pop();
				}

				/*
				 * Determine the probability of encounter.
				 */
				d2::pixel encounter = (d2::pixel(1, 1, 1)
						     - weights->get_pixel(i, j))
						    * occupancy;

				/*
				 * Update images.
				 */
				if (type == 0) {

					/*
					 * Color view
					 */
					weights->set_pixel(i, j, (d2::pixel) weights->get_pixel(i, j)
							 + encounter);
					im->set_pixel(i, j, (d2::pixel) im->get_pixel(i, j)
						    + encounter * color);

				} else if (type == 1) {

					/*
					 * Weighted (transparent) depth display
					 */
					ale_pos depth_value = _pt.wp_scaled(st.get_min())[2];
					weights->set_pixel(i, j, (d2::pixel) weights->get_pixel(i, j)
							 + encounter);
					im->set_pixel(i, j, (d2::pixel) im->get_pixel(i, j)
						    + encounter * (ale_real) depth_value);

				} else if (type == 2) {

					/*
					 * Ambiguity (ambivalence) measure.
					 */
					weights->set_pixel(i, j, d2::pixel(1, 1, 1));
					im->set_pixel(i, j, (d2::pixel) im->get_pixel(i, j)
						    + 0.1 * d2::pixel(1, 1, 1));

				} else if (type == 3) {

					/*
					 * Closeness measure.
					 */
					ale_pos depth_value = _pt.wp_scaled(st.get_min())[2];
					if (weights->get_chan(i, j, 0) == 0) {
						weights->set_pixel(i, j, d2::pixel(1, 1, 1));
						im->set_pixel(i, j, d2::pixel(1, 1, 1)
							    * (ale_real) depth_value);
					} else if (im->get_chan(i, j, 2) < (ale_sreal) depth_value) {
						im->set_pixel(i, j, d2::pixel(1, 1, 1)
							    * (ale_real) depth_value);
					} else {
						continue;
					}

				} else if (type == 4) {

					/*
					 * Weighted (transparent) contribution display
					 */
					ale_pos contribution_value = sn.get_pocc_density() /* + sn.get_socc_density() */;
					weights->set_pixel(i, j, (d2::pixel) weights->get_pixel(i, j)
							 + encounter);
					im->set_pixel(i, j, (d2::pixel) im->get_pixel(i, j)
						    + encounter * (ale_real) contribution_value);

					assert (finite(encounter[0]));
					assert (finite(contribution_value));

				} else if (type == 5) {

					/*
					 * Weighted (transparent) occupancy display
					 */
					ale_real contribution_value = occupancy;
					weights->set_pixel(i, j, (d2::pixel) weights->get_pixel(i, j)
							 + encounter);
					im->set_pixel(i, j, (d2::pixel) im->get_pixel(i, j)
						    + encounter * contribution_value);

				} else if (type == 6) {

					/*
					 * (Depth, xres, yres) triple
					 */
					ale_pos depth_value = _pt.wp_scaled(st.get_min())[2];
					weights->set_chan(i, j, 0, weights->get_chan(i, j, 0)
								 + encounter[0]);
					if (weights->get_pixel(i, j)[1] < encounter[0]) {
						weights->set_chan(i, j, 1, encounter[0]);
						im->set_pixel(i, j, d2::pixel(
							weights->get_pixel(i, j)[1] * (ale_real) depth_value,
							ale_pos_to_real(max[0] - min[0]),
							ale_pos_to_real(max[1] - min[1])));
					}

				} else if (type == 7) {

					/*
					 * (xoff, yoff, 0) triple
					 */
					weights->set_chan(i, j, 0,
						weights->get_chan(i, j, 0) + encounter[0]);
					if (weights->get_chan(i, j, 1) < (ale_sreal) encounter[0]) {
						weights->set_chan(i, j, 1, encounter[0]);
						im->set_pixel(i, j, d2::pixel(
							ale_pos_to_real(i - min[0]),
							ale_pos_to_real(j - min[1]),
							0));
					}

				} else if (type == 8) {

					/*
					 * Value = 1 for any intersected space.
					 */
					weights->set_pixel(i, j, d2::pixel(1, 1, 1));
					im->set_pixel(i, j, d2::pixel(1, 1, 1));

				} else if (type == 9) {

					/*
					 * Number of contributions for the nearest space.
					 */
					if (weights->get_chan(i, j, 0) == 1)
						continue;

					weights->set_pixel(i, j, d2::pixel(1, 1, 1));
					im->set_pixel(i, j, d2::pixel(1, 1, 1) * (sn.get_pocc_density() * 0.1));

				} else
					assert(0);
			}
		}
	}
	/*
	 * Generate a depth image from a specified view.
	 *
	 * _pt    view to render from
	 * n      frame index for dimension assertions, or negative for none
	 * prune  when set, restrict rendering to the rectangle [pl, ph]
	 *
	 * Returns a newly allocated image owned by the caller.  Undefined
	 * (below-threshold) pixels are set to 0/0 = NaN.
	 */
	static const d2::image *depth(pt _pt, int n = -1, int prune = 0,
			d2::point pl = d2::point(0, 0), d2::point ph = d2::point(0, 0)) {
		assert ((unsigned int) n < d2::image_rw::count() || n < 0);

		_pt.view_angle(_pt.view_angle() * VIEW_ANGLE_MULTIPLIER);

		if (n >= 0) {
			assert((int) floor(d2::align::of(n).scaled_height())
			    == (int) floor(_pt.scaled_height()));
			assert((int) floor(d2::align::of(n).scaled_width())
			    == (int) floor(_pt.scaled_width()));
		}

		d2::image *im1, *im2, *im3, *weights;;

		if (prune) {
			im1 = d2::new_image_ale_real((int) floor(ph[0] - pl[0]) + 1,
					(int) floor(ph[1] - pl[1]) + 1, 3);

			im2 = d2::new_image_ale_real((int) floor(ph[0] - pl[0]) + 1,
					(int) floor(ph[1] - pl[1]) + 1, 3);

			im3 = d2::new_image_ale_real((int) floor(ph[0] - pl[0]) + 1,
					(int) floor(ph[1] - pl[1]) + 1, 3);

			weights = d2::new_image_ale_real((int) floor(ph[0] - pl[0]) + 1,
					(int) floor(ph[1] - pl[1]) + 1, 3);

		} else {

			im1 = d2::new_image_ale_real((int) floor(_pt.scaled_height()),
					(int) floor(_pt.scaled_width()), 3);

			im2 = d2::new_image_ale_real((int) floor(_pt.scaled_height()),
					(int) floor(_pt.scaled_width()), 3);

			im3 = d2::new_image_ale_real((int) floor(_pt.scaled_height()),
					(int) floor(_pt.scaled_width()), 3);

			weights = d2::new_image_ale_real((int) floor(_pt.scaled_height()),
					(int) floor(_pt.scaled_width()), 3);
		}

		/*
		 * Iterate through subspaces.  The first pass (type 6)
		 * renders (depth, xres, yres) triples into im1.
		 */
		space::iterate si(_pt.origin());

		view_recurse(6, im1, weights, si, _pt, prune, pl, ph);

		delete weights;

		if (prune) {
			weights = d2::new_image_ale_real((int) floor(ph[0] - pl[0]) + 1,
					(int) floor(ph[1] - pl[1]) + 1, 3);
		} else {
			weights = d2::new_image_ale_real((int) floor(_pt.scaled_height()),
					(int) floor(_pt.scaled_width()), 3);
		}

		/*
		 * Second pass (type 7) renders (xoff, yoff, 0) triples
		 * into im2 for interpolation below.  The disabled branch
		 * is a debugging alternative.
		 */
#if 1
		view_recurse(7, im2, weights, si, _pt, prune, pl, ph);
#else
		view_recurse(8, im2, weights, si, _pt, prune, pl, ph);
		return im2;
#endif

		/*
		 * Normalize depths by weights
		 */
		if (normalize_weights)
		for (unsigned int i = 0; i < im1->height(); i++)
		for (unsigned int j = 0; j < im1->width(); j++)
			im1->set_chan(i, j, 0, im1->get_chan(i, j, 0) / weights->get_chan(i, j, 1));

		for (unsigned int i = 0; i < im1->height(); i++)
		for (unsigned int j = 0; j < im1->width(); j++) {

			/*
			 * Handle interpolation: reconstruct a sample grid
			 * from the per-pixel resolution (res) and offset
			 * (im2) data, and bilinearly blend the four
			 * surrounding depth samples.
			 */

			d2::point x;
			d2::point blx;
			d2::point res((double) im1->get_chan(i, j, 1),
				      (double) im1->get_chan(i, j, 2));

			for (int d = 0; d < 2; d++) {

				if (im2->get_chan(i, j, d) < (ale_sreal) res[d] / 2)
					x[d] = (ale_pos) (d?j:i) - res[d] / 2 - (ale_pos) im2->get_chan(i, j, d);
				else
					x[d] = (ale_pos) (d?j:i) + res[d] / 2 - (ale_pos) im2->get_chan(i, j, d);

				blx[d] = 1 - ((d?j:i) - x[d]) / res[d];
			}

			ale_real depth_val = 0;
			ale_real depth_weight = 0;

			for (int ii = 0; ii < 2; ii++)
			for (int jj = 0; jj < 2; jj++) {
				d2::point p = x + d2::point(ii, jj) * res;
				if (im1->in_bounds(p)) {

					ale_real d = im1->get_bl(p)[0];

					if (isnan(d))
						continue;

					ale_real w = ale_pos_to_real((ii ? (1 - blx[0]) : blx[0]) * (jj ? (1 - blx[1]) : blx[1]));
					depth_weight += w;
					depth_val += w * d;
				}
			}

			ale_real depth = depth_val / depth_weight;

			/*
			 * Handle encounter thresholds
			 */

			if (weights->get_chan(i, j, 0) < encounter_threshold) {
				im3->set_pixel(i, j, d2::pixel::zero() / d2::pixel::zero());
			} else {
				im3->set_pixel(i, j, d2::pixel(1, 1, 1) * depth);
			}
		}

		delete weights;
		delete im1;
		delete im2;

		return im3;
	}
2224 static const d2::image *depth(unsigned int n) {
2226 assert (n < d2::image_rw::count());
2228 pt _pt = align::projective(n);
2230 return depth(_pt, n);
	/*
	 * This function always performs exclusion.
	 *
	 * Returns the spatial node most visible at image point p under
	 * view _pt, or NULL if no occupied node is encountered.
	 * (*weight)[0] accumulates the cumulative encounter weight;
	 * (*weight)[1] tracks the maximum single encounter seen so far.
	 */
	static space::node *most_visible_pointwise(d2::pixel *weight, space::iterate si, pt _pt, d2::point p) {

		space::node *result = NULL;

		while (!si.done()) {
			space::traverse st = si.get();

			/*
			 * Prune certain regions known to be uninteresting.
			 */
			if (excluded(st) || !_pt.check_inclusion_scaled(st, p)) {
				si.cleave();
				continue;
			}

			/*
			 * XXX: This could be more efficient, perhaps.
			 */
			if (spatial_info_map.count(st.get_node()) == 0) {
				si.next();
				continue;
			}

			spatial_info sn = spatial_info_map[st.get_node()];

			/*
			 * Get information on the subspace.
			 */
			ale_real occupancy = sn.get_occupancy();

			/*
			 * Preserve current weight in order to check for
			 * modification by higher-resolution subspaces.
			 */
			d2::pixel old_weight = *weight;

			/*
			 * Check for higher resolution subspaces, and
			 * update the space iterator.
			 */
			if (st.get_node()->positive
			 || st.get_node()->negative) {

				/*
				 * Cleave space for the higher-resolution pass,
				 * skipping the current space, since we will
				 * process that afterward.
				 */
				space::iterate cleaved_space = si.cleave();

				cleaved_space.next();

				space::node *r = most_visible_pointwise(weight, cleaved_space, _pt, p);

				/*
				 * A changed maximum means a higher-resolution
				 * node became the new best result.
				 */
				if (old_weight[1] != (*weight)[1])
					result = r;

			} else {
				si.next();
			}

			/*
			 * Check for higher-resolution updates.
			 */
			if (old_weight != *weight)
				continue;

			/*
			 * Determine the probability of encounter.
			 */
			ale_real encounter = (1 - (*weight)[0]) * occupancy;

			/*
			 * (*weight)[0] stores the cumulative weight; (*weight)[1] stores the maximum.
			 */
			if (encounter > (*weight)[1]) {
				result = st.get_node();
				(*weight)[1] = encounter;
			}

			(*weight)[0] += encounter;
		}

		return result;
	}
	/*
	 * This function performs exclusion iff SCALED is true.
	 *
	 * Fills 'results' (row-major, one slot per pixel of 'weights')
	 * with the most visible spatial node at each image position
	 * under view _pt.  weights channel 0 stores the cumulative
	 * encounter weight; channel 1 stores the maximum encounter.
	 */
	static void most_visible_generic(std::vector<space::node *> &results, d2::image *weights,
			space::iterate si, pt _pt, int scaled) {

		assert (results.size() == weights->height() * weights->width());

		while (!si.done()) {
			space::traverse st = si.get();

			if (scaled && excluded(st)) {
				si.cleave();
				continue;
			}

			/*
			 * XXX: This could be more efficient, perhaps.
			 */
			if (spatial_info_map.count(st.get_node()) == 0) {
				si.next();
				continue;
			}

			spatial_info sn = spatial_info_map[st.get_node()];

			/*
			 * Get information on the subspace.
			 */
			ale_real occupancy = sn.get_occupancy();

			/*
			 * Determine the view-local bounding box for the
			 * subspace.
			 */
			point bb[2];

			_pt.get_view_local_bb_scaled(st, bb);

			point min = bb[0];
			point max = bb[1];

			/*
			 * Data structure to check modification of weights by
			 * higher-resolution subspaces.
			 */
			std::queue<d2::pixel> weight_queue;

			/*
			 * Check for higher resolution subspaces, and
			 * update the space iterator.
			 */
			if (st.get_node()->positive
			 || st.get_node()->negative) {

				/*
				 * Store information about current weights,
				 * so we will know which areas have been
				 * covered by higher-resolution subspaces.
				 */
				for (int i = (int) ceil(min[0]); i <= (int) floor(max[0]); i++)
				for (int j = (int) ceil(min[1]); j <= (int) floor(max[1]); j++)
					weight_queue.push(weights->get_pixel(i, j));

				/*
				 * Cleave space for the higher-resolution pass,
				 * skipping the current space, since we will
				 * process that afterward.
				 */
				space::iterate cleaved_space = si.cleave();

				cleaved_space.next();

				most_visible_generic(results, weights, cleaved_space, _pt, scaled);

			} else {
				si.next();
			}

			/*
			 * Iterate over pixels in the bounding box, finding
			 * pixels that intersect the subspace.  XXX: assume
			 * for now that all pixels in the bounding box
			 * intersect the subspace.
			 */
			for (int i = (int) ceil(min[0]); i <= (int) floor(max[0]); i++)
			for (int j = (int) ceil(min[1]); j <= (int) floor(max[1]); j++) {

				/*
				 * Check for higher-resolution updates.
				 */
				if (weight_queue.size()) {
					if (weight_queue.front() != weights->get_pixel(i, j)) {
						weight_queue.pop();
						continue;
					}
					weight_queue.pop();
				}

				/*
				 * Determine the probability of encounter.
				 */
				ale_real encounter = (1 - weights->get_pixel(i, j)[0]) * occupancy;

				/*
				 * weights[0] stores the cumulative weight; weights[1] stores the maximum.
				 */
				if (encounter > weights->get_pixel(i, j)[1]
				 || results[i * weights->width() + j] == NULL) {
					results[i * weights->width() + j] = st.get_node();
					weights->set_chan(i, j, 1, encounter);
				}

				weights->set_chan(i, j, 0, weights->get_chan(i, j, 0) + encounter);
			}
		}
	}
2464 static std::vector<space::node *> most_visible_scaled(pt _pt) {
2465 d2::image *weights = d2::new_image_ale_real((int) floor(_pt.scaled_height()),
2466 (int) floor(_pt.scaled_width()), 3);
2467 std::vector<space::node *> results;
2469 results.resize(weights->height() * weights->width(), 0);
2471 most_visible_generic(results, weights, space::iterate(_pt.origin()), _pt, 1);
2473 return results;
2476 static std::vector<space::node *> most_visible_unscaled(pt _pt) {
2477 d2::image *weights = d2::new_image_ale_real((int) floor(_pt.unscaled_height()),
2478 (int) floor(_pt.unscaled_width()), 3);
2479 std::vector<space::node *> results;
2481 results.resize(weights->height() * weights->width(), 0);
2483 most_visible_generic(results, weights, space::iterate(_pt.origin()), _pt, 0);
2485 return results;
2488 static const int visibility_search(const std::vector<space::node *> &fmv, space::node *mv) {
2490 if (mv == NULL)
2491 return 0;
2493 if (std::binary_search(fmv.begin(), fmv.end(), mv))
2494 return 1;
2496 return (visibility_search(fmv, mv->positive)
2497 || visibility_search(fmv, mv->negative));
	/*
	 * Class to generate focal sample views.
	 */
	class view_generator {

		/*
		 * Original projective transformation.
		 */
		pt original_pt;

		/*
		 * Data type for shared view data.  Color, depth, and
		 * median images are computed lazily and cached.
		 */
		class shared_view {
			pt _pt;
			std::vector<space::node *> mv;
			d2::image *color;
			d2::image *color_weights;
			const d2::image *_depth;
			d2::image *median_depth;
			d2::image *median_diff;

		public:
			shared_view(pt _pt) {
				this->_pt = _pt;
				color = NULL;
				color_weights = NULL;
				_depth = NULL;
				median_depth = NULL;
				median_diff = NULL;
			}

			/*
			 * Copying transfers only the view transformation and
			 * visibility data; cached images are not copied, and
			 * will be regenerated on demand.
			 *
			 * NOTE(review): no copy-assignment operator is
			 * defined; the compiler default would copy the
			 * cached pointers and double-delete them.  Copy
			 * construction (e.g. by std::vector) is the only
			 * copy operation used here — confirm before adding
			 * assignment uses.
			 */
			shared_view(const shared_view &copy_origin) {
				_pt = copy_origin._pt;
				mv = copy_origin.mv;
				color = NULL;
				color_weights = NULL;
				_depth = NULL;
				median_depth = NULL;
				median_diff = NULL;
			}

			~shared_view() {
				delete color;
				delete _depth;
				delete color_weights;
				delete median_diff;
				delete median_depth;
			}

			void get_view_recurse(d2::image *data, d2::image *weights, int type) {
				/*
				 * Iterate through subspaces.
				 */

				space::iterate si(_pt.origin());

				ui::get()->d3_render_status(0, 0, -1, -1, -1, -1, 0);

				view_recurse(type, data, weights, si, _pt);
			}

			/*
			 * Render and cache the color image and its weights.
			 */
			void init_color() {
				color = d2::new_image_ale_real((int) floor(_pt.scaled_height()),
						(int) floor(_pt.scaled_width()), 3);

				color_weights = d2::new_image_ale_real((int) floor(_pt.scaled_height()),
						(int) floor(_pt.scaled_width()), 3);

				get_view_recurse(color, color_weights, 0);
			}

			/*
			 * Render and cache the depth image.
			 */
			void init_depth() {
				_depth = depth(_pt, -1);
			}

			/*
			 * Compute and cache median depth and depth-difference
			 * images; requires the depth image.
			 */
			void init_medians() {
				if (!_depth)
					init_depth();

				assert(_depth);

				median_diff = _depth->fcdiff_median((int) floor(diff_median_radius));
				median_depth = _depth->medians((int) floor(depth_median_radius));

				assert(median_diff);
				assert(median_depth);
			}

		public:
			pt get_pt() {
				return _pt;
			}

			/*
			 * Most visible node at integer position (i, j), or
			 * NULL when out of bounds.  Visibility data is
			 * computed lazily for the whole view.
			 */
			space::node *get_most_visible(unsigned int i, unsigned int j) {
				unsigned int height = (int) floor(_pt.scaled_height());
				unsigned int width = (int) floor(_pt.scaled_width());

				if (i >= height
				 || j >= width) {
					return NULL;
				}

				if (mv.size() == 0) {
					mv = most_visible_scaled(_pt);
				}

				assert (mv.size() > i * width + j);

				return mv[i * width + j];
			}

			space::node *get_most_visible(d2::point p) {
				unsigned int i = (unsigned int) round (p[0]);
				unsigned int j = (unsigned int) round (p[1]);

				return get_most_visible(i, j);
			}

			d2::pixel get_color(unsigned int i, unsigned int j) {
				if (color == NULL) {
					init_color();
				}

				assert (color != NULL);

				return color->get_pixel(i, j);
			}

			d2::pixel get_depth(unsigned int i, unsigned int j) {
				if (_depth == NULL) {
					init_depth();
				}

				assert (_depth != NULL);

				return _depth->get_pixel(i, j);
			}

			/*
			 * Median depth (*t) and diff (*f) at (i, j);
			 * undefined pixels when out of bounds.
			 */
			void get_median_depth_and_diff(d2::pixel *t, d2::pixel *f, unsigned int i, unsigned int j) {
				if (median_depth == NULL && median_diff == NULL)
					init_medians();

				assert (median_depth && median_diff);

				if (i >= median_depth->height()
				 || j >= median_depth->width()) {
					*t = d2::pixel::undefined();
					*f = d2::pixel::undefined();
				} else {
					*t = median_depth->get_pixel(i, j);
					*f = median_diff->get_pixel(i, j);
				}
			}

			/*
			 * Bilinearly sampled color (*c) and weight (*w) at
			 * point p; undefined pixels when out of bounds.
			 */
			void get_color_and_weight(d2::pixel *c, d2::pixel *w, d2::point p) {
				if (color == NULL) {
					init_color();
				}

				assert (color != NULL);

				if (!color->in_bounds(p)) {
					*c = d2::pixel::undefined();
					*w = d2::pixel::undefined();
				} else {
					*c = color->get_bl(p);
					*w = color_weights->get_bl(p);
				}
			}

			d2::pixel get_depth(d2::point p) {
				if (_depth == NULL) {
					init_depth();
				}

				assert (_depth != NULL);

				if (!_depth->in_bounds(p)) {
					return d2::pixel::undefined();
				}

				return _depth->get_bl(p);
			}

			void get_median_depth_and_diff(d2::pixel *t, d2::pixel *f, d2::point p) {
				if (median_diff == NULL && median_depth == NULL)
					init_medians();

				assert (median_diff != NULL && median_depth != NULL);

				if (!median_diff->in_bounds(p)) {
					*t = d2::pixel::undefined();
					*f = d2::pixel::undefined();
				} else {
					*t = median_depth->get_bl(p);
					*f = median_diff->get_bl(p);
				}
			}
		};

		/*
		 * Shared view array, indexed by aperture diameter and view number.
		 */
		std::map<ale_pos, std::vector<shared_view> > aperture_to_shared_views_map;

		/*
		 * Method to generate a new stochastic focal view.
		 * Rejection-samples an offset within a disc of radius
		 * aperture/2 and translates the original view by it.
		 */
		pt get_new_view(ale_pos aperture) {

			ale_pos ofx = aperture;
			ale_pos ofy = aperture;

			while (ofx * ofx + ofy * ofy > aperture * aperture / 4) {
				ofx = (rand() * aperture) / RAND_MAX - aperture / 2;
				ofy = (rand() * aperture) / RAND_MAX - aperture / 2;
			}

			/*
			 * Generate a new view from the given offset.
			 */

			point new_view = original_pt.cw(point(ofx, ofy, 0));
			pt _pt_new = original_pt;
			for (int d = 0; d < 3; d++)
				_pt_new.e().set_translation(d, -new_view[d]);

			return _pt_new;
		}

	public:

		/*
		 * Result type.  Wraps either a cached shared_view (when
		 * sv is non-NULL) or a one-off randomized view, in which
		 * case results are computed pointwise on demand.
		 */
		class view {
			shared_view *sv;
			pt _pt;

		public:

			view(shared_view *sv, pt _pt = pt()) {
				this->sv = sv;
				if (sv) {
					this->_pt = sv->get_pt();
				} else {
					this->_pt = _pt;
				}
			}

			pt get_pt() {
				return _pt;
			}

			space::node *get_most_visible(unsigned int i, unsigned int j) {
				assert (sv);
				return sv->get_most_visible(i, j);
			}

			space::node *get_most_visible(d2::point p) {
				if (sv) {
					return sv->get_most_visible(p);
				}

				d2::pixel weight(0, 0, 0);

				return most_visible_pointwise(&weight, space::iterate(_pt.origin()), _pt, p);
			}

			d2::pixel get_color(unsigned int i, unsigned int j) {
				return sv->get_color(i, j);
			}

			void get_color_and_weight(d2::pixel *color, d2::pixel *weight, d2::point p) {
				if (sv) {
					sv->get_color_and_weight(color, weight, p);
					return;
				}

				/*
				 * Determine weight and color for the given point
				 * by rendering a pruned single-pixel view.
				 */

				d2::image *im_point = d2::new_image_ale_real(1, 1, 3);
				d2::image *wt_point = d2::new_image_ale_real(1, 1, 3);

				view_recurse(0, im_point, wt_point, space::iterate(_pt.origin()), _pt, 1, p, p);

				*color = im_point->get_pixel(0, 0);
				*weight = wt_point->get_pixel(0, 0);

				delete im_point;
				delete wt_point;

				return;
			}

			d2::pixel get_depth(unsigned int i, unsigned int j) {
				assert(sv);
				return sv->get_depth(i, j);
			}

			void get_median_depth_and_diff(d2::pixel *depth, d2::pixel *diff, unsigned int i, unsigned int j) {
				assert(sv);
				sv->get_median_depth_and_diff(depth, diff, i, j);
			}

			void get_median_depth_and_diff(d2::pixel *_depth, d2::pixel *_diff, d2::point p) {
				if (sv) {
					sv->get_median_depth_and_diff(_depth, _diff, p);
					return;
				}

				/*
				 * Generate a local depth image of required radius.
				 */

				ale_pos radius = 1;

				if (diff_median_radius + 1 > radius)
					radius = diff_median_radius + 1;
				if (depth_median_radius > radius)
					radius = depth_median_radius;

				d2::point pl = p - d2::point(radius, radius);
				d2::point ph = p + d2::point(radius, radius);
				const d2::image *local_depth = depth(_pt, -1, 1, pl, ph);

				/*
				 * Find depth and diff at this point, check for
				 * undefined values, and generate projections
				 * of the image corners on the estimated normal
				 * surface.
				 */

				d2::image *median_diffs = local_depth->fcdiff_median((int) floor(diff_median_radius));
				d2::image *median_depths = local_depth->medians((int) floor(depth_median_radius));

				*_depth = median_depths->get_pixel((int) radius, (int) radius);
				*_diff = median_diffs->get_pixel((int) radius, (int) radius);

				delete median_diffs;
				delete median_depths;
				delete local_depth;
			}
		};

		/*
		 * Obtain view number 'index' for the given aperture.  With
		 * randomization disabled, views are cached and shared; with
		 * randomization enabled, a fresh stochastic view is created
		 * each call.
		 */
		view get_view(ale_pos aperture, unsigned index, unsigned int randomization) {
			if (randomization == 0) {

				while (aperture_to_shared_views_map[aperture].size() <= index) {
					aperture_to_shared_views_map[aperture].push_back(shared_view(get_new_view(aperture)));
				}

				return view(&(aperture_to_shared_views_map[aperture][index]));
			}

			return view(NULL, get_new_view(aperture));
		}

		view_generator(pt original_pt) {
			this->original_pt = original_pt;
		}
	};
2876 * Unfiltered function
2878 static const d2::image *view_nofilter_focus(pt _pt, int n) {
2880 assert ((unsigned int) n < d2::image_rw::count() || n < 0);
2882 if (n >= 0) {
2883 assert((int) floor(d2::align::of(n).scaled_height())
2884 == (int) floor(_pt.scaled_height()));
2885 assert((int) floor(d2::align::of(n).scaled_width())
2886 == (int) floor(_pt.scaled_width()));
2889 const d2::image *depths = depth(_pt, n);
2891 d2::image *im = d2::new_image_ale_real((int) floor(_pt.scaled_height()),
2892 (int) floor(_pt.scaled_width()), 3);
2894 _pt.view_angle(_pt.view_angle() * VIEW_ANGLE_MULTIPLIER);
2896 view_generator vg(_pt);
2898 for (unsigned int i = 0; i < im->height(); i++)
2899 for (unsigned int j = 0; j < im->width(); j++) {
2901 focus::result _focus = focus::get(depths, i, j);
2903 if (!finite(_focus.focal_distance))
2904 continue;
2907 * Data structures for calculating focal statistics.
2910 d2::pixel color, weight;
2911 d2::image_weighted_median *iwm = NULL;
2913 if (_focus.statistic == 1) {
2914 iwm = new d2::image_weighted_median(1, 1, 3, _focus.sample_count);
2918 * Iterate over views for this focus region.
2921 for (unsigned int v = 0; v < _focus.sample_count; v++) {
2923 view_generator::view vw = vg.get_view(_focus.aperture, v, _focus.randomization);
2925 ui::get()->d3_render_status(0, 1, -1, v, i, j, -1);
2929 * Map the focused point to the new view.
2932 point p = vw.get_pt().wp_scaled(_pt.pw_scaled(point(i, j, _focus.focal_distance)));
2935 * Determine weight and color for the given point.
2938 d2::pixel view_weight, view_color;
2940 vw.get_color_and_weight(&view_color, &view_weight, p.xy());
2942 if (!color.finite() || !weight.finite())
2943 continue;
2945 if (_focus.statistic == 0) {
2946 color += view_color;
2947 weight += view_weight;
2948 } else if (_focus.statistic == 1) {
2949 iwm->accumulate(0, 0, v, view_color, view_weight);
2950 } else
2951 assert(0);
2954 if (_focus.statistic == 1) {
2955 weight = iwm->get_weights()->get_pixel(0, 0);
2956 color = iwm->get_pixel(0, 0);
2957 delete iwm;
2960 if (weight.min_norm() < encounter_threshold) {
2961 im->set_pixel(i, j, d2::pixel::zero() / d2::pixel::zero());
2962 } else if (normalize_weights)
2963 im->set_pixel(i, j, color / weight);
2964 else
2965 im->set_pixel(i, j, color);
2968 delete depths;
2970 return im;
	/*
	 * Unfiltered function
	 *
	 * Render an unfiltered color view from _pt.  Dispatches to
	 * view_nofilter_focus() when synthetic focus is enabled.
	 * Returns a newly allocated image owned by the caller.
	 */
	static const d2::image *view_nofilter(pt _pt, int n) {

		if (!focus::is_trivial())
			return view_nofilter_focus(_pt, n);

		assert ((unsigned int) n < d2::image_rw::count() || n < 0);

		if (n >= 0) {
			assert((int) floor(d2::align::of(n).scaled_height())
			    == (int) floor(_pt.scaled_height()));
			assert((int) floor(d2::align::of(n).scaled_width())
			    == (int) floor(_pt.scaled_width()));
		}

		const d2::image *depths = depth(_pt, n);

		d2::image *im = d2::new_image_ale_real((int) floor(_pt.scaled_height()),
				(int) floor(_pt.scaled_width()), 3);

		_pt.view_angle(_pt.view_angle() * VIEW_ANGLE_MULTIPLIER);

		/*
		 * Use adaptive subspace data.
		 */

		d2::image *weights = d2::new_image_ale_real((int) floor(_pt.scaled_height()),
				(int) floor(_pt.scaled_width()), 3);

		/*
		 * Iterate through subspaces.
		 */

		space::iterate si(_pt.origin());

		ui::get()->d3_render_status(0, 0, -1, -1, -1, -1, 0);

		view_recurse(0, im, weights, si, _pt);

		/*
		 * Apply the encounter threshold and (optionally)
		 * normalize color by accumulated weight.
		 */
		for (unsigned int i = 0; i < im->height(); i++)
		for (unsigned int j = 0; j < im->width(); j++) {
			if (weights->get_pixel(i, j).min_norm() < encounter_threshold
			 || (d3px_count > 0 && isnan(depths->get_chan(i, j, 0)))) {
				im->set_pixel(i, j, d2::pixel::zero() / d2::pixel::zero());
				weights->set_pixel(i, j, d2::pixel::zero());
			} else if (normalize_weights)
				im->set_pixel(i, j, (d2::pixel) im->get_pixel(i, j)
					    / (d2::pixel) weights->get_pixel(i, j));
		}

		delete weights;

		delete depths;

		return im;
	}
3033 * Filtered function.
/*
 * view_filter_focus: render a filtered view from viewpoint _pt under
 * non-trivial focus (depth-of-field) settings.  Each focus sample v gets its
 * own 2D point-renderer pass; results are combined in a weighted average
 * (median-based or simple, per the focus settings).  n is the frame index
 * used for depth estimation, or negative.  Returns a newly allocated
 * weighted-average image -- presumably owned by the caller; TODO confirm.
 *
 * NOTE(review): this chunk is a lossy extraction (stray line numbers,
 * missing blank/brace lines); code lines are preserved verbatim.
 */
3035 static const d2::image *view_filter_focus(pt _pt, int n) {
3037 assert ((unsigned int) n < d2::image_rw::count() || n < 0);
3040 * Get depth image for focus region determination.
3043 const d2::image *depths = depth(_pt, n);
3045 unsigned int height = (unsigned int) floor(_pt.scaled_height());
3046 unsigned int width = (unsigned int) floor(_pt.scaled_width());
3049 * Prepare input frame data.
/* With no third-camera term, frames need only be open for this setup pass. */
3052 if (tc_multiplier == 0)
3053 al->open_all();
/*
 * NOTE(review): _ptf and fmv are allocated with new[] but no matching
 * delete[] is visible before the return below -- apparent leak; likewise
 * 'depths' is never freed here (compare view_nofilter).  Confirm against
 * the full file.
 */
3055 pt *_ptf = new pt[al->count()];
3056 std::vector<space::node *> *fmv = new std::vector<space::node *>[al->count()];
/* Cache each input frame's camera transform and most-visible subspace list. */
3058 for (unsigned int f = 0; f < al->count(); f++) {
3059 _ptf[f] = al->get(f)->get_t(0);
3060 fmv[f] = most_visible_unscaled(_ptf[f]);
/* Sorted so visibility_search() can search efficiently -- TODO confirm. */
3061 std::sort(fmv[f].begin(), fmv[f].end());
3064 if (tc_multiplier == 0)
3065 al->close_all();
3068 * Open all files for rendering.
3071 d2::image_rw::open_all();
3074 * Prepare data structures for averaging views, as we render
3075 * each view separately. This is spacewise inefficient, but
3076 * is easy to implement given the current operation of the
3077 * renderers.
3080 d2::image_weighted_avg *iwa;
/* Median averaging needs storage for all focus samples; simple does not. */
3082 if (d3::focus::uses_medians()) {
3083 iwa = new d2::image_weighted_median(height, width, 3, focus::max_samples());
3084 } else {
3085 iwa = new d2::image_weighted_simple(height, width, 3, new d2::invariant(NULL));
3088 _pt.view_angle(_pt.view_angle() * VIEW_ANGLE_MULTIPLIER);
3091 * Prepare view generator.
3094 view_generator vg(_pt);
3097 * Render views separately. This is spacewise inefficient,
3098 * but is easy to implement given the current operation of the
3099 * renderers.
3102 for (unsigned int v = 0; v < focus::max_samples(); v++) {
3105 * Generate a new 2D renderer for filtering.
3108 d2::render::reset();
3109 d2::render *renderer = d2::render_parse::get(d3chain_type);
3111 renderer->init_point_renderer(height, width, 3);
3114 * Iterate over output points.
3117 for (unsigned int i = 0; i < height; i++)
3118 for (unsigned int j = 0; j < width; j++) {
3120 focus::result _focus = focus::get(depths, i, j);
/* Skip samples beyond this pixel's sample count or with undefined focus. */
3122 if (v >= _focus.sample_count)
3123 continue;
3125 if (!finite(_focus.focal_distance))
3126 continue;
/* Perturbed view for this focus sample; map the output pixel through it. */
3128 view_generator::view vw = vg.get_view(_focus.aperture, v, _focus.randomization);
3130 pt _pt_new = vw.get_pt();
3132 point p = _pt_new.wp_scaled(_pt.pw_scaled(point(i, j, _focus.focal_distance)));
3135 * Determine the most-visible subspace.
3138 space::node *mv = vw.get_most_visible(p.xy());
3140 if (mv == NULL)
3141 continue;
3144 * Get median depth and diff.
3147 d2::pixel depth, diff;
3149 vw.get_median_depth_and_diff(&depth, &diff, p.xy());
3151 if (!depth.finite() || !diff.finite())
3152 continue;
/* Three points spanning the local surface tangent at (p, depth). */
3154 point local_points[3] = {
3155 point(p[0], p[1], ale_real_to_pos(depth[0])),
3156 point(p[0] + 1, p[1], ale_real_to_pos(depth[0] + diff[0])),
3157 point(p[0], p[1] + 1, ale_real_to_pos(depth[0] + diff[1]))
3161 * Iterate over files.
3164 for (unsigned int f = 0; f < d2::image_rw::count(); f++) {
3166 ui::get()->d3_render_status(1, 1, f, v, i, j, -1);
/* Skip frames from which the chosen subspace is not visible. */
3168 if (!visibility_search(fmv[f], mv))
3169 continue;
3172 * Determine transformation at (i, j). First
3173 * determine transformation from the output to
3174 * the input, then invert this, as we need the
3175 * inverse transformation for filtering.
3178 d2::point remote_points[3] = {
3179 _ptf[f].wp_unscaled(_pt_new.pw_scaled(point(local_points[0]))).xy(),
3180 _ptf[f].wp_unscaled(_pt_new.pw_scaled(point(local_points[1]))).xy(),
3181 _ptf[f].wp_unscaled(_pt_new.pw_scaled(point(local_points[2]))).xy()
3185 * Forward matrix for the linear component of the
3186 * transformation.
3189 d2::point forward_matrix[2] = {
3190 remote_points[1] - remote_points[0],
3191 remote_points[2] - remote_points[0]
3195 * Inverse matrix for the linear component of
3196 * the transformation. Calculate using the
3197 * determinant D.
3200 ale_pos D = forward_matrix[0][0] * forward_matrix[1][1]
3201 - forward_matrix[0][1] * forward_matrix[1][0];
/* Degenerate (non-invertible) local transformation: skip. */
3203 if (D == 0)
3204 continue;
3206 d2::point inverse_matrix[2] = {
3207 d2::point( forward_matrix[1][1] / D, -forward_matrix[1][0] / D),
3208 d2::point(-forward_matrix[0][1] / D, forward_matrix[0][0] / D)
3212 * Determine the projective transformation parameters for the
3213 * inverse transformation.
3216 const d2::image *imf = d2::image_rw::get_open(f);
3218 d2::transformation inv_t = d2::transformation::gpt_identity(imf, 1);
3220 d2::point local_bounds[4];
/* Map the input frame's four corners through the inverse linear map. */
3222 for (int n = 0; n < 4; n++) {
3223 d2::point remote_bound = d2::point((n == 1 || n == 2) ? imf->height() : 0,
3224 (n == 2 || n == 3) ? imf->width() : 0)
3225 - remote_points[0];
3227 local_bounds[n] = d2::point(i, j)
3228 + d2::point(remote_bound[0] * inverse_matrix[0][0]
3229 + remote_bound[1] * inverse_matrix[1][0],
3230 remote_bound[0] * inverse_matrix[0][1]
3231 + remote_bound[1] * inverse_matrix[1][1]);
3235 if (!local_bounds[0].finite()
3236 || !local_bounds[1].finite()
3237 || !local_bounds[2].finite()
3238 || !local_bounds[3].finite())
3239 continue;
3241 inv_t.gpt_set(local_bounds);
3244 * Perform render step for the given frame,
3245 * transformation, and point.
3248 renderer->point_render(i, j, f, inv_t);
3252 renderer->finish_point_rendering();
3254 const d2::image *im = renderer->get_image();
3255 const d2::image *df = renderer->get_defined();
/* Accumulate this focus sample's defined pixels into the running average. */
3257 for (unsigned int i = 0; i < height; i++)
3258 for (unsigned int j = 0; j < width; j++) {
3259 if (((d2::pixel) df->get_pixel(i, j)).finite()
3260 && df->get_pixel(i, j)[0] > 0)
3261 iwa->accumulate(i, j, v, im->get_pixel(i, j), d2::pixel(1, 1, 1));
3266 * Close all files and return the result.
3269 d2::image_rw::close_all();
3271 return iwa;
/*
 * view_filter: render a filtered view from viewpoint _pt with trivial focus
 * settings (delegates to view_filter_focus() otherwise).  Estimates a local
 * surface normal per output pixel from median depth/diff images, derives the
 * inverse projective transformation into each input frame, and feeds each
 * (pixel, frame) pair to a 2D point renderer.  n is the frame index used for
 * depth estimation, or negative.  Returns the renderer's image (ownership
 * unclear from here -- TODO confirm).
 *
 * NOTE(review): this chunk is a lossy extraction (stray line numbers,
 * missing blank/brace lines); code lines are preserved verbatim.
 */
3274 static const d2::image *view_filter(pt _pt, int n) {
3276 if (!focus::is_trivial())
3277 return view_filter_focus(_pt, n);
3279 assert ((unsigned int) n < d2::image_rw::count() || n < 0);
3282 * Generate a new 2D renderer for filtering.
3285 d2::render::reset();
3286 d2::render *renderer = d2::render_parse::get(d3chain_type);
3289 * Get depth image in order to estimate normals (and hence
3290 * transformations).
/*
 * NOTE(review): depths, median_diffs, and median_depths are heap-allocated
 * but no delete is visible before the return below -- apparent leak;
 * confirm against the full file.
 */
3293 const d2::image *depths = depth(_pt, n);
3295 d2::image *median_diffs = depths->fcdiff_median((int) floor(diff_median_radius));
3296 d2::image *median_depths = depths->medians((int) floor(depth_median_radius));
3298 unsigned int height = (unsigned int) floor(_pt.scaled_height());
3299 unsigned int width = (unsigned int) floor(_pt.scaled_width());
3301 renderer->init_point_renderer(height, width, 3);
3303 _pt.view_angle(_pt.view_angle() * VIEW_ANGLE_MULTIPLIER);
/* Most-visible subspace per scaled output pixel, indexed by i * width + j. */
3305 std::vector<space::node *> mv = most_visible_scaled(_pt);
3307 for (unsigned int f = 0; f < d2::image_rw::count(); f++) {
3309 if (tc_multiplier == 0)
3310 al->open(f);
3312 pt _ptf = al->get(f)->get_t(0);
3314 std::vector<space::node *> fmv = most_visible_unscaled(_ptf);
3315 std::sort(fmv.begin(), fmv.end());
3317 for (unsigned int i = 0; i < height; i++)
3318 for (unsigned int j = 0; j < width; j++) {
3320 ui::get()->d3_render_status(1, 0, f, -1, i, j, -1);
3323 * Check visibility.
/* Note: this 'n' shadows the function parameter within the pixel loop. */
3326 int n = i * width + j;
3328 if (!visibility_search(fmv, mv[n]))
3329 continue;
3332 * Find depth and diff at this point, check for
3333 * undefined values, and generate projections
3334 * of the image corners on the estimated normal
3335 * surface.
3338 d2::pixel depth = median_depths->get_pixel(i, j);
3339 d2::pixel diff = median_diffs->get_pixel(i, j);
3340 // d2::pixel diff = d2::pixel(0, 0, 0);
3342 if (!depth.finite() || !diff.finite())
3343 continue;
3345 point local_points[3] = {
3346 point(i, j, ale_real_to_pos(depth[0])),
3347 point(i + 1, j, ale_real_to_pos(depth[0] + diff[0])),
3348 point(i , j + 1, ale_real_to_pos(depth[0] + diff[1]))
3352 * Determine transformation at (i, j). First
3353 * determine transformation from the output to
3354 * the input, then invert this, as we need the
3355 * inverse transformation for filtering.
3358 d2::point remote_points[3] = {
3359 _ptf.wp_unscaled(_pt.pw_scaled(point(local_points[0]))).xy(),
3360 _ptf.wp_unscaled(_pt.pw_scaled(point(local_points[1]))).xy(),
3361 _ptf.wp_unscaled(_pt.pw_scaled(point(local_points[2]))).xy()
3365 * Forward matrix for the linear component of the
3366 * transformation.
3369 d2::point forward_matrix[2] = {
3370 remote_points[1] - remote_points[0],
3371 remote_points[2] - remote_points[0]
3375 * Inverse matrix for the linear component of
3376 * the transformation. Calculate using the
3377 * determinant D.
3380 ale_pos D = forward_matrix[0][0] * forward_matrix[1][1]
3381 - forward_matrix[0][1] * forward_matrix[1][0];
/* Degenerate local transformation: skip this pixel. */
3383 if (D == 0)
3384 continue;
3386 d2::point inverse_matrix[2] = {
3387 d2::point( forward_matrix[1][1] / D, -forward_matrix[1][0] / D),
3388 d2::point(-forward_matrix[0][1] / D, forward_matrix[0][0] / D)
3392 * Determine the projective transformation parameters for the
3393 * inverse transformation.
/*
 * NOTE(review): the frame is opened and closed per pixel (here and again
 * around point_render below) -- correct but potentially very slow;
 * consider hoisting out of the pixel loop.  Confirm open/close semantics.
 */
3396 const d2::image *imf = d2::image_rw::open(f);
3398 d2::transformation inv_t = d2::transformation::gpt_identity(imf, 1);
3400 d2::point local_bounds[4];
/* Map the input frame's four corners through the inverse linear map. */
3402 for (int n = 0; n < 4; n++) {
3403 d2::point remote_bound = d2::point((n == 1 || n == 2) ? imf->height() : 0,
3404 (n == 2 || n == 3) ? imf->width() : 0)
3405 - remote_points[0];
3407 local_bounds[n] = local_points[0].xy()
3408 + d2::point(remote_bound[0] * inverse_matrix[0][0]
3409 + remote_bound[1] * inverse_matrix[1][0],
3410 remote_bound[0] * inverse_matrix[0][1]
3411 + remote_bound[1] * inverse_matrix[1][1]);
3414 inv_t.gpt_set(local_bounds);
3416 d2::image_rw::close(f);
3419 * Perform render step for the given frame,
3420 * transformation, and point.
3423 d2::image_rw::open(f);
3424 renderer->point_render(i, j, f, inv_t);
3425 d2::image_rw::close(f);
3428 if (tc_multiplier == 0)
3429 al->close(f);
3432 renderer->finish_point_rendering();
3434 return renderer->get_image();
3438 * Generic function.
3440 static const d2::image *view(pt _pt, int n = -1) {
3442 assert ((unsigned int) n < d2::image_rw::count() || n < 0);
3444 if (use_filter) {
3445 return view_filter(_pt, n);
3446 } else {
3447 return view_nofilter(_pt, n);
3451 static void tcem(double _tcem) {
3452 tc_multiplier = _tcem;
3455 static void oui(unsigned int _oui) {
3456 ou_iterations = _oui;
3459 static void pa(unsigned int _pa) {
3460 pairwise_ambiguity = _pa;
3463 static void pc(const char *_pc) {
3464 pairwise_comparisons = _pc;
3467 static void d3px(int _d3px_count, double *_d3px_parameters) {
3468 d3px_count = _d3px_count;
3469 d3px_parameters = _d3px_parameters;
3472 static void fx(double _fx) {
3473 falloff_exponent = _fx;
3476 static void nw() {
3477 normalize_weights = 1;
3480 static void no_nw() {
3481 normalize_weights = 0;
3484 static void nofilter() {
3485 use_filter = 0;
3488 static void filter() {
3489 use_filter = 1;
3492 static void set_filter_type(const char *type) {
3493 d3chain_type = type;
3496 static void set_subspace_traverse() {
3497 subspace_traverse = 1;
3500 static int excluded(point p) {
3501 for (int n = 0; n < d3px_count; n++) {
3502 double *region = d3px_parameters + (6 * n);
3503 if (p[0] >= region[0]
3504 && p[0] <= region[1]
3505 && p[1] >= region[2]
3506 && p[1] <= region[3]
3507 && p[2] >= region[4]
3508 && p[2] <= region[5])
3509 return 1;
3512 return 0;
3516 * This function returns true if a space is completely excluded.
3518 static int excluded(const space::traverse &st) {
3519 for (int n = 0; n < d3px_count; n++) {
3520 double *region = d3px_parameters + (6 * n);
3521 if (st.get_min()[0] >= region[0]
3522 && st.get_max()[0] <= region[1]
3523 && st.get_min()[1] >= region[2]
3524 && st.get_max()[1] <= region[3]
3525 && st.get_min()[2] >= region[4]
3526 && st.get_max()[2] <= region[5])
3527 return 1;
3530 return 0;
3533 static const d2::image *view(unsigned int n) {
3535 assert (n < d2::image_rw::count());
3537 pt _pt = align::projective(n);
3539 return view(_pt, n);
/*
 * Scoring types: 'analytic' bundles a candidate world point (iw) with its
 * projections into the primary (ip) and secondary (is) frames; score_map
 * associates scores with candidates, ordered by score (a multimap, so equal
 * scores are retained).
 */
3542 typedef struct {point iw; point ip, is;} analytic;
3543 typedef std::multimap<ale_real,analytic> score_map;
3544 typedef std::pair<ale_real,analytic> score_map_element;
3547 * Make pt list.
3549 static std::vector<pt> make_pt_list(const char *d_out[], const char *v_out[],
3550 std::map<const char *, pt> *d3_depth_pt,
3551 std::map<const char *, pt> *d3_output_pt) {
3553 std::vector<pt> result;
3555 for (unsigned int n = 0; n < d2::image_rw::count(); n++) {
3556 if (d_out[n] || v_out[n]) {
3557 result.push_back(align::projective(n));
3561 for (std::map<const char *, pt>::iterator i = d3_depth_pt->begin(); i != d3_depth_pt->end(); i++) {
3562 result.push_back(i->second);
3565 for (std::map<const char *, pt>::iterator i = d3_output_pt->begin(); i != d3_output_pt->end(); i++) {
3566 result.push_back(i->second);
3569 return result;
3573 * Get a trilinear coordinate for an anisotropic candidate cell.
3575 static ale_pos get_trilinear_coordinate(point min, point max, pt _pt) {
3577 d2::point local_min, local_max;
3579 local_min = _pt.wp_unscaled(min).xy();
3580 local_max = _pt.wp_unscaled(min).xy();
3582 point cell[2] = {min, max};
3585 * Determine the view-local extrema in 2 dimensions.
3588 for (int r = 1; r < 8; r++) {
3589 point local = _pt.wp_unscaled(point(cell[r>>2][0], cell[(r>>1)%2][1], cell[r%2][2]));
3591 for (int d = 0; d < 2; d++) {
3592 if (local[d] < local_min[d])
3593 local_min[d] = local[d];
3594 if (local[d] > local_max[d])
3595 local_max[d] = local[d];
3596 if (isnan(local[d]))
3597 return local[d];
3601 ale_pos diameter = (local_max - local_min).norm();
3603 return log((double) diameter / sqrt(2)) / log(2);
3607 * Check whether a cell is visible from a given viewpoint. This function
3608 * is guaranteed to return 1 when a cell is visible, but it is not guaranteed
3609 * to return 0 when a cell is invisible.
3611 static int pt_might_be_visible(const pt &viewpoint, point min, point max) {
3613 int doc = (rand() % 100000) ? 0 : 1;
3615 if (doc)
3616 fprintf(stderr, "checking visibility:\n");
3618 point cell[2] = {min, max};
3621 * Cycle through all vertices of the cell to check certain
3622 * properties.
3624 int pos[3] = {0, 0, 0};
3625 int neg[3] = {0, 0, 0};
3626 for (int i = 0; i < 2; i++)
3627 for (int j = 0; j < 2; j++)
3628 for (int k = 0; k < 2; k++) {
3629 point p = viewpoint.wp_unscaled(point(cell[i][0], cell[j][1], cell[k][2]));
3631 if (p[2] < 0 && viewpoint.unscaled_in_bounds(p))
3632 return 1;
3634 if (isnan(p[0])
3635 || isnan(p[1])
3636 || isnan(p[2]))
3637 return 1;
3639 if (p[2] > 0)
3640 for (int d = 0; d < 2; d++)
3641 p[d] *= -1;
3643 if (doc)
3644 fprintf(stderr, "\t[%f %f %f] --> [%f %f %f]\n",
3645 (double) cell[i][0], (double) cell[j][1], (double) cell[k][2],
3646 (double) p[0], (double) p[1], (double) p[2]);
3648 for (int d = 0; d < 3; d++)
3649 if (p[d] >= 0)
3650 pos[d] = 1;
3652 if (p[0] <= viewpoint.unscaled_height() - 1)
3653 neg[0] = 1;
3655 if (p[1] <= viewpoint.unscaled_width() - 1)
3656 neg[1] = 1;
3658 if (p[2] <= 0)
3659 neg[2] = 1;
3662 if (!neg[2])
3663 return 0;
3665 if (!pos[0]
3666 || !neg[0]
3667 || !pos[1]
3668 || !neg[1])
3669 return 0;
3671 return 1;
3675 * Check whether a cell is output-visible.
3677 static int output_might_be_visible(const std::vector<pt> &pt_outputs, point min, point max) {
3678 for (unsigned int n = 0; n < pt_outputs.size(); n++)
3679 if (pt_might_be_visible(pt_outputs[n], min, max))
3680 return 1;
3681 return 0;
3685 * Check whether a cell is input-visible.
3687 static int input_might_be_visible(unsigned int f, point min, point max) {
3688 return pt_might_be_visible(align::projective(f), min, max);
3692 * Return true if a cell fails an output resolution bound.
3694 static int fails_output_resolution_bound(point min, point max, const std::vector<pt> &pt_outputs) {
3695 for (unsigned int n = 0; n < pt_outputs.size(); n++) {
3697 point p = pt_outputs[n].centroid(min, max);
3699 if (!p.defined())
3700 continue;
3702 if (get_trilinear_coordinate(min, max, pt_outputs[n]) < output_decimation_preferred)
3703 return 1;
3706 return 0;
3710 * Check lower-bound resolution constraints
3712 static int exceeds_resolution_lower_bounds(unsigned int f1, unsigned int f2,
3713 point min, point max, const std::vector<pt> &pt_outputs) {
3715 pt _pt = al->get(f1)->get_t(0);
3717 if (get_trilinear_coordinate(min, max, _pt) < input_decimation_lower)
3718 return 1;
3720 if (fails_output_resolution_bound(min, max, pt_outputs))
3721 return 0;
3723 if (get_trilinear_coordinate(min, max, _pt) < primary_decimation_upper)
3724 return 1;
3726 return 0;
3730 * Try the candidate nearest to the specified cell.
3732 static void try_nearest_candidate(unsigned int f1, unsigned int f2, candidates *c, point min, point max) {
3733 point centroid = (max + min) / 2;
3734 pt _pt[2] = { al->get(f1)->get_t(0), al->get(f2)->get_t(0) };
3735 point p[2];
3737 // fprintf(stderr, "[tnc n=%f %f %f x=%f %f %f]\n", min[0], min[1], min[2], max[0], max[1], max[2]);
3740 * Reject clipping plane violations.
3743 if (centroid[2] > front_clip
3744 || centroid[2] < rear_clip)
3745 return;
3748 * Calculate projections.
3751 for (int n = 0; n < 2; n++) {
3753 p[n] = _pt[n].wp_unscaled(centroid);
3755 if (!_pt[n].unscaled_in_bounds(p[n]))
3756 return;
3758 // fprintf(stderr, ":");
3760 if (p[n][2] >= 0)
3761 return;
3765 int tc = (int) round(get_trilinear_coordinate(min, max, _pt[0]));
3766 int stc = (int) round(get_trilinear_coordinate(min, max, _pt[1]));
3768 while (tc < input_decimation_lower || stc < input_decimation_lower) {
3769 tc++;
3770 stc++;
3773 if (tc > primary_decimation_upper)
3774 return;
3777 * Calculate score from color match. Assume for now
3778 * that the transformation can be approximated locally
3779 * with a translation.
3782 ale_pos score = 0;
3783 ale_pos divisor = 0;
3784 ale_real l1_multiplier = 0.125;
3785 lod_image *if1 = al->get(f1);
3786 lod_image *if2 = al->get(f2);
3788 if (if1->in_bounds(p[0].xy())
3789 && if2->in_bounds(p[1].xy())) {
3790 divisor += ale_real_to_pos(1 - l1_multiplier);
3791 score += ale_real_to_pos((1 - l1_multiplier)
3792 * (if1->get_tl(p[0].xy(), tc) - if2->get_tl(p[1].xy(), stc)).normsq());
3795 for (int iii = -1; iii <= 1; iii++)
3796 for (int jjj = -1; jjj <= 1; jjj++) {
3797 d2::point t(iii, jjj);
3799 if (!if1->in_bounds(p[0].xy() + t)
3800 || !if2->in_bounds(p[1].xy() + t))
3801 continue;
3803 divisor += ale_real_to_pos(l1_multiplier);
3804 score += ale_real_to_pos(l1_multiplier
3805 * (if1->get_tl(p[0].xy() + t, tc) - if2->get_tl(p[1].xy() + t, tc)).normsq());
3810 * Include third-camera contributions in the score.
3813 if (tc_multiplier != 0)
3814 for (unsigned int n = 0; n < d2::image_rw::count(); n++) {
3815 if (n == f1 || n == f2)
3816 continue;
3818 lod_image *ifn = al->get(n);
3819 pt _ptn = ifn->get_t(0);
3820 point pn = _ptn.wp_unscaled(centroid);
3822 if (!_ptn.unscaled_in_bounds(pn))
3823 continue;
3825 if (pn[2] >= 0)
3826 continue;
3828 ale_pos ttc = get_trilinear_coordinate(min, max, _ptn);
3830 divisor += tc_multiplier;
3831 score += tc_multiplier
3832 * (if1->get_tl(p[0].xy(), tc) - ifn->get_tl(pn.xy(), ttc)).normsq();
3835 c->add_candidate(p[0], tc, score / divisor);
3839 * Check for cells that are completely clipped.
3841 static int completely_clipped(point min, point max) {
3842 return (min[2] > front_clip
3843 || max[2] < rear_clip);
3847 * Update extremum variables for cell points mapped to a particular view.
3849 static void update_extrema(point min, point max, pt _pt, int *extreme_dim, ale_pos *extreme_ratio) {
3851 point local_min, local_max;
3853 local_min = _pt.wp_unscaled(min);
3854 local_max = _pt.wp_unscaled(min);
3856 point cell[2] = {min, max};
3858 int near_vertex = 0;
3861 * Determine the view-local extrema in all dimensions, and
3862 * determine the vertex of closest z coordinate.
3865 for (int r = 1; r < 8; r++) {
3866 point local = _pt.wp_unscaled(point(cell[r>>2][0], cell[(r>>1)%2][1], cell[r%2][2]));
3868 for (int d = 0; d < 3; d++) {
3869 if (local[d] < local_min[d])
3870 local_min[d] = local[d];
3871 if (local[d] > local_max[d])
3872 local_max[d] = local[d];
3875 if (local[2] == local_max[2])
3876 near_vertex = r;
3879 ale_pos diameter = (local_max.xy() - local_min.xy()).norm();
3882 * Update extrema as necessary for each dimension.
3885 for (int d = 0; d < 3; d++) {
3887 int r = near_vertex;
3889 int p1[3] = {r>>2, (r>>1)%2, r%2};
3890 int p2[3] = {r>>2, (r>>1)%2, r%2};
3892 p2[d] = 1 - p2[d];
3894 ale_pos local_distance = (_pt.wp_unscaled(point(cell[p1[0]][0], cell[p1[1]][1], cell[p1[2]][2])).xy()
3895 - _pt.wp_unscaled(point(cell[p2[0]][0], cell[p2[1]][1], cell[p2[2]][2])).xy()).norm();
3897 if (local_distance / diameter > *extreme_ratio) {
3898 *extreme_ratio = local_distance / diameter;
3899 *extreme_dim = d;
3905 * Get the next split dimension.
3907 static int get_next_split(int f1, int f2, point min, point max, const std::vector<pt> &pt_outputs) {
3908 for (int d = 0; d < 3; d++)
3909 if (isinf(min[d]) || isinf(max[d]))
3910 return space::traverse::get_next_split(min, max);
3912 int extreme_dim = 0;
3913 ale_pos extreme_ratio = 0;
3915 update_extrema(min, max, al->get(f1)->get_t(0), &extreme_dim, &extreme_ratio);
3916 update_extrema(min, max, al->get(f2)->get_t(0), &extreme_dim, &extreme_ratio);
3918 for (unsigned int n = 0; n < pt_outputs.size(); n++) {
3919 update_extrema(min, max, pt_outputs[n], &extreme_dim, &extreme_ratio);
3922 return extreme_dim;
3926 * Find candidates for subspace creation.
3928 static void find_candidates(unsigned int f1, unsigned int f2, candidates *c, point min, point max,
3929 const std::vector<pt> &pt_outputs, int depth = 0) {
3931 int print = 0;
3933 if (min[0] < 20.0001 && max[0] > 20.0001
3934 && min[1] < 20.0001 && max[1] > 20.0001
3935 && min[2] < 0.0001 && max[2] > 0.0001)
3936 print = 1;
3938 if (print) {
3939 for (int i = depth; i > 0; i--) {
3940 fprintf(stderr, "+");
3942 fprintf(stderr, "[fc n=%f %f %f x=%f %f %f]\n",
3943 (double) min[0], (double) min[1], (double) min[2], (double) max[0], (double) max[1], (double) max[2]);
3946 if (completely_clipped(min, max)) {
3947 if (print)
3948 fprintf(stderr, "c");
3949 return;
3952 if (!input_might_be_visible(f1, min, max)
3953 || !input_might_be_visible(f2, min, max)) {
3954 if (print)
3955 fprintf(stderr, "v");
3956 return;
3959 if (output_clip && !output_might_be_visible(pt_outputs, min, max)) {
3960 if (print)
3961 fprintf(stderr, "o");
3962 return;
3965 if (exceeds_resolution_lower_bounds(f1, f2, min, max, pt_outputs)) {
3966 if (!(rand() % 100000))
3967 fprintf(stderr, "([%f %f %f], [%f %f %f]) at %d\n",
3968 (double) min[0], (double) min[1], (double) min[2],
3969 (double) max[0], (double) max[1], (double) max[2],
3970 __LINE__);
3972 if (print)
3973 fprintf(stderr, "t");
3975 try_nearest_candidate(f1, f2, c, min, max);
3976 return;
3979 point new_cells[2][2];
3981 if (!space::traverse::get_next_cells(get_next_split(f1, f2, min, max, pt_outputs), min, max, new_cells)) {
3982 if (print)
3983 fprintf(stderr, "n");
3984 return;
3987 if (print) {
3988 fprintf(stderr, "nc[0][0]=%f %f %f nc[0][1]=%f %f %f nc[1][0]=%f %f %f nc[1][1]=%f %f %f\n",
3989 (double) new_cells[0][0][0],
3990 (double) new_cells[0][0][1],
3991 (double) new_cells[0][0][2],
3992 (double) new_cells[0][1][0],
3993 (double) new_cells[0][1][1],
3994 (double) new_cells[0][1][2],
3995 (double) new_cells[1][0][0],
3996 (double) new_cells[1][0][1],
3997 (double) new_cells[1][0][2],
3998 (double) new_cells[1][1][0],
3999 (double) new_cells[1][1][1],
4000 (double) new_cells[1][1][2]);
4003 find_candidates(f1, f2, c, new_cells[0][0], new_cells[0][1], pt_outputs, depth + 1);
4004 find_candidates(f1, f2, c, new_cells[1][0], new_cells[1][1], pt_outputs, depth + 1);
4008 * Generate a map from scores to 3D points for various depths at point (i, j) in f1, at
4009 * lowest resolution.
4011 static score_map p2f_score_map(unsigned int f1, unsigned int f2, unsigned int i, unsigned int j) {
4013 score_map result;
4015 pt _pt1 = al->get(f1)->get_t(primary_decimation_upper);
4016 pt _pt2 = al->get(f2)->get_t(primary_decimation_upper);
4018 const d2::image *if1 = al->get(f1)->get_image(primary_decimation_upper);
4019 const d2::image *if2 = al->get(f2)->get_image(primary_decimation_upper);
4020 ale_pos pdu_scale = pow(2, primary_decimation_upper);
4023 * Get the pixel color in the primary frame
4026 // d2::pixel color_primary = if1->get_pixel(i, j);
4029 * Map two depths to the secondary frame.
4032 point p1 = _pt2.wp_unscaled(_pt1.pw_unscaled(point(i, j, 1000)));
4033 point p2 = _pt2.wp_unscaled(_pt1.pw_unscaled(point(i, j, -1000)));
4035 // fprintf(stderr, "%d->%d (%d, %d) point pair: (%d, %d, %d -> %f, %f), (%d, %d, %d -> %f, %f)\n",
4036 // f1, f2, i, j, i, j, 1000, p1[0], p1[1], i, j, -1000, p2[0], p2[1]);
4037 // _pt1.debug_output();
4038 // _pt2.debug_output();
4042 * For cases where the mapped points define a
4043 * line and where points on the line fall
4044 * within the defined area of the frame,
4045 * determine the starting point for inspection.
4046 * In other cases, continue to the next pixel.
4049 ale_pos diff_i = p2[0] - p1[0];
4050 ale_pos diff_j = p2[1] - p1[1];
4051 ale_pos slope = diff_j / diff_i;
4053 if (isnan(slope)) {
4054 assert(0);
4055 fprintf(stderr, "%d->%d (%d, %d) has undefined slope\n",
4056 f1, f2, i, j);
4057 return result;
4061 * Make absurdly large/small slopes either infinity, negative infinity, or zero.
4064 if (fabs(slope) > if2->width() * 100) {
4065 double zero = 0;
4066 double one = 1;
4067 double inf = one / zero;
4068 slope = inf;
4069 } else if (slope < 1 / (double) if2->height() / 100
4070 && slope > -1/ (double) if2->height() / 100) {
4071 slope = 0;
4074 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4076 ale_pos top_intersect = p1[1] - p1[0] * slope;
4077 ale_pos lef_intersect = p1[0] - p1[1] / slope;
4078 ale_pos rig_intersect = p1[0] - (p1[1] - if2->width() + 2) / slope;
4079 ale_pos sp_i, sp_j;
4081 // fprintf(stderr, "slope == %f\n", slope);
4084 if (slope == 0) {
4085 // fprintf(stderr, "case 0\n");
4086 sp_i = lef_intersect;
4087 sp_j = 0;
4088 } else if (finite(slope) && top_intersect >= 0 && top_intersect < if2->width() - 1) {
4089 // fprintf(stderr, "case 1\n");
4090 sp_i = 0;
4091 sp_j = top_intersect;
4092 } else if (slope > 0 && lef_intersect >= 0 && lef_intersect <= if2->height() - 1) {
4093 // fprintf(stderr, "case 2\n");
4094 sp_i = lef_intersect;
4095 sp_j = 0;
4096 } else if (slope < 0 && rig_intersect >= 0 && rig_intersect <= if2->height() - 1) {
4097 // fprintf(stderr, "case 3\n");
4098 sp_i = rig_intersect;
4099 sp_j = if2->width() - 2;
4100 } else {
4101 // fprintf(stderr, "case 4\n");
4102 // fprintf(stderr, "%d->%d (%d, %d) does not intersect the defined area\n",
4103 // f1, f2, i, j);
4104 return result;
4108 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4111 * Determine increment values for examining
4112 * point, ensuring that incr_i is always
4113 * positive.
4116 ale_pos incr_i, incr_j;
4118 if (fabs(diff_i) > fabs(diff_j)) {
4119 incr_i = 1;
4120 incr_j = slope;
4121 } else if (slope > 0) {
4122 incr_i = 1 / slope;
4123 incr_j = 1;
4124 } else {
4125 incr_i = -1 / slope;
4126 incr_j = -1;
4129 // fprintf(stderr, "%d->%d (%d, %d) increments are (%f, %f)\n",
4130 // f1, f2, i, j, incr_i, incr_j);
4133 * Examine regions near the projected line.
4136 for (ale_pos ii = sp_i, jj = sp_j;
4137 ii <= if2->height() - 1 && jj <= if2->width() - 1 && ii >= 0 && jj >= 0;
4138 ii += incr_i, jj += incr_j) {
4140 // fprintf(stderr, "%d->%d (%d, %d) checking (%f, %f)\n",
4141 // f1, f2, i, j, ii, jj);
4143 #if 0
4145 * Check for higher, lower, and nearby points.
4147 * Red = 2^0
4148 * Green = 2^1
4149 * Blue = 2^2
4152 int higher = 0, lower = 0, nearby = 0;
4154 for (int iii = 0; iii < 2; iii++)
4155 for (int jjj = 0; jjj < 2; jjj++) {
4156 d2::pixel p = if2->get_pixel((int) floor(ii) + iii, (int) floor(jj) + jjj);
4158 for (int k = 0; k < 3; k++) {
4159 int bitmask = (int) pow(2, k);
4161 if (p[k] > color_primary[k])
4162 higher |= bitmask;
4163 if (p[k] < color_primary[k])
4164 lower |= bitmask;
4165 if (fabs(p[k] - color_primary[k]) < nearness)
4166 nearby |= bitmask;
4171 * If this is not a region of interest,
4172 * then continue.
4176 fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4178 // if (((higher & lower) | nearby) != 0x7)
4179 // continue;
4180 #endif
4181 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4183 // fprintf(stderr, "%d->%d (%d, %d) accepted (%f, %f)\n",
4184 // f1, f2, i, j, ii, jj);
4187 * Create an orthonormal basis to
4188 * determine line intersection.
4191 point bp0 = _pt1.pw_unscaled(point(i, j, 0));
4192 point bp1 = _pt1.pw_unscaled(point(i, j, 10));
4193 point bp2 = _pt2.pw_unscaled(point(ii, jj, 0));
4195 point foo = _pt1.wp_unscaled(bp0);
4196 // fprintf(stderr, "(%d, %d, 0) transformed to world and back is: (%f, %f, %f)\n",
4197 // i, j, foo[0], foo[1], foo[2]);
4199 foo = _pt1.wp_unscaled(bp1);
4200 // fprintf(stderr, "(%d, %d, 10) transformed to world and back is: (%f, %f, %f)\n",
4201 // i, j, foo[0], foo[1], foo[2]);
4203 point b0 = (bp1 - bp0).normalize();
4204 point b1n = bp2 - bp0;
4205 point b1 = (b1n - b1n.dproduct(b0) * b0).normalize();
4206 point b2 = point(0, 0, 0).xproduct(b0, b1).normalize(); // Should already have norm=1
4209 foo = _pt1.wp_unscaled(bp0 + 30 * b0);
4212 * Select a fourth point to define a second line.
4215 point p3 = _pt2.pw_unscaled(point(ii, jj, 10));
4218 * Representation in the new basis.
4221 d2::point nbp0 = d2::point(bp0.dproduct(b0), bp0.dproduct(b1));
4222 // d2::point nbp1 = d2::point(bp1.dproduct(b0), bp1.dproduct(b1));
4223 d2::point nbp2 = d2::point(bp2.dproduct(b0), bp2.dproduct(b1));
4224 d2::point np3 = d2::point( p3.dproduct(b0), p3.dproduct(b1));
4227 * Determine intersection of line
4228 * (nbp0, nbp1), which is parallel to
4229 * b0, with line (nbp2, np3).
4233 * XXX: a stronger check would be
4234 * better here, e.g., involving the
4235 * ratio (np3[0] - nbp2[0]) / (np3[1] -
4236 * nbp2[1]). Also, acceptance of these
4237 * cases is probably better than
4238 * rejection.
4242 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4244 if (np3[1] - nbp2[1] == 0)
4245 continue;
4248 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4250 d2::point intersection = d2::point(nbp2[0]
4251 + (nbp0[1] - nbp2[1]) * (np3[0] - nbp2[0]) / (np3[1] - nbp2[1]),
4252 nbp0[1]);
4254 ale_pos b2_offset = b2.dproduct(bp0);
4257 * Map the intersection back to the world
4258 * basis.
4261 point iw = intersection[0] * b0 + intersection[1] * b1 + b2_offset * b2;
4264 * Reject intersection points behind a
4265 * camera.
4268 point icp = _pt1.wc(iw);
4269 point ics = _pt2.wc(iw);
4272 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4274 if (icp[2] >= 0 || ics[2] >= 0)
4275 continue;
4278 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4281 * Reject clipping plane violations.
4285 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4287 if (iw[2] > front_clip
4288 || iw[2] < rear_clip)
4289 continue;
4292 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4295 * Score the point.
4298 point ip = _pt1.wp_unscaled(iw);
4300 point is = _pt2.wp_unscaled(iw);
4302 analytic _a = { iw, ip, is };
4305 * Calculate score from color match. Assume for now
4306 * that the transformation can be approximated locally
4307 * with a translation.
4310 ale_pos score = 0;
4311 ale_pos divisor = 0;
4312 ale_pos l1_multiplier = 0.125;
4314 if (if1->in_bounds(ip.xy())
4315 && if2->in_bounds(is.xy())
4316 && !d2::render::is_excluded_f(ip.xy() * pdu_scale, f1)
4317 && !d2::render::is_excluded_f(is.xy() * pdu_scale, f2)) {
4318 divisor += 1 - l1_multiplier;
4319 score += (1 - l1_multiplier)
4320 * (ale_pos) ((if1->get_bl(ip.xy()) - if2->get_bl(is.xy())).normsq());
4324 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4326 for (int iii = -1; iii <= 1; iii++)
4327 for (int jjj = -1; jjj <= 1; jjj++) {
4328 d2::point t(iii, jjj);
4330 if (!if1->in_bounds(ip.xy() + t)
4331 || !if2->in_bounds(is.xy() + t)
4332 || d2::render::is_excluded_f(ip.xy() * pdu_scale, f1)
4333 || d2::render::is_excluded_f(is.xy() * pdu_scale, f2))
4334 continue;
4336 divisor += l1_multiplier;
4337 score += l1_multiplier
4338 * (ale_pos) ((if1->get_bl(ip.xy() + t) - if2->get_bl(is.xy() + t)).normsq());
4343 * Include third-camera contributions in the score.
4346 if (tc_multiplier != 0)
4347 for (unsigned int f = 0; f < d2::image_rw::count(); f++) {
4348 if (f == f1 || f == f2)
4349 continue;
4351 const d2::image *if3 = al->get(f)->get_image(primary_decimation_upper);
4352 pt _pt3 = al->get(f)->get_t(primary_decimation_upper);
4354 point p = _pt3.wp_unscaled(iw);
4356 if (!if3->in_bounds(p.xy())
4357 || !if1->in_bounds(ip.xy())
4358 || d2::render::is_excluded_f(p.xy() * pdu_scale, f)
4359 || d2::render::is_excluded_f(ip.xy() * pdu_scale, f1))
4360 continue;
4362 divisor += tc_multiplier;
4363 score += tc_multiplier
4364 * (if1->get_bl(ip.xy()) - if3->get_bl(p.xy())).normsq();
4370 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4373 * Reject points with undefined score.
4377 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4379 if (!finite(score / divisor))
4380 continue;
4383 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4385 #if 0
4387 * XXX: reject points not on the z=-27.882252 plane.
4391 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4393 if (_a.ip[2] > -27 || _a.ip[2] < -28)
4394 continue;
4395 #endif
4398 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4401 * Add the point to the score map.
4404 // d2::pixel c_ip = if1->in_bounds(ip.xy()) ? if1->get_bl(ip.xy())
4405 // : d2::pixel();
4406 // d2::pixel c_is = if2->in_bounds(is.xy()) ? if2->get_bl(is.xy())
4407 // : d2::pixel();
4409 // fprintf(stderr, "Candidate subspace: f1=%u f2=%u i=%u j=%u ii=%f jj=%f"
4410 // "cp=[%f %f %f] cs=[%f %f %f]\n",
4411 // f1, f2, i, j, ii, jj, c_ip[0], c_ip[1], c_ip[2],
4412 // c_is[0], c_is[1], c_is[2]);
4414 result.insert(score_map_element(score / divisor, _a));
4417 // fprintf(stderr, "Iterating through the score map:\n");
4419 // for (score_map::iterator smi = result.begin(); smi != result.end(); smi++) {
4420 // fprintf(stderr, "%f ", smi->first);
4421 // }
4423 // fprintf(stderr, "\n");
4425 return result;
4430 * Attempt to refine space around a point, to high and low resolutions
4431 * resulting in two resolutions in total.
4434 static space::traverse refine_space(point iw, ale_pos target_dim, int use_filler) {
4436 space::traverse st = space::traverse::root();
4438 if (!st.includes(iw)) {
4439 assert(0);
4440 return st;
4443 int lr_done = !use_filler;
4446 * Loop until all resolutions of interest have been generated.
4449 for(;;) {
4451 point p[2] = { st.get_min(), st.get_max() };
4453 ale_pos dim_max = 0;
4455 for (int d = 0; d < 3; d++) {
4456 ale_pos d_value = fabs(p[0][d] - p[1][d]);
4457 if (d_value > dim_max)
4458 dim_max = d_value;
4462 * Generate any new desired spatial registers.
4465 for (int f = 0; f < 2; f++) {
4468 * Low resolution
4471 if (dim_max < 2 * target_dim
4472 && lr_done == 0) {
4473 if (spatial_info_map.find(st.get_node()) == spatial_info_map.end()) {
4474 spatial_info_map[st.get_node()];
4475 ui::get()->d3_increment_spaces();
4477 lr_done = 1;
4481 * High resolution.
4484 if (dim_max < target_dim) {
4485 if (spatial_info_map.find(st.get_node()) == spatial_info_map.end()) {
4486 spatial_info_map[st.get_node()];
4487 ui::get()->d3_increment_spaces();
4489 return st;
4494 * Check precision before analyzing space further.
4497 if (st.precision_wall()) {
4498 fprintf(stderr, "\n\n*** Error: reached subspace precision wall ***\n\n");
4499 assert(0);
4500 return st;
4503 if (st.positive().includes(iw)) {
4504 st = st.positive();
4505 total_tsteps++;
4506 } else if (st.negative().includes(iw)) {
4507 st = st.negative();
4508 total_tsteps++;
4509 } else {
4510 fprintf(stderr, "failed iw = (%f, %f, %f)\n",
4511 (double) iw[0], (double) iw[1], (double) iw[2]);
4512 assert(0);
4518 * Calculate target dimension
4521 static ale_pos calc_target_dim(point iw, pt _pt, const char *d_out[], const char *v_out[],
4522 std::map<const char *, pt> *d3_depth_pt,
4523 std::map<const char *, pt> *d3_output_pt) {
4525 ale_pos result = _pt.distance_1d(iw, primary_decimation_upper);
4527 for (unsigned int n = 0; n < d2::image_rw::count(); n++) {
4528 if (d_out[n] && align::projective(n).distance_1d(iw, 0) < result)
4529 result = align::projective(n).distance_1d(iw, 0);
4530 if (v_out[n] && align::projective(n).distance_1d(iw, 0) < result)
4531 result = align::projective(n).distance_1d(iw, 0);
4534 for (std::map<const char *, pt>::iterator i = d3_output_pt->begin(); i != d3_output_pt->end(); i++) {
4535 if (i->second.distance_1d(iw, 0) < result)
4536 result = i->second.distance_1d(iw, 0);
4539 for (std::map<const char *, pt>::iterator i = d3_depth_pt->begin(); i != d3_depth_pt->end(); i++) {
4540 if (i->second.distance_1d(iw, 0) < result)
4541 result = i->second.distance_1d(iw, 0);
4544 assert (result > 0);
4546 return result;
4550 * Calculate level of detail for a given viewpoint.
4553 static int calc_lod(ale_pos depth1, pt _pt, ale_pos target_dim) {
4554 return (int) round(_pt.trilinear_coordinate(depth1, target_dim * (ale_pos) sqrt(2)));
4558 * Calculate depth range for a given pair of viewpoints.
4561 static ale_pos calc_depth_range(point iw, pt _pt1, pt _pt2) {
4563 point ip = _pt1.wp_unscaled(iw);
4565 ale_pos reference_change = fabs(ip[2] / 1000);
4567 point iw1 = _pt1.pw_scaled(ip + point(0, 0, reference_change));
4568 point iw2 = _pt1.pw_scaled(ip - point(0, 0, reference_change));
4570 point is = _pt2.wc(iw);
4571 point is1 = _pt2.wc(iw1);
4572 point is2 = _pt2.wc(iw2);
4574 assert(is[2] < 0);
4576 ale_pos d1 = (is1.xy() - is.xy()).norm();
4577 ale_pos d2 = (is2.xy() - is.xy()).norm();
4579 // assert (reference_change > 0);
4580 // assert (d1 > 0 || d2 > 0);
4582 if (is1[2] < 0 && is2[2] < 0) {
4584 if (d1 > d2)
4585 return reference_change / d1;
4586 else
4587 return reference_change / d2;
4590 if (is1[2] < 0)
4591 return reference_change / d1;
4593 if (is2[2] < 0)
4594 return reference_change / d2;
4596 return 0;
4600 * Calculate a refined point for a given set of parameters.
4603 static point get_refined_point(pt _pt1, pt _pt2, int i, int j,
4604 int f1, int f2, int lod1, int lod2, ale_pos depth,
4605 ale_pos depth_range) {
4607 d2::pixel comparison_color = al->get(f1)->get_image(lod1)->get_pixel(i, j);
4609 ale_pos best = -1;
4610 ale_pos best_depth = depth;
4612 assert (depth_range > 0);
4614 if (fabs(depth_range) < fabs(depth / 10000))
4615 return _pt1.pw_unscaled(point(i, j, depth));
4617 for (ale_pos d = depth - depth_range; d < depth + depth_range; d += depth_range / 10) {
4619 if (!(d < 0))
4620 continue;
4622 point iw = _pt1.pw_unscaled(point(i, j, d));
4623 point is = _pt2.wp_unscaled(iw);
4625 if (!(is[2] < 0))
4626 continue;
4628 if (!al->get(f2)->get_image(lod2)->in_bounds(is.xy()))
4629 continue;
4631 ale_pos error = (comparison_color - al->get(f2)->get_image(lod2)->get_bl(is.xy())).norm();
4633 if (error < best || best == -1) {
4634 best = error;
4635 best_depth = d;
4639 return _pt1.pw_unscaled(point(i, j, best_depth));
4643 * Analyze space in a manner dependent on the score map.
4646 static void analyze_space_from_map(const char *d_out[], const char *v_out[],
4647 std::map<const char *, pt> *d3_depth_pt,
4648 std::map<const char *, pt> *d3_output_pt,
4649 unsigned int f1, unsigned int f2,
4650 unsigned int i, unsigned int j, score_map _sm, int use_filler) {
4652 int accumulated_ambiguity = 0;
4653 int max_acc_amb = pairwise_ambiguity;
4655 pt _pt1 = al->get(f1)->get_t(0);
4656 pt _pt2 = al->get(f2)->get_t(0);
4658 if (_pt1.scale_2d() != 1)
4659 use_filler = 1;
4661 for(score_map::iterator smi = _sm.begin(); smi != _sm.end(); smi++) {
4663 point iw = smi->second.iw;
4665 if (accumulated_ambiguity++ >= max_acc_amb)
4666 break;
4668 total_ambiguity++;
4670 ale_pos depth1 = _pt1.wc(iw)[2];
4671 ale_pos depth2 = _pt2.wc(iw)[2];
4673 ale_pos target_dim = calc_target_dim(iw, _pt1, d_out, v_out, d3_depth_pt, d3_output_pt);
4675 assert(target_dim > 0);
4677 int lod1 = calc_lod(depth1, _pt1, target_dim);
4678 int lod2 = calc_lod(depth2, _pt2, target_dim);
4680 while (lod1 < input_decimation_lower
4681 || lod2 < input_decimation_lower) {
4682 target_dim *= 2;
4683 lod1 = calc_lod(depth1, _pt1, target_dim);
4684 lod2 = calc_lod(depth2, _pt2, target_dim);
4688 if (lod1 >= (int) al->get(f1)->count()
4689 || lod2 >= (int) al->get(f2)->count())
4690 continue;
4692 int multiplier = (unsigned int) floor(pow(2, primary_decimation_upper - lod1));
4694 ale_pos depth_range = calc_depth_range(iw, _pt1, _pt2);
4696 assert (depth_range > 0);
4698 pt _pt1_lod = al->get(f1)->get_t(lod1);
4699 pt _pt2_lod = al->get(f2)->get_t(lod2);
4701 int im = i * multiplier;
4702 int jm = j * multiplier;
4704 for (int ii = 0; ii < multiplier; ii += 1)
4705 for (int jj = 0; jj < multiplier; jj += 1) {
4707 point refined_point = get_refined_point(_pt1_lod, _pt2_lod, im + ii, jm + jj,
4708 f1, f2, lod1, lod2, depth1, depth_range);
4711 * Re-evaluate target dimension.
4714 ale_pos target_dim_ =
4715 calc_target_dim(refined_point, _pt1, d_out, v_out, d3_depth_pt, d3_output_pt);
4717 ale_pos depth1_ = _pt1.wc(refined_point)[2];
4718 ale_pos depth2_ = _pt2.wc(refined_point)[2];
4720 int lod1_ = calc_lod(depth1_, _pt1, target_dim_);
4721 int lod2_ = calc_lod(depth2_, _pt2, target_dim_);
4723 while (lod1_ < input_decimation_lower
4724 || lod2_ < input_decimation_lower) {
4725 target_dim_ *= 2;
4726 lod1_ = calc_lod(depth1_, _pt1, target_dim_);
4727 lod2_ = calc_lod(depth2_, _pt2, target_dim_);
4731 * Attempt to refine space around the intersection point.
4734 space::traverse st =
4735 refine_space(refined_point, target_dim_, use_filler || _pt1.scale_2d() != 1);
4737 // if (!resolution_ok(al->get(f1)->get_t(0), al->get(f1)->get_t(0).trilinear_coordinate(st))) {
4738 // pt transformation = al->get(f1)->get_t(0);
4739 // ale_pos tc = al->get(f1)->get_t(0).trilinear_coordinate(st);
4741 // fprintf(stderr, "Resolution not ok.\n");
4742 // fprintf(stderr, "pow(2, tc)=%f\n", pow(2, tc));
4743 // fprintf(stderr, "transformation.unscaled_height()=%f\n",
4744 // transformation.unscaled_height());
4745 // fprintf(stderr, "transformation.unscaled_width()=%f\n",
4746 // transformation.unscaled_width());
4747 // fprintf(stderr, "tc=%f", tc);
4748 // fprintf(stderr, "input_decimation_lower - 1.5 = %f\n",
4749 // input_decimation_lower - 1.5);
4751 // }
4753 // assert(resolution_ok(al->get(f1)->get_t(0), al->get(f1)->get_t(0).trilinear_coordinate(st)));
4754 // assert(resolution_ok(al->get(f2)->get_t(0), al->get(f2)->get_t(0).trilinear_coordinate(st)));
4762 * Initialize space and identify regions of interest for the adaptive
4763 * subspace model.
4765 static void make_space(const char *d_out[], const char *v_out[],
4766 std::map<const char *, pt> *d3_depth_pt,
4767 std::map<const char *, pt> *d3_output_pt) {
4769 ui::get()->d3_total_spaces(0);
4772 * Variable indicating whether low-resolution filler space
4773 * is desired to avoid aliased gaps in surfaces.
4776 int use_filler = d3_depth_pt->size() != 0
4777 || d3_output_pt->size() != 0
4778 || output_decimation_preferred > 0
4779 || input_decimation_lower > 0
4780 || !focus::is_trivial()
4781 || !strcmp(pairwise_comparisons, "all");
4783 std::vector<pt> pt_outputs = make_pt_list(d_out, v_out, d3_depth_pt, d3_output_pt);
4786 * Initialize root space.
4789 space::init_root();
4792 * Special handling for experimental option 'subspace_traverse'.
4795 if (subspace_traverse) {
4797 * Subdivide space to resolve intensity matches between pairs
4798 * of frames.
4801 for (unsigned int f1 = 0; f1 < d2::image_rw::count(); f1++) {
4803 if (d3_depth_pt->size() == 0
4804 && d3_output_pt->size() == 0
4805 && d_out[f1] == NULL
4806 && v_out[f1] == NULL)
4807 continue;
4809 if (tc_multiplier == 0)
4810 al->open(f1);
4812 for (unsigned int f2 = 0; f2 < d2::image_rw::count(); f2++) {
4814 if (f1 == f2)
4815 continue;
4817 if (tc_multiplier == 0)
4818 al->open(f2);
4820 candidates *c = new candidates(f1);
4822 find_candidates(f1, f2, c, point::neginf(), point::posinf(), pt_outputs);
4826 c->generate_subspaces();
4828 if (tc_multiplier == 0)
4829 al->close(f2);
4832 if (tc_multiplier == 0)
4833 al->close(f1);
4836 return;
4840 * Subdivide space to resolve intensity matches between pairs
4841 * of frames.
4844 for (unsigned int f1 = 0; f1 < d2::image_rw::count(); f1++)
4845 for (unsigned int f2 = 0; f2 < d2::image_rw::count(); f2++) {
4846 if (f1 == f2)
4847 continue;
4849 if (!d_out[f1] && !v_out[f1] && !d3_depth_pt->size()
4850 && !d3_output_pt->size() && strcmp(pairwise_comparisons, "all"))
4851 continue;
4853 if (tc_multiplier == 0) {
4854 al->open(f1);
4855 al->open(f2);
4859 * Iterate over all points in the primary frame.
4862 ale_pos pdu_scale = pow(2, primary_decimation_upper);
4864 for (unsigned int i = 0; i < al->get(f1)->get_image(primary_decimation_upper)->height(); i++)
4865 for (unsigned int j = 0; j < al->get(f1)->get_image(primary_decimation_upper)->width(); j++) {
4867 if (d2::render::is_excluded_f(d2::point(i, j) * pdu_scale, f1))
4868 continue;
4870 ui::get()->d3_subdivision_status(f1, f2, i, j);
4872 total_pixels++;
4875 * Generate a map from scores to 3D points for
4876 * various depths in f1.
4879 score_map _sm = p2f_score_map(f1, f2, i, j);
4882 * Analyze space in a manner dependent on the score map.
4885 analyze_space_from_map(d_out, v_out, d3_depth_pt, d3_output_pt,
4886 f1, f2, i, j, _sm, use_filler);
4891 * This ordering may encourage image f1 to be cached.
4894 if (tc_multiplier == 0) {
4895 al->close(f2);
4896 al->close(f1);
4903 * Update spatial information structures.
4905 * XXX: the name of this function is horribly misleading. There isn't
4906 * even a 'search depth' any longer, since there is no longer any
4907 * bounded DFS occurring.
4909 static void reduce_cost_to_search_depth(d2::exposure *exp_out, int inc_bit) {
4912 * Subspace model
4915 ui::get()->set_steps(ou_iterations);
4917 for (unsigned int i = 0; i < ou_iterations; i++) {
4918 ui::get()->set_steps_completed(i);
4919 spatial_info_update();
4924 #if 0
4926 * Describe a scene to a renderer
4928 static void describe(render *r) {
4930 #endif
4933 #endif