1 // Copyright 2003, 2004, 2005, 2006 David Hilvert <dhilvert@auricle.dyndns.org>,
2 // <dhilvert@ugcs.caltech.edu>
4 /* This file is part of the Anti-Lamenessing Engine.
6 The Anti-Lamenessing Engine is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 The Anti-Lamenessing Engine is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with the Anti-Lamenessing Engine; if not, see
18 <http://www.gnu.org/licenses/>.
22 * d3/scene.h: Representation of a 3D scene.
31 * View angle multiplier.
33 * Setting this to a value larger than one can be useful for debugging.
36 #define VIEW_ANGLE_MULTIPLIER 1
43 static ale_pos front_clip
;
44 static ale_pos rear_clip
;
47 * Decimation exponents for geometry
49 static int primary_decimation_upper
;
50 static int input_decimation_lower
;
51 static int output_decimation_preferred
;
56 static int output_clip
;
61 static const char *load_model_name
;
62 static const char *save_model_name
;
65 * Occupancy attenuation
68 static double occ_att
;
71 * Normalization of output by weight
74 static int normalize_weights
;
80 static int use_filter
;
81 static const char *d3chain_type
;
87 static double falloff_exponent
;
90 * Third-camera error multiplier
92 static double tc_multiplier
;
95 * Occupancy update iterations
97 static unsigned int ou_iterations
;
102 static unsigned int pairwise_ambiguity
;
105 * Pairwise comparisons
107 static const char *pairwise_comparisons
;
112 static int d3px_count
;
113 static double *d3px_parameters
;
118 static const ale_real nearness
;
121 * Encounter threshold for defined pixels.
123 static double encounter_threshold
;
126 * Median calculation radii.
128 static double depth_median_radius
;
129 static double diff_median_radius
;
132 * Flag for subspace traversal.
134 static int subspace_traverse
;
137 * Structure to hold input frame information at levels of
138 * detail between full detail and full decimation.
142 unsigned int entries
;
143 std::vector
<const d2::image
*> im
;
144 std::vector
<pt
> transformation
;
150 lod_image(unsigned int _f
) {
155 im
.push_back(d2::image_rw::copy(f
, "3D reference image"));
158 _pt
= d3::align::projective(f
);
159 _pt
.scale(1 / _pt
.scale_2d());
160 transformation
.push_back(_pt
);
162 while (im
.back()->height() > 4
163 && im
.back()->width() > 4) {
165 im
.push_back(im
.back()->scale_by_half("3D, reduced LOD"));
168 _pt
.scale(1 / _pt
.scale_2d() / pow(2, entries
));
169 transformation
.push_back(_pt
);
176 * Get the number of scales
178 unsigned int count() {
185 const d2::image
*get_image(unsigned int i
) {
/* Return nonzero iff 2D point p lies within the full-detail (level-0)
 * reference image; delegates to that image's own bounds check.
 * NOTE(review): the closing brace was lost in extraction — the original
 * definition continues past the visible fragment. */
191 int in_bounds(d2::point p
) {
192 return im
[0]->in_bounds(p
);
196 * Get a 'trilinear' color. We currently don't do interpolation
197 * between levels of detail; hence, it's discontinuous in tl_coord.
199 d2::pixel
get_tl(d2::point p
, ale_pos tl_coord
) {
201 assert(in_bounds(p
));
203 tl_coord
= round(tl_coord
);
205 if (tl_coord
>= entries
)
210 p
= p
/ (ale_pos
) pow(2, tl_coord
);
212 unsigned int itlc
= (unsigned int) tl_coord
;
214 if (p
[0] > im
[itlc
]->height() - 1)
215 p
[0] = im
[itlc
]->height() - 1;
216 if (p
[1] > im
[itlc
]->width() - 1)
217 p
[1] = im
[itlc
]->width() - 1;
222 return im
[itlc
]->get_bl(p
);
225 d2::pixel
get_max_diff(d2::point p
, ale_pos tl_coord
) {
226 assert(in_bounds(p
));
228 tl_coord
= round(tl_coord
);
230 if (tl_coord
>= entries
)
235 p
= p
/ (ale_pos
) pow(2, tl_coord
);
237 unsigned int itlc
= (unsigned int) tl_coord
;
239 if (p
[0] > im
[itlc
]->height() - 1)
240 p
[0] = im
[itlc
]->height() - 1;
241 if (p
[1] > im
[itlc
]->width() - 1)
242 p
[1] = im
[itlc
]->width() - 1;
247 return im
[itlc
]->get_max_diff(p
);
251 * Get the transformation
253 pt
get_t(unsigned int i
) {
256 return transformation
[i
];
260 * Get the camera origin in world coordinates
263 return transformation
[0].origin();
270 for (unsigned int i
= 0; i
< entries
; i
++) {
277 * Structure to hold weight information for reference images.
281 std::vector
<d2::image
*> weights
;
287 void set_image(d2::image
*im
, ale_real value
) {
289 for (unsigned int i
= 0; i
< im
->height(); i
++)
290 for (unsigned int j
= 0; j
< im
->width(); j
++)
291 im
->pix(i
, j
) = d2::pixel(value
, value
, value
);
294 d2::image
*make_image(ale_pos sf
, ale_real init_value
= 0) {
295 d2::image
*result
= new d2::image_ale_real(
296 (unsigned int) ceil(transformation
.unscaled_height() * sf
),
297 (unsigned int) ceil(transformation
.unscaled_width() * sf
), 3);
301 set_image(result
, init_value
);
309 * Explicit weight subtree
313 subtree
*children
[2][2];
315 subtree(ale_real nv
, subtree
*a
, subtree
*b
, subtree
*c
, subtree
*d
) {
324 for (int i
= 0; i
< 2; i
++)
325 for (int j
= 0; j
< 2; j
++)
326 delete children
[i
][j
];
333 ref_weights(unsigned int _f
) {
335 transformation
= d3::align::projective(f
);
340 * Check spatial bounds.
342 int in_spatial_bounds(point p
) {
351 if (p
[0] > transformation
.unscaled_height() - 1)
353 if (p
[1] > transformation
.unscaled_width() - 1)
361 int in_spatial_bounds(const space::traverse
&t
) {
362 point p
= transformation
.centroid(t
);
363 return in_spatial_bounds(p
);
367 * Increase resolution to the given level.
369 void increase_resolution(int tc
, unsigned int i
, unsigned int j
) {
370 d2::image
*im
= weights
[tc
- tc_low
];
372 assert(i
<= im
->height() - 1);
373 assert(j
<= im
->width() - 1);
376 * Check for the cases known to have no lower level of detail.
379 if (im
->pix(i
, j
)[0] == -1)
385 increase_resolution(tc
+ 1, i
/ 2, j
/ 2);
388 * Load the lower-level image structure.
391 d2::image
*iim
= weights
[tc
+ 1 - tc_low
];
394 assert(i
/ 2 <= iim
->height() - 1);
395 assert(j
/ 2 <= iim
->width() - 1);
398 * Check for the case where no lower level of detail is
402 if (iim
->pix(i
/ 2, j
/ 2)[0] == -1)
406 * Spread out the lower level of detail among (uninitialized)
410 for (unsigned int ii
= (i
/ 2) * 2; ii
< (i
/ 2) * 2 + 1; ii
++)
411 for (unsigned int jj
= (j
/ 2) * 2; jj
< (j
/ 2) * 2 + 1; jj
++) {
412 assert(ii
<= im
->height() - 1);
413 assert(jj
<= im
->width() - 1);
414 assert(im
->pix(ii
, jj
)[0] == 0);
416 im
->pix(ii
, jj
) = iim
->pix(i
/ 2, j
/ 2);
420 * Set the lower level of detail to point here.
423 iim
->pix(i
/ 2, j
/ 2)[0] = -1;
427 * Add weights to positive higher-resolution pixels where
428 * found when their current values match the given subtree
429 * values; set negative pixels to zero and return 0 if no
430 * positive higher-resolution pixels are found.
432 int add_partial(int tc
, unsigned int i
, unsigned int j
, ale_real weight
, subtree
*st
) {
433 d2::image
*im
= weights
[tc
- tc_low
];
436 if (i
== im
->height() - 1
437 || j
== im
->width() - 1) {
441 assert(i
<= im
->height() - 1);
442 assert(j
<= im
->width() - 1);
445 * Check for positive values.
448 if (im
->pix(i
, j
)[0] > 0) {
449 if (st
&& st
->node_value
== im
->pix(i
, j
)[0])
450 im
->pix(i
, j
)[0] += weight
* (1 - im
->pix(i
, j
)[0]);
455 * Handle the case where there are no higher levels of detail.
459 if (im
->pix(i
, j
)[0] != 0) {
460 fprintf(stderr
, "failing assertion: im[%d]->pix(%d, %d)[0] == %g\n", tc
, i
, j
,
463 assert(im
->pix(i
, j
)[0] == 0);
468 * Handle the case where higher levels of detail are available.
473 for (int ii
= 0; ii
< 2; ii
++)
474 for (int jj
= 0; jj
< 2; jj
++)
475 success
[ii
][jj
] = add_partial(tc
- 1, i
* 2 + ii
, j
* 2 + jj
, weight
,
476 st
? st
->children
[ii
][jj
] : NULL
);
482 im
->pix(i
, j
)[0] = 0;
486 d2::image
*iim
= weights
[tc
- 1 - tc_low
];
489 for (int ii
= 0; ii
< 2; ii
++)
490 for (int jj
= 0; jj
< 2; jj
++)
491 if (success
[ii
][jj
] == 0) {
492 assert(i
* 2 + ii
< iim
->height());
493 assert(j
* 2 + jj
< iim
->width());
495 iim
->pix(i
* 2 + ii
, j
* 2 + jj
)[0] = weight
;
498 im
->pix(i
, j
)[0] = -1;
506 void add_weight(int tc
, unsigned int i
, unsigned int j
, ale_real weight
, subtree
*st
) {
508 assert (weight
>= 0);
510 d2::image
*im
= weights
[tc
- tc_low
];
513 // fprintf(stderr, "[aw tc=%d i=%d j=%d imax=%d jmax=%d]\n",
514 // tc, i, j, im->height(), im->width());
516 assert(i
<= im
->height() - 1);
517 assert(j
<= im
->width() - 1);
520 * Increase resolution, if necessary
523 increase_resolution(tc
, i
, j
);
526 * Attempt to add the weight at levels of detail
527 * where weight is defined.
530 if (add_partial(tc
, i
, j
, weight
, st
))
534 * If no weights are defined at any level of detail,
535 * then set the weight here.
538 im
->pix(i
, j
)[0] = weight
;
541 void add_weight(int tc
, d2::point p
, ale_real weight
, subtree
*st
) {
543 assert (weight
>= 0);
547 unsigned int i
= (unsigned int) floor(p
[0]);
548 unsigned int j
= (unsigned int) floor(p
[1]);
550 add_weight(tc
, i
, j
, weight
, st
);
553 void add_weight(const space::traverse
&t
, ale_real weight
, subtree
*st
) {
555 assert (weight
>= 0);
560 ale_pos tc
= transformation
.trilinear_coordinate(t
);
561 point p
= transformation
.centroid(t
);
562 assert(in_spatial_bounds(p
));
567 * Establish a reasonable (?) upper bound on resolution.
570 if (tc
< input_decimation_lower
) {
571 weight
/= pow(4, (input_decimation_lower
- tc
));
572 tc
= input_decimation_lower
;
576 * Initialize, if necessary.
580 tc_low
= tc_high
= (int) tc
;
582 ale_real sf
= pow(2, -tc
);
584 weights
.push_back(make_image(sf
));
590 * Check resolution bounds
593 assert (tc_low
<= tc_high
);
596 * Generate additional levels of detail, if necessary.
599 while (tc
< tc_low
) {
602 ale_real sf
= pow(2, -tc_low
);
604 weights
.insert(weights
.begin(), make_image(sf
));
607 while (tc
> tc_high
) {
610 ale_real sf
= pow(2, -tc_high
);
612 weights
.push_back(make_image(sf
, -1));
615 add_weight((int) tc
, p
.xy(), weight
, st
);
621 ale_real
get_weight(int tc
, unsigned int i
, unsigned int j
) {
623 // fprintf(stderr, "[gw tc=%d i=%u j=%u tclow=%d tchigh=%d]\n",
624 // tc, i, j, tc_low, tc_high);
626 if (tc
< tc_low
|| !initialized
)
630 return (get_weight(tc
- 1, i
* 2 + 0, j
* 2 + 0)
631 + get_weight(tc
- 1, i
* 2 + 1, j
* 2 + 0)
632 + get_weight(tc
- 1, i
* 2 + 1, j
* 2 + 1)
633 + get_weight(tc
- 1, i
* 2 + 0, j
* 2 + 1)) / 4;
636 assert(weights
.size() > (unsigned int) (tc
- tc_low
));
638 d2::image
*im
= weights
[tc
- tc_low
];
641 if (i
== im
->height())
643 if (j
== im
->width())
646 assert(i
< im
->height());
647 assert(j
< im
->width());
649 if (im
->pix(i
, j
)[0] == -1) {
650 return (get_weight(tc
- 1, i
* 2 + 0, j
* 2 + 0)
651 + get_weight(tc
- 1, i
* 2 + 1, j
* 2 + 0)
652 + get_weight(tc
- 1, i
* 2 + 1, j
* 2 + 1)
653 + get_weight(tc
- 1, i
* 2 + 0, j
* 2 + 1)) / 4;
656 if (im
->pix(i
, j
)[0] == 0) {
659 if (weights
[tc
- tc_low
+ 1]->pix(i
/ 2, j
/ 2)[0] == -1)
661 return get_weight(tc
+ 1, i
/ 2, j
/ 2);
664 return im
->pix(i
, j
)[0];
667 ale_real
get_weight(int tc
, d2::point p
) {
671 unsigned int i
= (unsigned int) floor(p
[0]);
672 unsigned int j
= (unsigned int) floor(p
[1]);
674 return get_weight(tc
, i
, j
);
677 ale_real
get_weight(const space::traverse
&t
) {
678 ale_pos tc
= transformation
.trilinear_coordinate(t
);
679 point p
= transformation
.centroid(t
);
680 assert(in_spatial_bounds(p
));
691 return get_weight((int) tc
, p
.xy());
695 * Check whether a subtree is simple.
697 int is_simple(subtree
*s
) {
700 if (s
->node_value
== -1
701 && s
->children
[0][0] == NULL
702 && s
->children
[0][1] == NULL
703 && s
->children
[1][0] == NULL
704 && s
->children
[1][1] == NULL
)
711 * Get a weight subtree.
713 subtree
*get_subtree(int tc
, unsigned int i
, unsigned int j
) {
716 * tc > tc_high is handled recursively.
720 subtree
*result
= new subtree(-1,
721 get_subtree(tc
- 1, i
* 2 + 0, j
* 2 + 0),
722 get_subtree(tc
- 1, i
* 2 + 0, j
* 2 + 1),
723 get_subtree(tc
- 1, i
* 2 + 1, j
* 2 + 0),
724 get_subtree(tc
- 1, i
* 2 + 1, j
* 2 + 1));
726 if (is_simple(result
)) {
734 assert(tc
>= tc_low
);
735 assert(weights
[tc
- tc_low
]);
737 d2::image
*im
= weights
[tc
- tc_low
];
740 * Rectangular images will, in general, have
741 * out-of-bounds tree sections. Handle this case.
744 if (i
>= im
->height())
746 if (j
>= im
->width())
750 * -1 weights are handled recursively
753 if (im
->pix(i
, j
)[0] == -1) {
754 subtree
*result
= new subtree(-1,
755 get_subtree(tc
- 1, i
* 2 + 0, j
* 2 + 0),
756 get_subtree(tc
- 1, i
* 2 + 0, j
* 2 + 1),
757 get_subtree(tc
- 1, i
* 2 + 1, j
* 2 + 0),
758 get_subtree(tc
- 1, i
* 2 + 1, j
* 2 + 1));
760 if (is_simple(result
)) {
761 im
->pix(i
, j
)[0] = 0;
770 * Zero weights have NULL subtrees.
773 if (im
->pix(i
, j
)[0] == 0)
777 * Handle the remaining case.
780 return new subtree(im
->pix(i
, j
)[0], NULL
, NULL
, NULL
, NULL
);
783 subtree
*get_subtree(int tc
, d2::point p
) {
786 unsigned int i
= (unsigned int) floor(p
[0]);
787 unsigned int j
= (unsigned int) floor(p
[1]);
789 return get_subtree(tc
, i
, j
);
792 subtree
*get_subtree(const space::traverse
&t
) {
793 ale_pos tc
= transformation
.trilinear_coordinate(t
);
794 point p
= transformation
.centroid(t
);
795 assert(in_spatial_bounds(p
));
800 if (tc
< input_decimation_lower
)
801 tc
= input_decimation_lower
;
808 return get_subtree((int) tc
, p
.xy());
815 for (unsigned int i
= 0; i
< weights
.size(); i
++) {
/* Decide whether trilinear coordinate tc is a usable level of detail for
 * the given camera transformation.  Two guards are visible: reject scales
 * where 2^tc exceeds either unscaled image dimension (space coarser than
 * the whole image), and reject tc finer than input_decimation_lower - 1.5.
 * NOTE(review): the return statements were dropped by the extraction;
 * presumably the guards return 0 and the fall-through returns 1 — confirm
 * against the original d3/scene.h. */
824 static int resolution_ok(pt transformation
, ale_pos tc
) {
826 if (pow(2, tc
) > transformation
.unscaled_height()
827 || pow(2, tc
) > transformation
.unscaled_width())
830 if (tc
< input_decimation_lower
- 1.5)
837 * Structure to hold input frame information at all levels of detail.
845 std::vector
<lod_image
*> images
;
850 images
.resize(d2::image_rw::count(), NULL
);
853 unsigned int count() {
854 return d2::image_rw::count();
857 void open(unsigned int f
) {
858 assert (images
[f
] == NULL
);
860 if (images
[f
] == NULL
)
861 images
[f
] = new lod_image(f
);
865 for (unsigned int f
= 0; f
< d2::image_rw::count(); f
++)
869 lod_image
*get(unsigned int f
) {
870 assert (images
[f
] != NULL
);
874 void close(unsigned int f
) {
875 assert (images
[f
] != NULL
);
881 for (unsigned int f
= 0; f
< d2::image_rw::count(); f
++)
891 * All levels-of-detail
894 static struct lod_images
*al
;
897 * Data structure for storing best encountered subspace candidates.
900 std::vector
<std::vector
<std::pair
<ale_pos
, ale_real
> > > levels
;
906 * Point p is in world coordinates.
908 void generate_subspace(point iw
, ale_pos diagonal
) {
910 // fprintf(stderr, "[gs iw=%f %f %f d=%f]\n",
911 // iw[0], iw[1], iw[2], diagonal);
913 space::traverse st
= space::traverse::root();
915 if (!st
.includes(iw
)) {
924 * Loop until resolutions of interest have been generated.
929 ale_pos current_diagonal
= (st
.get_max() - st
.get_min()).norm();
931 assert(!isnan(current_diagonal
));
934 * Generate any new desired spatial registers.
941 for (int f
= 0; f
< 2; f
++) {
947 if (current_diagonal
< 2 * diagonal
949 if (spatial_info_map
.find(st
.get_node()) == spatial_info_map
.end()) {
950 spatial_info_map
[st
.get_node()];
951 ui::get()->d3_increment_spaces();
960 if (current_diagonal
< diagonal
962 if (spatial_info_map
.find(st
.get_node()) == spatial_info_map
.end()) {
963 spatial_info_map
[st
.get_node()];
964 ui::get()->d3_increment_spaces();
971 * Check for completion
974 if (highres
&& lowres
)
978 * Check precision before analyzing space further.
981 if (st
.precision_wall()) {
982 fprintf(stderr
, "\n\n*** Error: reached subspace precision wall ***\n\n");
987 if (st
.positive().includes(iw
)) {
990 } else if (st
.negative().includes(iw
)) {
994 fprintf(stderr
, "failed iw = (%f, %f, %f)\n",
995 iw
[0], iw
[1], iw
[2]);
1005 height
= (unsigned int) al
->get(f
)->get_t(0).unscaled_height();
1006 width
= (unsigned int) al
->get(f
)->get_t(0).unscaled_width();
1009 * Is this necessary?
1012 levels
.resize(primary_decimation_upper
- input_decimation_lower
+ 1);
1013 for (int l
= input_decimation_lower
; l
<= primary_decimation_upper
; l
++) {
1014 levels
[l
- input_decimation_lower
].resize((unsigned int) (floor(height
/ pow(2, l
))
1015 * floor(width
/ pow(2, l
))
1016 * pairwise_ambiguity
),
1017 std::pair
<ale_pos
, ale_real
>(0, 0));
1022 * Point p is expected to be in local projective coordinates.
1025 void add_candidate(point p
, int tc
, ale_real score
) {
1026 assert(tc
<= primary_decimation_upper
);
1027 assert(tc
>= input_decimation_lower
);
1031 int i
= (unsigned int) floor(p
[0] / pow(2, tc
));
1032 int j
= (unsigned int) floor(p
[1] / pow(2, tc
));
1034 int swidth
= (int) floor(width
/ pow(2, tc
));
1037 assert(i
< (int) floor(height
/ pow(2, tc
)));
1039 for (unsigned int k
= 0; k
< pairwise_ambiguity
; k
++) {
1040 std::pair
<ale_pos
, ale_real
> *pk
=
1041 &(levels
[tc
- input_decimation_lower
][i
* swidth
* pairwise_ambiguity
+ j
* pairwise_ambiguity
+ k
]);
1043 if (pk
->first
!= 0 && score
>= pk
->second
)
1046 if (i
== 1 && j
== 1 && tc
== 4)
1047 fprintf(stderr
, "[ac p2=%f score=%f]\n", p
[2], score
);
1049 ale_pos tp
= pk
->first
;
1050 ale_real tr
= pk
->second
;
1064 * Generate subspaces for candidates.
1067 void generate_subspaces() {
1069 fprintf(stderr
, "+");
1070 for (int l
= input_decimation_lower
; l
<= primary_decimation_upper
; l
++) {
1071 unsigned int sheight
= (unsigned int) floor(height
/ pow(2, l
));
1072 unsigned int swidth
= (unsigned int) floor(width
/ pow(2, l
));
1074 for (unsigned int i
= 0; i
< sheight
; i
++)
1075 for (unsigned int j
= 0; j
< swidth
; j
++)
1076 for (unsigned int k
= 0; k
< pairwise_ambiguity
; k
++) {
1077 std::pair
<ale_pos
, ale_real
> *pk
=
1078 &(levels
[l
- input_decimation_lower
]
1079 [i
* swidth
* pairwise_ambiguity
+ j
* pairwise_ambiguity
+ k
]);
1081 if (pk
->first
== 0) {
1082 fprintf(stderr
, "o");
1085 fprintf(stderr
, "|");
1088 ale_pos si
= i
* pow(2, l
) + ((l
> 0) ? pow(2, l
- 1) : 0);
1089 ale_pos sj
= j
* pow(2, l
) + ((l
> 0) ? pow(2, l
- 1) : 0);
1091 // fprintf(stderr, "[gss l=%d i=%d j=%d d=%g]\n", l, i, j, pk->first);
1093 point p
= al
->get(image_index
)->get_t(0).pw_unscaled(point(si
, sj
, pk
->first
));
1095 generate_subspace(p
,
1096 al
->get(image_index
)->get_t(0).diagonal_distance_3d(pk
->first
, l
));
1103 * List for calculating weighted median.
1110 ale_real
&_w(unsigned int i
) {
1115 ale_real
&_d(unsigned int i
) {
1117 return data
[i
* 2 + 1];
1120 void increase_capacity() {
1127 data
= (ale_real
*) realloc(data
, sizeof(ale_real
) * 2 * (size
* 1));
1130 assert (size
> used
);
1133 fprintf(stderr
, "Unable to allocate %d bytes of memory\n",
1134 sizeof(ale_real
) * 2 * (size
* 1));
1139 void insert_weight(unsigned int i
, ale_real v
, ale_real w
) {
1140 assert(used
< size
);
1142 for (unsigned int j
= used
; j
> i
; j
--) {
1155 unsigned int get_size() {
1159 unsigned int get_used() {
1164 fprintf(stderr
, "[st %p sz %u el", this, size
);
1165 for (unsigned int i
= 0; i
< used
; i
++)
1166 fprintf(stderr
, " (%f, %f)", _d(i
), _w(i
));
1167 fprintf(stderr
, "]\n");
1174 void insert_weight(ale_real v
, ale_real w
) {
1175 for (unsigned int i
= 0; i
< used
; i
++) {
1182 increase_capacity();
1183 insert_weight(i
, v
, w
);
1188 increase_capacity();
1189 insert_weight(used
, v
, w
);
1193 * Finds the median at half-weight, or between half-weight
1194 * and zero-weight, depending on the attenuation value.
1197 ale_real
find_median(double attenuation
= 0) {
1199 assert(attenuation
>= 0);
1200 assert(attenuation
<= 1);
1204 ale_real undefined
= zero1
/ zero2
;
1206 ale_accum weight_sum
= 0;
1208 for (unsigned int i
= 0; i
< used
; i
++)
1209 weight_sum
+= _w(i
);
1211 // if (weight_sum == 0)
1212 // return undefined;
1214 if (used
== 0 || used
== 1)
1217 if (weight_sum
== 0) {
1218 ale_accum data_sum
= 0;
1219 for (unsigned int i
= 0; i
< used
; i
++)
1221 return data_sum
/ used
;
1225 ale_accum midpoint
= weight_sum
* (0.5 - 0.5 * attenuation
);
1227 ale_accum weight_sum_2
= 0;
1229 for (unsigned int i
= 0; i
< used
&& weight_sum_2
< midpoint
; i
++) {
1230 weight_sum_2
+= _w(i
);
1232 if (weight_sum_2
> midpoint
) {
1234 } else if (weight_sum_2
== midpoint
) {
1235 assert (i
+ 1 < used
);
1236 return (_d(i
) + _d(i
+ 1)) / 2;
1243 wml(int initial_size
= 0) {
1245 // if (initial_size == 0) {
1246 // initial_size = (int) (d2::image_rw::count() * 1.5);
1249 size
= initial_size
;
1253 data
= (ale_real
*) malloc(size
* sizeof(ale_real
) * 2);
1261 * copy constructor. This is required to avoid undesired frees.
1267 data
= (ale_real
*) malloc(size
* sizeof(ale_real
) * 2);
1270 memcpy(data
, w
.data
, size
* sizeof(ale_real
) * 2);
1279 * Class for information regarding spatial regions of interest.
1281 * This class is configured for convenience in cases where sampling is
1282 * performed using an approximation of the fine:box:1,triangle:2 chain.
1283 * In this case, the *_1 variables would store the fine data and the
1284 * *_2 variables would store the coarse data. Other uses are also
1288 class spatial_info
{
1290 * Map channel value --> weight.
1292 wml color_weights_1
[3];
1293 wml color_weights_2
[3];
1301 * Map occupancy value --> weight.
1303 wml occupancy_weights_1
;
1304 wml occupancy_weights_2
;
1307 * Current occupancy value.
1315 unsigned int pocc_density
;
1316 unsigned int socc_density
;
1319 * Insert a weight into a list.
1321 void insert_weight(wml
*m
, ale_real v
, ale_real w
) {
1322 m
->insert_weight(v
, w
);
1326 * Find the median of a weighted list. Uses NaN for undefined.
1328 ale_real
find_median(wml
*m
, double attenuation
= 0) {
1329 return m
->find_median(attenuation
);
1337 color
= d2::pixel::zero();
1344 * Accumulate color; primary data set.
1346 void accumulate_color_1(int f
, d2::pixel color
, d2::pixel weight
) {
1347 for (int k
= 0; k
< 3; k
++)
1348 insert_weight(&color_weights_1
[k
], color
[k
], weight
[k
]);
1352 * Accumulate color; secondary data set.
1354 void accumulate_color_2(d2::pixel color
, d2::pixel weight
) {
1355 for (int k
= 0; k
< 3; k
++)
1356 insert_weight(&color_weights_2
[k
], color
[k
], weight
[k
]);
1360 * Accumulate occupancy; primary data set.
1362 void accumulate_occupancy_1(int f
, ale_real occupancy
, ale_real weight
) {
1363 insert_weight(&occupancy_weights_1
, occupancy
, weight
);
1367 * Accumulate occupancy; secondary data set.
1369 void accumulate_occupancy_2(ale_real occupancy
, ale_real weight
) {
1370 insert_weight(&occupancy_weights_2
, occupancy
, weight
);
1372 if (occupancy
== 0 || occupancy_weights_2
.get_size() < 96)
1375 // fprintf(stderr, "%p updated socc with: %f %f\n", this, occupancy, weight);
1376 // occupancy_weights_2.print_info();
1380 * Update color (and clear accumulation structures).
1382 void update_color() {
1383 for (int d
= 0; d
< 3; d
++) {
1384 ale_real c
= find_median(&color_weights_1
[d
]);
1386 c
= find_median(&color_weights_2
[d
]);
1392 color_weights_1
[d
].clear();
1393 color_weights_2
[d
].clear();
1398 * Update occupancy (and clear accumulation structures).
1400 void update_occupancy() {
1401 ale_real o
= find_median(&occupancy_weights_1
, occ_att
);
1403 o
= find_median(&occupancy_weights_2
, occ_att
);
1409 pocc_density
= occupancy_weights_1
.get_used();
1410 socc_density
= occupancy_weights_2
.get_used();
1412 occupancy_weights_1
.clear();
1413 occupancy_weights_2
.clear();
1418 * Get current color.
1420 d2::pixel
get_color() {
1425 * Get current occupancy.
1427 ale_real
get_occupancy() {
1428 assert (finite(occupancy
));
1433 * Get primary color density.
1436 unsigned int get_pocc_density() {
1437 return pocc_density
;
1440 unsigned int get_socc_density() {
1441 return socc_density
;
1446 * Map spatial regions of interest to spatial info structures. XXX:
1447 * This may get very poor cache behavior in comparison with, say, an
1448 * array. Unfortunately, there is no immediately obvious array
1449 * representation. If some kind of array representation were adopted,
1450 * it would probably cluster regions of similar depth from the
1451 * perspective of the typical camera. In particular, for a
1452 * stereoscopic view, depth ordering for two random points tends to be
1453 * similar between cameras, I think. Unfortunately, it is never
1454 * identical for all points (unless cameras are co-located). One
1455 * possible approach would be to order based on, say, camera 0's idea
1459 #if !defined(HASH_MAP_GNU) && !defined(HASH_MAP_STD)
1460 typedef std::map
<struct space::node
*, spatial_info
> spatial_info_map_t
;
1461 #elif defined(HASH_MAP_GNU)
1464 size_t operator()(struct space::node
*n
) const
1466 return __gnu_cxx::hash
<long>()((long) n
);
1469 typedef __gnu_cxx::hash_map
<struct space::node
*, spatial_info
, node_hash
> spatial_info_map_t
;
1470 #elif defined(HASH_MAP_STD)
1471 typedef std::hash_map
<struct space::node
*, spatial_info
> spatial_info_map_t
;
1474 static spatial_info_map_t spatial_info_map
;
1479 * Debugging variables.
1482 static unsigned long total_ambiguity
;
1483 static unsigned long total_pixels
;
1484 static unsigned long total_divisions
;
1485 static unsigned long total_tsteps
;
/* Option setter: store the encounter threshold used for defined pixels
 * (static member encounter_threshold; see its declaration above).
 * NOTE(review): closing brace lost in extraction. */
1491 static void et(double et_parameter
) {
1492 encounter_threshold
= et_parameter
;
/* Option setter: store the depth-median calculation radius
 * (static member depth_median_radius).
 * NOTE(review): closing brace lost in extraction. */
1495 static void dmr(double dmr_parameter
) {
1496 depth_median_radius
= dmr_parameter
;
/* Option setter: store the diff-median calculation radius
 * (static member diff_median_radius).
 * NOTE(review): closing brace lost in extraction. */
1499 static void fmr(double fmr_parameter
) {
1500 diff_median_radius
= fmr_parameter
;
/* Option setter: record the filename to load a 3D model from.  Stores the
 * pointer itself (no copy) — the caller's string must outlive its use.
 * NOTE(review): closing brace lost in extraction. */
1503 static void load_model(const char *name
) {
1504 load_model_name
= name
;
/* Option setter: record the filename to save the 3D model to.  Stores the
 * pointer itself (no copy) — the caller's string must outlive its use.
 * NOTE(review): closing brace lost in extraction. */
1507 static void save_model(const char *name
) {
1508 save_model_name
= name
;
1511 static void fc(ale_pos fc
) {
/* Option setter: set the upper primary decimation exponent for geometry
 * (static member primary_decimation_upper), rounded to the nearest int.
 * NOTE(review): closing brace lost in extraction. */
1515 static void di_upper(ale_pos _dgi
) {
1516 primary_decimation_upper
= (int) round(_dgi
);
/* Option setter: set the preferred output decimation exponent
 * (static member output_decimation_preferred), rounded to the nearest int.
 * NOTE(review): closing brace lost in extraction. */
1519 static void do_try(ale_pos _dgo
) {
1520 output_decimation_preferred
= (int) round(_dgo
);
/* Option setter: set the lower input decimation exponent
 * (static member input_decimation_lower), rounded to the nearest int.
 * NOTE(review): closing brace lost in extraction. */
1523 static void di_lower(ale_pos _idiv
) {
1524 input_decimation_lower
= (int) round(_idiv
);
1531 static void no_oc() {
1535 static void rc(ale_pos rc
) {
1540 * Initialize 3D scene from 2D scene, using 2D and 3D alignment
1543 static void init_from_d2() {
1546 * Rear clip value of 0 is converted to infinity.
1549 if (rear_clip
== 0) {
1553 rear_clip
= one
/ zero
;
1554 assert(isinf(rear_clip
) && rear_clip
> 0);
1558 * Scale and translate clipping plane depths.
1561 ale_pos cp_scalar
= d3::align::projective(0).wc(point(0, 0, 0))[2];
1563 front_clip
= front_clip
* cp_scalar
- cp_scalar
;
1564 rear_clip
= rear_clip
* cp_scalar
- cp_scalar
;
1567 * Allocate image structures.
1570 al
= new lod_images
;
1572 if (tc_multiplier
!= 0) {
1578 * Perform spatial_info updating on a given subspace, for given
1581 static void subspace_info_update(space::iterate si
, int f
, ref_weights
*weights
) {
1585 space::traverse st
= si
.get();
1588 * Reject out-of-bounds spaces.
1590 if (!weights
->in_spatial_bounds(st
)) {
1596 * Skip spaces with no color information.
1598 * XXX: This could be more efficient, perhaps.
1601 if (spatial_info_map
.count(st
.get_node()) == 0) {
1606 ui::get()->d3_increment_space_num();
1610 * Get in-bounds centroid, if one exists.
1613 point p
= al
->get(f
)->get_t(0).centroid(st
);
1621 * Get information on the subspace.
1624 spatial_info
*sn
= &spatial_info_map
[st
.get_node()];
1625 d2::pixel color
= sn
->get_color();
1626 ale_real occupancy
= sn
->get_occupancy();
1629 * Store current weight so we can later check for
1630 * modification by higher-resolution subspaces.
1633 ref_weights::subtree
*tree
= weights
->get_subtree(st
);
1636 * Check for higher resolution subspaces, and
1637 * update the space iterator.
1640 if (st
.get_node()->positive
1641 || st
.get_node()->negative
) {
1644 * Cleave space for the higher-resolution pass,
1645 * skipping the current space, since we will
1646 * process that later.
1649 space::iterate cleaved_space
= si
.cleave();
1651 cleaved_space
.next();
1653 subspace_info_update(cleaved_space
, f
, weights
);
1660 * Add new data on the subspace and update weights.
1663 ale_pos tc
= al
->get(f
)->get_t(0).trilinear_coordinate(st
);
1664 d2::pixel pcolor
= al
->get(f
)->get_tl(p
.xy(), tc
);
1665 d2::pixel colordiff
= (color
- pcolor
) * (ale_real
) 256;
1667 if (falloff_exponent
!= 0) {
1668 d2::pixel max_diff
= al
->get(f
)->get_max_diff(p
.xy(), tc
) * (ale_real
) 256;
1670 for (int k
= 0; k
< 3; k
++)
1671 if (max_diff
[k
] > 1)
1672 colordiff
[k
] /= pow(max_diff
[k
], falloff_exponent
);
1676 * Determine the probability of encounter.
1679 d2::pixel encounter
= d2::pixel(1, 1, 1) * (1 - weights
->get_weight(st
));
1685 weights
->add_weight(st
, occupancy
, tree
);
1688 * Delete the subtree, if necessary.
1694 * Check for cases in which the subspace should not be
1698 if (!resolution_ok(al
->get(f
)->get_t(0), tc
))
1701 if (d2::render::is_excluded_f(p
.xy(), f
))
1708 sn
->accumulate_color_1(f
, pcolor
, encounter
);
1709 d2::pixel channel_occ
= pexp(-colordiff
* colordiff
);
1711 ale_accum occ
= channel_occ
[0];
1713 for (int k
= 1; k
< 3; k
++)
1714 if (channel_occ
[k
] < occ
)
1715 occ
= channel_occ
[k
];
1717 sn
->accumulate_occupancy_1(f
, occ
, encounter
[0]);
1723 * Run a single iteration of the spatial_info update cycle.
1725 static void spatial_info_update() {
1727 * Iterate through each frame.
1729 for (unsigned int f
= 0; f
< d2::image_rw::count(); f
++) {
1731 ui::get()->d3_occupancy_status(f
);
1734 * Open the frame and transformation.
1737 if (tc_multiplier
== 0)
1741 * Allocate weights data structure for storing encounter
1745 ref_weights
*weights
= new ref_weights(f
);
1748 * Call subspace_info_update for the root space.
1751 subspace_info_update(space::iterate(al
->get(f
)->origin()), f
, weights
);
1760 * Close the frame and transformation.
1763 if (tc_multiplier
== 0)
1768 * Update all spatial_info structures.
1770 for (spatial_info_map_t::iterator i
= spatial_info_map
.begin(); i
!= spatial_info_map
.end(); i
++) {
1771 i
->second
.update_color();
1772 i
->second
.update_occupancy();
1774 // d2::pixel color = i->second.get_color();
1776 // fprintf(stderr, "space p=%p updated to c=[%f %f %f] o=%f\n",
1777 // i->first, color[0], color[1], color[2],
1778 // i->second.get_occupancy());
1783 * Support function for view() and depth(). This function
1784 * always performs exclusion.
1787 static const void view_recurse(int type
, d2::image
*im
, d2::image
*weights
, space::iterate si
, pt _pt
,
1788 int prune
= 0, d2::point pl
= d2::point(0, 0), d2::point ph
= d2::point(0, 0)) {
1789 while (!si
.done()) {
1790 space::traverse st
= si
.get();
1793 * Remove excluded regions.
1805 if (prune
&& !_pt
.check_inclusion_scaled(st
, pl
, ph
)) {
1811 * XXX: This could be more efficient, perhaps.
1814 if (spatial_info_map
.count(st
.get_node()) == 0) {
1819 ui::get()->d3_increment_space_num();
1821 spatial_info sn
= spatial_info_map
[st
.get_node()];
1824 * Get information on the subspace.
1827 d2::pixel color
= sn
.get_color();
1828 // d2::pixel color = d2::pixel(1, 1, 1) * (double) (((unsigned int) (st.get_node()) / sizeof(space)) % 65535);
1829 ale_real occupancy
= sn
.get_occupancy();
1832 * Determine the view-local bounding box for the
1838 _pt
.get_view_local_bb_scaled(st
, bb
);
1847 || max
[1] < pl
[1]) {
1868 * Data structure to check modification of weights by
1869 * higher-resolution subspaces.
1872 std::queue
<d2::pixel
> weight_queue
;
1875 * Check for higher resolution subspaces, and
1876 * update the space iterator.
1879 if (st
.get_node()->positive
1880 || st
.get_node()->negative
) {
1883 * Store information about current weights,
1884 * so we will know which areas have been
1885 * covered by higher-resolution subspaces.
1888 for (int i
= (int) ceil(min
[0]); i
<= (int) floor(max
[0]); i
++)
1889 for (int j
= (int) ceil(min
[1]); j
<= (int) floor(max
[1]); j
++)
1890 weight_queue
.push(weights
->get_pixel(i
, j
));
1893 * Cleave space for the higher-resolution pass,
1894 * skipping the current space, since we will
1895 * process that afterward.
1898 space::iterate cleaved_space
= si
.cleave();
1900 cleaved_space
.next();
1902 view_recurse(type
, im
, weights
, cleaved_space
, _pt
, prune
, pl
, ph
);
1910 * Iterate over pixels in the bounding box, finding
1911 * pixels that intersect the subspace. XXX: assume
1912 * for now that all pixels in the bounding box
1913 * intersect the subspace.
1916 for (int i
= (int) ceil(min
[0]); i
<= (int) floor(max
[0]); i
++)
1917 for (int j
= (int) ceil(min
[1]); j
<= (int) floor(max
[1]); j
++) {
1920 * Check for higher-resolution updates.
1923 if (weight_queue
.size()) {
1924 if (weight_queue
.front() != weights
->get_pixel(i
, j
)) {
1932 * Determine the probability of encounter.
1935 d2::pixel encounter
= (d2::pixel(1, 1, 1)
1936 - weights
->get_pixel(i
, j
))
1949 weights
->pix(i
, j
) += encounter
;
1950 im
->pix(i
, j
) += encounter
* color
;
1952 } else if (type
== 1) {
1955 * Weighted (transparent) depth display
1958 ale_pos depth_value
= _pt
.wp_scaled(st
.get_min())[2];
1959 weights
->pix(i
, j
) += encounter
;
1960 im
->pix(i
, j
) += encounter
* depth_value
;
1962 } else if (type
== 2) {
1965 * Ambiguity (ambivalence) measure.
1968 weights
->pix(i
, j
) = d2::pixel(1, 1, 1);
1969 im
->pix(i
, j
) += 0.1 * d2::pixel(1, 1, 1);
1971 } else if (type
== 3) {
1974 * Closeness measure.
1977 ale_pos depth_value
= _pt
.wp_scaled(st
.get_min())[2];
1978 if (weights
->pix(i
, j
)[0] == 0) {
1979 weights
->pix(i
, j
) = d2::pixel(1, 1, 1);
1980 im
->pix(i
, j
) = d2::pixel(1, 1, 1) * depth_value
;
1981 } else if (im
->pix(i
, j
)[2] < depth_value
) {
1982 im
->pix(i
, j
) = d2::pixel(1, 1, 1) * depth_value
;
1987 } else if (type
== 4) {
1990 * Weighted (transparent) contribution display
1993 ale_pos contribution_value
= sn
.get_pocc_density() /* + sn.get_socc_density() */;
1994 weights
->pix(i
, j
) += encounter
;
1995 im
->pix(i
, j
) += encounter
* contribution_value
;
1997 assert (finite(encounter
[0]));
1998 assert (finite(contribution_value
));
2000 } else if (type
== 5) {
2003 * Weighted (transparent) occupancy display
2006 ale_pos contribution_value
= occupancy
;
2007 weights
->pix(i
, j
) += encounter
;
2008 im
->pix(i
, j
) += encounter
* contribution_value
;
2010 } else if (type
== 6) {
2013 * (Depth, xres, yres) triple
2016 ale_pos depth_value
= _pt
.wp_scaled(st
.get_min())[2];
2017 weights
->pix(i
, j
)[0] += encounter
[0];
2018 if (weights
->pix(i
, j
)[1] < encounter
[0]) {
2019 weights
->pix(i
, j
)[1] = encounter
[0];
2020 im
->pix(i
, j
)[0] = weights
->pix(i
, j
)[1] * depth_value
;
2021 im
->pix(i
, j
)[1] = max
[0] - min
[0];
2022 im
->pix(i
, j
)[2] = max
[1] - min
[1];
2025 } else if (type
== 7) {
2028 * (xoff, yoff, 0) triple
2031 weights
->pix(i
, j
)[0] += encounter
[0];
2032 if (weights
->pix(i
, j
)[1] < encounter
[0]) {
2033 weights
->pix(i
, j
)[1] = encounter
[0];
2034 im
->pix(i
, j
)[0] = i
- min
[0];
2035 im
->pix(i
, j
)[1] = j
- min
[1];
2036 im
->pix(i
, j
)[2] = 0;
2039 } else if (type
== 8) {
2042 * Value = 1 for any intersected space.
2045 weights
->pix(i
, j
) = d2::pixel(1, 1, 1);
2046 im
->pix(i
, j
) = d2::pixel(1, 1, 1);
2048 } else if (type
== 9) {
2051 * Number of contributions for the nearest space.
2054 if (weights
->pix(i
, j
)[0] == 1)
2057 weights
->pix(i
, j
) = d2::pixel(1, 1, 1);
2058 im
->pix(i
, j
) = d2::pixel(1, 1, 1) * (sn
.get_pocc_density() * 0.1);
2067 * Generate an depth image from a specified view.
2069 static const d2::image
*depth(pt _pt
, int n
= -1, int prune
= 0,
2070 d2::point pl
= d2::point(0, 0), d2::point ph
= d2::point(0, 0)) {
2071 assert ((unsigned int) n
< d2::image_rw::count() || n
< 0);
2073 _pt
.view_angle(_pt
.view_angle() * VIEW_ANGLE_MULTIPLIER
);
2076 assert((int) floor(d2::align::of(n
).scaled_height())
2077 == (int) floor(_pt
.scaled_height()));
2078 assert((int) floor(d2::align::of(n
).scaled_width())
2079 == (int) floor(_pt
.scaled_width()));
2082 d2::image
*im1
, *im2
, *im3
, *weights
;;
2086 im1
= new d2::image_ale_real((int) floor(ph
[0] - pl
[0]) + 1,
2087 (int) floor(ph
[1] - pl
[1]) + 1, 3);
2089 im2
= new d2::image_ale_real((int) floor(ph
[0] - pl
[0]) + 1,
2090 (int) floor(ph
[1] - pl
[1]) + 1, 3);
2092 im3
= new d2::image_ale_real((int) floor(ph
[0] - pl
[0]) + 1,
2093 (int) floor(ph
[1] - pl
[1]) + 1, 3);
2095 weights
= new d2::image_ale_real((int) floor(ph
[0] - pl
[0]) + 1,
2096 (int) floor(ph
[1] - pl
[1]) + 1, 3);
2100 im1
= new d2::image_ale_real((int) floor(_pt
.scaled_height()),
2101 (int) floor(_pt
.scaled_width()), 3);
2103 im2
= new d2::image_ale_real((int) floor(_pt
.scaled_height()),
2104 (int) floor(_pt
.scaled_width()), 3);
2106 im3
= new d2::image_ale_real((int) floor(_pt
.scaled_height()),
2107 (int) floor(_pt
.scaled_width()), 3);
2109 weights
= new d2::image_ale_real((int) floor(_pt
.scaled_height()),
2110 (int) floor(_pt
.scaled_width()), 3);
2114 * Iterate through subspaces.
2117 space::iterate
si(_pt
.origin());
2119 view_recurse(6, im1
, weights
, si
, _pt
, prune
, pl
, ph
);
2124 weights
= new d2::image_ale_real((int) floor(ph
[0] - pl
[0]) + 1,
2125 (int) floor(ph
[1] - pl
[1]) + 1, 3);
2127 weights
= new d2::image_ale_real((int) floor(_pt
.scaled_height()),
2128 (int) floor(_pt
.scaled_width()), 3);
2132 view_recurse(7, im2
, weights
, si
, _pt
, prune
, pl
, ph
);
2134 view_recurse(8, im2
, weights
, si
, _pt
, prune
, pl
, ph
);
2139 * Normalize depths by weights
2142 if (normalize_weights
)
2143 for (unsigned int i
= 0; i
< im1
->height(); i
++)
2144 for (unsigned int j
= 0; j
< im1
->width(); j
++)
2145 im1
->pix(i
, j
)[0] /= weights
->pix(i
, j
)[1];
2148 for (unsigned int i
= 0; i
< im1
->height(); i
++)
2149 for (unsigned int j
= 0; j
< im1
->width(); j
++) {
2152 * Handle interpolation.
2157 d2::point
res(im1
->pix(i
, j
)[1], im1
->pix(i
, j
)[2]);
2159 for (int d
= 0; d
< 2; d
++) {
2161 if (im2
->pix(i
, j
)[d
] < res
[d
] / 2)
2162 x
[d
] = (ale_pos
) (d
?j
:i
) - res
[d
] / 2 - im2
->pix(i
, j
)[d
];
2164 x
[d
] = (ale_pos
) (d
?j
:i
) + res
[d
] / 2 - im2
->pix(i
, j
)[d
];
2166 blx
[d
] = 1 - ((d
?j
:i
) - x
[d
]) / res
[d
];
2169 ale_real depth_val
= 0;
2170 ale_real depth_weight
= 0;
2172 for (int ii
= 0; ii
< 2; ii
++)
2173 for (int jj
= 0; jj
< 2; jj
++) {
2174 d2::point p
= x
+ d2::point(ii
, jj
) * res
;
2175 if (im1
->in_bounds(p
)) {
2177 ale_real d
= im1
->get_bl(p
)[0];
2182 ale_real w
= ((ii
? (1 - blx
[0]) : blx
[0]) * (jj
? (1 - blx
[1]) : blx
[1]));
2188 ale_real depth
= depth_val
/ depth_weight
;
2191 * Handle encounter thresholds
2194 if (weights
->pix(i
, j
)[0] < encounter_threshold
) {
2195 im3
->pix(i
, j
) = d2::pixel::zero() / d2::pixel::zero();
2197 im3
->pix(i
, j
) = d2::pixel(1, 1, 1) * depth
;
2208 static const d2::image
*depth(unsigned int n
) {
2210 assert (n
< d2::image_rw::count());
2212 pt _pt
= align::projective(n
);
2214 return depth(_pt
, n
);
2219 * This function always performs exclusion.
2222 static space::node
*most_visible_pointwise(d2::pixel
*weight
, space::iterate si
, pt _pt
, d2::point p
) {
2224 space::node
*result
= NULL
;
2226 while (!si
.done()) {
2227 space::traverse st
= si
.get();
2230 * Prune certain regions known to be uninteresting.
2233 if (excluded(st
) || !_pt
.check_inclusion_scaled(st
, p
)) {
2239 * XXX: This could be more efficient, perhaps.
2242 if (spatial_info_map
.count(st
.get_node()) == 0) {
2247 spatial_info sn
= spatial_info_map
[st
.get_node()];
2250 * Get information on the subspace.
2253 ale_real occupancy
= sn
.get_occupancy();
2256 * Preserve current weight in order to check for
2257 * modification by higher-resolution subspaces.
2260 d2::pixel old_weight
= *weight
;
2263 * Check for higher resolution subspaces, and
2264 * update the space iterator.
2267 if (st
.get_node()->positive
2268 || st
.get_node()->negative
) {
2271 * Cleave space for the higher-resolution pass,
2272 * skipping the current space, since we will
2273 * process that afterward.
2276 space::iterate cleaved_space
= si
.cleave();
2278 cleaved_space
.next();
2280 space::node
*r
= most_visible_pointwise(weight
, cleaved_space
, _pt
, p
);
2282 if (old_weight
[1] != (*weight
)[1])
2291 * Check for higher-resolution updates.
2294 if (old_weight
!= *weight
)
2298 * Determine the probability of encounter.
2301 ale_pos encounter
= (1 - (*weight
)[0]) * occupancy
;
2304 * (*weight)[0] stores the cumulative weight; (*weight)[1] stores the maximum.
2307 if (encounter
> (*weight
)[1]) {
2308 result
= st
.get_node();
2309 (*weight
)[1] = encounter
;
2312 (*weight
)[0] += encounter
;
2319 * This function performs exclusion iff SCALED is true.
2321 static void most_visible_generic(std::vector
<space::node
*> &results
, d2::image
*weights
,
2322 space::iterate si
, pt _pt
, int scaled
) {
2324 assert (results
.size() == weights
->height() * weights
->width());
2326 while (!si
.done()) {
2327 space::traverse st
= si
.get();
2329 if (scaled
&& excluded(st
)) {
2335 * XXX: This could be more efficient, perhaps.
2338 if (spatial_info_map
.count(st
.get_node()) == 0) {
2343 spatial_info sn
= spatial_info_map
[st
.get_node()];
2346 * Get information on the subspace.
2349 ale_real occupancy
= sn
.get_occupancy();
2352 * Determine the view-local bounding box for the
2358 _pt
.get_view_local_bb_scaled(st
, bb
);
2364 * Data structure to check modification of weights by
2365 * higher-resolution subspaces.
2368 std::queue
<d2::pixel
> weight_queue
;
2371 * Check for higher resolution subspaces, and
2372 * update the space iterator.
2375 if (st
.get_node()->positive
2376 || st
.get_node()->negative
) {
2379 * Store information about current weights,
2380 * so we will know which areas have been
2381 * covered by higher-resolution subspaces.
2384 for (int i
= (int) ceil(min
[0]); i
<= (int) floor(max
[0]); i
++)
2385 for (int j
= (int) ceil(min
[1]); j
<= (int) floor(max
[1]); j
++)
2386 weight_queue
.push(weights
->get_pixel(i
, j
));
2389 * Cleave space for the higher-resolution pass,
2390 * skipping the current space, since we will
2391 * process that afterward.
2394 space::iterate cleaved_space
= si
.cleave();
2396 cleaved_space
.next();
2398 most_visible_generic(results
, weights
, cleaved_space
, _pt
, scaled
);
2406 * Iterate over pixels in the bounding box, finding
2407 * pixels that intersect the subspace. XXX: assume
2408 * for now that all pixels in the bounding box
2409 * intersect the subspace.
2412 for (int i
= (int) ceil(min
[0]); i
<= (int) floor(max
[0]); i
++)
2413 for (int j
= (int) ceil(min
[1]); j
<= (int) floor(max
[1]); j
++) {
2416 * Check for higher-resolution updates.
2419 if (weight_queue
.size()) {
2420 if (weight_queue
.front() != weights
->get_pixel(i
, j
)) {
2428 * Determine the probability of encounter.
2431 ale_pos encounter
= (1 - weights
->get_pixel(i
, j
)[0]) * occupancy
;
2434 * weights[0] stores the cumulative weight; weights[1] stores the maximum.
2437 if (encounter
> weights
->get_pixel(i
, j
)[1]
2438 || results
[i
* weights
->width() + j
] == NULL
) {
2439 results
[i
* weights
->width() + j
] = st
.get_node();
2440 weights
->chan(i
, j
, 1) = encounter
;
2443 weights
->chan(i
, j
, 0) += encounter
;
2448 static std::vector
<space::node
*> most_visible_scaled(pt _pt
) {
2449 d2::image
*weights
= new d2::image_ale_real((int) floor(_pt
.scaled_height()),
2450 (int) floor(_pt
.scaled_width()), 3);
2451 std::vector
<space::node
*> results
;
2453 results
.resize(weights
->height() * weights
->width(), 0);
2455 most_visible_generic(results
, weights
, space::iterate(_pt
.origin()), _pt
, 1);
2460 static std::vector
<space::node
*> most_visible_unscaled(pt _pt
) {
2461 d2::image
*weights
= new d2::image_ale_real((int) floor(_pt
.unscaled_height()),
2462 (int) floor(_pt
.unscaled_width()), 3);
2463 std::vector
<space::node
*> results
;
2465 results
.resize(weights
->height() * weights
->width(), 0);
2467 most_visible_generic(results
, weights
, space::iterate(_pt
.origin()), _pt
, 0);
2472 static const int visibility_search(const std::vector
<space::node
*> &fmv
, space::node
*mv
) {
2477 if (std::binary_search(fmv
.begin(), fmv
.end(), mv
))
2480 return (visibility_search(fmv
, mv
->positive
)
2481 || visibility_search(fmv
, mv
->negative
));
2486 * Class to generate focal sample views.
2489 class view_generator
{
2492 * Original projective transformation.
2498 * Data type for shared view data.
2503 std::vector
<space::node
*> mv
;
2505 d2::image
*color_weights
;
2506 const d2::image
*_depth
;
2507 d2::image
*median_depth
;
2508 d2::image
*median_diff
;
2511 shared_view(pt _pt
) {
2514 color_weights
= NULL
;
2516 median_depth
= NULL
;
2520 shared_view(const shared_view
©_origin
) {
2521 _pt
= copy_origin
._pt
;
2522 mv
= copy_origin
.mv
;
2524 color_weights
= NULL
;
2526 median_depth
= NULL
;
2533 delete color_weights
;
2535 delete median_depth
;
2538 void get_view_recurse(d2::image
*data
, d2::image
*weights
, int type
) {
2540 * Iterate through subspaces.
2543 space::iterate
si(_pt
.origin());
2545 ui::get()->d3_render_status(0, 0, -1, -1, -1, -1, 0);
2547 view_recurse(type
, data
, weights
, si
, _pt
);
2551 color
= new d2::image_ale_real((int) floor(_pt
.scaled_height()),
2552 (int) floor(_pt
.scaled_width()), 3);
2554 color_weights
= new d2::image_ale_real((int) floor(_pt
.scaled_height()),
2555 (int) floor(_pt
.scaled_width()), 3);
2557 get_view_recurse(color
, color_weights
, 0);
2561 _depth
= depth(_pt
, -1);
2564 void init_medians() {
2570 median_diff
= _depth
->fcdiff_median((int) floor(diff_median_radius
));
2571 median_depth
= _depth
->medians((int) floor(depth_median_radius
));
2573 assert(median_diff
);
2574 assert(median_depth
);
2582 space::node
*get_most_visible(unsigned int i
, unsigned int j
) {
2583 unsigned int height
= (int) floor(_pt
.scaled_height());
2584 unsigned int width
= (int) floor(_pt
.scaled_width());
2586 if (i
< 0 || i
>= height
2587 || j
< 0 || j
>= width
) {
2591 if (mv
.size() == 0) {
2592 mv
= most_visible_scaled(_pt
);
2595 assert (mv
.size() > i
* width
+ j
);
2597 return mv
[i
* width
+ j
];
2600 space::node
*get_most_visible(d2::point p
) {
2601 unsigned int i
= (unsigned int) round (p
[0]);
2602 unsigned int j
= (unsigned int) round (p
[1]);
2604 return get_most_visible(i
, j
);
2607 d2::pixel
get_color(unsigned int i
, unsigned int j
) {
2608 if (color
== NULL
) {
2612 assert (color
!= NULL
);
2614 return color
->get_pixel(i
, j
);
2617 d2::pixel
get_depth(unsigned int i
, unsigned int j
) {
2618 if (_depth
== NULL
) {
2622 assert (_depth
!= NULL
);
2624 return _depth
->get_pixel(i
, j
);
2627 void get_median_depth_and_diff(d2::pixel
*t
, d2::pixel
*f
, unsigned int i
, unsigned int j
) {
2628 if (median_depth
== NULL
&& median_diff
== NULL
)
2631 assert (median_depth
&& median_diff
);
2633 if (i
< 0 || i
>= median_depth
->height()
2634 || j
< 0 || j
>= median_depth
->width()) {
2635 *t
= d2::pixel::undefined();
2636 *f
= d2::pixel::undefined();
2638 *t
= median_depth
->get_pixel(i
, j
);
2639 *f
= median_diff
->get_pixel(i
, j
);
2643 void get_color_and_weight(d2::pixel
*c
, d2::pixel
*w
, d2::point p
) {
2644 if (color
== NULL
) {
2648 assert (color
!= NULL
);
2650 if (!color
->in_bounds(p
)) {
2651 *c
= d2::pixel::undefined();
2652 *w
= d2::pixel::undefined();
2654 *c
= color
->get_bl(p
);
2655 *w
= color_weights
->get_bl(p
);
2659 d2::pixel
get_depth(d2::point p
) {
2660 if (_depth
== NULL
) {
2664 assert (_depth
!= NULL
);
2666 if (!_depth
->in_bounds(p
)) {
2667 return d2::pixel::undefined();
2670 return _depth
->get_bl(p
);
2673 void get_median_depth_and_diff(d2::pixel
*t
, d2::pixel
*f
, d2::point p
) {
2674 if (median_diff
== NULL
&& median_depth
== NULL
)
2677 assert (median_diff
!= NULL
&& median_depth
!= NULL
);
2679 if (!median_diff
->in_bounds(p
)) {
2680 *t
= d2::pixel::undefined();
2681 *f
= d2::pixel::undefined();
2683 *t
= median_depth
->get_bl(p
);
2684 *f
= median_diff
->get_bl(p
);
2691 * Shared view array, indexed by aperture diameter and view number.
2694 std::map
<ale_pos
, std::vector
<shared_view
> > aperture_to_shared_views_map
;
2697 * Method to generate a new stochastic focal view.
2700 pt
get_new_view(ale_pos aperture
) {
2702 ale_pos ofx
= aperture
;
2703 ale_pos ofy
= aperture
;
2705 while (ofx
* ofx
+ ofy
* ofy
> aperture
* aperture
/ 4) {
2706 ofx
= (rand() * aperture
) / RAND_MAX
- aperture
/ 2;
2707 ofy
= (rand() * aperture
) / RAND_MAX
- aperture
/ 2;
2711 * Generate a new view from the given offset.
2714 point new_view
= original_pt
.cw(point(ofx
, ofy
, 0));
2715 pt _pt_new
= original_pt
;
2716 for (int d
= 0; d
< 3; d
++)
2717 _pt_new
.e().set_translation(d
, -new_view
[d
]);
2734 view(shared_view
*sv
, pt _pt
= pt()) {
2737 this->_pt
= sv
->get_pt();
2747 space::node
*get_most_visible(unsigned int i
, unsigned int j
) {
2749 return sv
->get_most_visible(i
, j
);
2752 space::node
*get_most_visible(d2::point p
) {
2754 return sv
->get_most_visible(p
);
2757 d2::pixel
weight(0, 0, 0);
2759 return most_visible_pointwise(&weight
, space::iterate(_pt
.origin()), _pt
, p
);
2763 d2::pixel
get_color(unsigned int i
, unsigned int j
) {
2764 return sv
->get_color(i
, j
);
2767 void get_color_and_weight(d2::pixel
*color
, d2::pixel
*weight
, d2::point p
) {
2769 sv
->get_color_and_weight(color
, weight
, p
);
2774 * Determine weight and color for the given point.
2777 d2::image
*im_point
= new d2::image_ale_real(1, 1, 3);
2778 d2::image
*wt_point
= new d2::image_ale_real(1, 1, 3);
2780 view_recurse(0, im_point
, wt_point
, space::iterate(_pt
.origin()), _pt
, 1, p
, p
);
2782 *color
= im_point
->pix(0, 0);
2783 *weight
= wt_point
->pix(0, 0);
2791 d2::pixel
get_depth(unsigned int i
, unsigned int j
) {
2793 return sv
->get_depth(i
, j
);
2796 void get_median_depth_and_diff(d2::pixel
*depth
, d2::pixel
*diff
, unsigned int i
, unsigned int j
) {
2798 sv
->get_median_depth_and_diff(depth
, diff
, i
, j
);
2801 void get_median_depth_and_diff(d2::pixel
*_depth
, d2::pixel
*_diff
, d2::point p
) {
2803 sv
->get_median_depth_and_diff(_depth
, _diff
, p
);
2808 * Generate a local depth image of required radius.
2813 if (diff_median_radius
+ 1 > radius
)
2814 radius
= diff_median_radius
+ 1;
2815 if (depth_median_radius
> radius
)
2816 radius
= depth_median_radius
;
2818 d2::point pl
= p
- d2::point(radius
, radius
);
2819 d2::point ph
= p
+ d2::point(radius
, radius
);
2820 const d2::image
*local_depth
= depth(_pt
, -1, 1, pl
, ph
);
2823 * Find depth and diff at this point, check for
2824 * undefined values, and generate projections
2825 * of the image corners on the estimated normal
2829 d2::image
*median_diffs
= local_depth
->fcdiff_median((int) floor(diff_median_radius
));
2830 d2::image
*median_depths
= local_depth
->medians((int) floor(depth_median_radius
));
2832 *_depth
= median_depths
->pix((int) radius
, (int) radius
);
2833 *_diff
= median_diffs
->pix((int) radius
, (int) radius
);
2835 delete median_diffs
;
2836 delete median_depths
;
2841 view
get_view(ale_pos aperture
, unsigned index
, unsigned int randomization
) {
2842 if (randomization
== 0) {
2844 while (aperture_to_shared_views_map
[aperture
].size() <= index
) {
2845 aperture_to_shared_views_map
[aperture
].push_back(shared_view(get_new_view(aperture
)));
2848 return view(&(aperture_to_shared_views_map
[aperture
][index
]));
2851 return view(NULL
, get_new_view(aperture
));
2854 view_generator(pt original_pt
) {
2855 this->original_pt
= original_pt
;
2860 * Unfiltered function
2862 static const d2::image
*view_nofilter_focus(pt _pt
, int n
) {
2864 assert ((unsigned int) n
< d2::image_rw::count() || n
< 0);
2867 assert((int) floor(d2::align::of(n
).scaled_height())
2868 == (int) floor(_pt
.scaled_height()));
2869 assert((int) floor(d2::align::of(n
).scaled_width())
2870 == (int) floor(_pt
.scaled_width()));
2873 const d2::image
*depths
= depth(_pt
, n
);
2875 d2::image
*im
= new d2::image_ale_real((int) floor(_pt
.scaled_height()),
2876 (int) floor(_pt
.scaled_width()), 3);
2878 _pt
.view_angle(_pt
.view_angle() * VIEW_ANGLE_MULTIPLIER
);
2880 view_generator
vg(_pt
);
2882 for (unsigned int i
= 0; i
< im
->height(); i
++)
2883 for (unsigned int j
= 0; j
< im
->width(); j
++) {
2885 focus::result _focus
= focus::get(depths
, i
, j
);
2887 if (!finite(_focus
.focal_distance
))
2891 * Data structures for calculating focal statistics.
2894 d2::pixel color
, weight
;
2895 d2::image_weighted_median
*iwm
= NULL
;
2897 if (_focus
.statistic
== 1) {
2898 iwm
= new d2::image_weighted_median(1, 1, 3, _focus
.sample_count
);
2902 * Iterate over views for this focus region.
2905 for (unsigned int v
= 0; v
< _focus
.sample_count
; v
++) {
2907 view_generator::view vw
= vg
.get_view(_focus
.aperture
, v
, _focus
.randomization
);
2909 ui::get()->d3_render_status(0, 1, -1, v
, i
, j
, -1);
2913 * Map the focused point to the new view.
2916 point p
= vw
.get_pt().wp_scaled(_pt
.pw_scaled(point(i
, j
, _focus
.focal_distance
)));
2919 * Determine weight and color for the given point.
2922 d2::pixel view_weight
, view_color
;
2924 vw
.get_color_and_weight(&view_color
, &view_weight
, p
.xy());
2926 if (!color
.finite() || !weight
.finite())
2929 if (_focus
.statistic
== 0) {
2930 color
+= view_color
;
2931 weight
+= view_weight
;
2932 } else if (_focus
.statistic
== 1) {
2933 iwm
->accumulate(0, 0, v
, view_color
, view_weight
);
2938 if (_focus
.statistic
== 1) {
2939 weight
= iwm
->get_weights()->get_pixel(0, 0);
2940 color
= iwm
->get_pixel(0, 0);
2944 if (weight
.min_norm() < encounter_threshold
) {
2945 im
->pix(i
, j
) = d2::pixel::zero() / d2::pixel::zero();
2946 } else if (normalize_weights
)
2947 im
->pix(i
, j
) = color
/ weight
;
2949 im
->pix(i
, j
) = color
;
2958 * Unfiltered function
2960 static const d2::image
*view_nofilter(pt _pt
, int n
) {
2962 if (!focus::is_trivial())
2963 return view_nofilter_focus(_pt
, n
);
2965 assert ((unsigned int) n
< d2::image_rw::count() || n
< 0);
2968 assert((int) floor(d2::align::of(n
).scaled_height())
2969 == (int) floor(_pt
.scaled_height()));
2970 assert((int) floor(d2::align::of(n
).scaled_width())
2971 == (int) floor(_pt
.scaled_width()));
2974 const d2::image
*depths
= depth(_pt
, n
);
2976 d2::image
*im
= new d2::image_ale_real((int) floor(_pt
.scaled_height()),
2977 (int) floor(_pt
.scaled_width()), 3);
2979 _pt
.view_angle(_pt
.view_angle() * VIEW_ANGLE_MULTIPLIER
);
2982 * Use adaptive subspace data.
2985 d2::image
*weights
= new d2::image_ale_real((int) floor(_pt
.scaled_height()),
2986 (int) floor(_pt
.scaled_width()), 3);
2989 * Iterate through subspaces.
2992 space::iterate
si(_pt
.origin());
2994 ui::get()->d3_render_status(0, 0, -1, -1, -1, -1, 0);
2996 view_recurse(0, im
, weights
, si
, _pt
);
2998 for (unsigned int i
= 0; i
< im
->height(); i
++)
2999 for (unsigned int j
= 0; j
< im
->width(); j
++) {
3000 if (weights
->pix(i
, j
).min_norm() < encounter_threshold
3001 || (d3px_count
> 0 && isnan(depths
->pix(i
, j
)[0]))) {
3002 im
->pix(i
, j
) = d2::pixel::zero() / d2::pixel::zero();
3003 weights
->pix(i
, j
) = d2::pixel::zero();
3004 } else if (normalize_weights
)
3005 im
->pix(i
, j
) /= weights
->pix(i
, j
);
3016 * Filtered function.
3018 static const d2::image
*view_filter_focus(pt _pt
, int n
) {
3020 assert ((unsigned int) n
< d2::image_rw::count() || n
< 0);
3023 * Get depth image for focus region determination.
3026 const d2::image
*depths
= depth(_pt
, n
);
3028 unsigned int height
= (unsigned int) floor(_pt
.scaled_height());
3029 unsigned int width
= (unsigned int) floor(_pt
.scaled_width());
3032 * Prepare input frame data.
3035 if (tc_multiplier
== 0)
3038 pt
*_ptf
= new pt
[al
->count()];
3039 std::vector
<space::node
*> *fmv
= new std::vector
<space::node
*>[al
->count()];
3041 for (unsigned int f
= 0; f
< al
->count(); f
++) {
3042 _ptf
[f
] = al
->get(f
)->get_t(0);
3043 fmv
[f
] = most_visible_unscaled(_ptf
[f
]);
3044 std::sort(fmv
[f
].begin(), fmv
[f
].end());
3047 if (tc_multiplier
== 0)
3051 * Open all files for rendering.
3054 d2::image_rw::open_all();
3057 * Prepare data structures for averaging views, as we render
3058 * each view separately. This is spacewise inefficient, but
3059 * is easy to implement given the current operation of the
3063 d2::image_weighted_avg
*iwa
;
3065 if (d3::focus::uses_medians()) {
3066 iwa
= new d2::image_weighted_median(height
, width
, 3, focus::max_samples());
3068 iwa
= new d2::image_weighted_simple(height
, width
, 3, new d2::invariant(NULL
));
3071 _pt
.view_angle(_pt
.view_angle() * VIEW_ANGLE_MULTIPLIER
);
3074 * Prepare view generator.
3077 view_generator
vg(_pt
);
3080 * Render views separately. This is spacewise inefficient,
3081 * but is easy to implement given the current operation of the
3085 for (unsigned int v
= 0; v
< focus::max_samples(); v
++) {
3088 * Generate a new 2D renderer for filtering.
3091 d2::render::reset();
3092 d2::render
*renderer
= d2::render_parse::get(d3chain_type
);
3094 renderer
->init_point_renderer(height
, width
, 3);
3097 * Iterate over output points.
3100 for (unsigned int i
= 0; i
< height
; i
++)
3101 for (unsigned int j
= 0; j
< width
; j
++) {
3103 focus::result _focus
= focus::get(depths
, i
, j
);
3105 if (v
>= _focus
.sample_count
)
3108 if (!finite(_focus
.focal_distance
))
3111 view_generator::view vw
= vg
.get_view(_focus
.aperture
, v
, _focus
.randomization
);
3113 pt _pt_new
= vw
.get_pt();
3115 point p
= _pt_new
.wp_scaled(_pt
.pw_scaled(point(i
, j
, _focus
.focal_distance
)));
3118 * Determine the most-visible subspace.
3121 space::node
*mv
= vw
.get_most_visible(p
.xy());
3127 * Get median depth and diff.
3130 d2::pixel depth
, diff
;
3132 vw
.get_median_depth_and_diff(&depth
, &diff
, p
.xy());
3134 if (!depth
.finite() || !diff
.finite())
3137 point local_points
[3] = {
3138 point(p
[0], p
[1], depth
[0]),
3139 point(p
[0] + 1, p
[1], depth
[0] + diff
[0]),
3140 point(p
[0], p
[1] + 1, depth
[0] + diff
[1])
3144 * Iterate over files.
3147 for (unsigned int f
= 0; f
< d2::image_rw::count(); f
++) {
3149 ui::get()->d3_render_status(1, 1, f
, v
, i
, j
, -1);
3151 if (!visibility_search(fmv
[f
], mv
))
3155 * Determine transformation at (i, j). First
3156 * determine transformation from the output to
3157 * the input, then invert this, as we need the
3158 * inverse transformation for filtering.
3161 d2::point remote_points
[3] = {
3162 _ptf
[f
].wp_unscaled(_pt_new
.pw_scaled(point(local_points
[0]))).xy(),
3163 _ptf
[f
].wp_unscaled(_pt_new
.pw_scaled(point(local_points
[1]))).xy(),
3164 _ptf
[f
].wp_unscaled(_pt_new
.pw_scaled(point(local_points
[2]))).xy()
3168 * Forward matrix for the linear component of the
3172 d2::point forward_matrix
[2] = {
3173 remote_points
[1] - remote_points
[0],
3174 remote_points
[2] - remote_points
[0]
3178 * Inverse matrix for the linear component of
3179 * the transformation. Calculate using the
3183 ale_pos D
= forward_matrix
[0][0] * forward_matrix
[1][1]
3184 - forward_matrix
[0][1] * forward_matrix
[1][0];
3189 d2::point inverse_matrix
[2] = {
3190 d2::point( forward_matrix
[1][1] / D
, -forward_matrix
[1][0] / D
),
3191 d2::point(-forward_matrix
[0][1] / D
, forward_matrix
[0][0] / D
)
3195 * Determine the projective transformation parameters for the
3196 * inverse transformation.
3199 const d2::image
*imf
= d2::image_rw::get_open(f
);
3201 d2::transformation inv_t
= d2::transformation::gpt_identity(imf
, 1);
3203 d2::point local_bounds
[4];
3205 for (int n
= 0; n
< 4; n
++) {
3206 d2::point remote_bound
= d2::point((n
== 1 || n
== 2) ? imf
->height() : 0,
3207 (n
== 2 || n
== 3) ? imf
->width() : 0)
3210 local_bounds
[n
] = d2::point(i
, j
)
3211 + d2::point(remote_bound
[0] * inverse_matrix
[0][0]
3212 + remote_bound
[1] * inverse_matrix
[1][0],
3213 remote_bound
[0] * inverse_matrix
[0][1]
3214 + remote_bound
[1] * inverse_matrix
[1][1]);
3218 if (!local_bounds
[0].finite()
3219 || !local_bounds
[1].finite()
3220 || !local_bounds
[2].finite()
3221 || !local_bounds
[3].finite())
3224 inv_t
.gpt_set(local_bounds
);
3227 * Perform render step for the given frame,
3228 * transformation, and point.
3231 renderer
->point_render(i
, j
, f
, inv_t
);
3235 renderer
->finish_point_rendering();
3237 const d2::image
*im
= renderer
->get_image();
3238 const d2::image
*df
= renderer
->get_defined();
3240 for (unsigned int i
= 0; i
< height
; i
++)
3241 for (unsigned int j
= 0; j
< width
; j
++) {
3242 if (df
->get_pixel(i
, j
).finite()
3243 && df
->get_pixel(i
, j
)[0] > 0)
3244 iwa
->accumulate(i
, j
, v
, im
->get_pixel(i
, j
), d2::pixel(1, 1, 1));
3249 * Close all files and return the result.
3252 d2::image_rw::close_all();
3257 static const d2::image
*view_filter(pt _pt
, int n
) {
3259 if (!focus::is_trivial())
3260 return view_filter_focus(_pt
, n
);
3262 assert ((unsigned int) n
< d2::image_rw::count() || n
< 0);
3265 * Generate a new 2D renderer for filtering.
3268 d2::render::reset();
3269 d2::render
*renderer
= d2::render_parse::get(d3chain_type
);
3272 * Get depth image in order to estimate normals (and hence
3276 const d2::image
*depths
= depth(_pt
, n
);
3278 d2::image
*median_diffs
= depths
->fcdiff_median((int) floor(diff_median_radius
));
3279 d2::image
*median_depths
= depths
->medians((int) floor(depth_median_radius
));
3281 unsigned int height
= (unsigned int) floor(_pt
.scaled_height());
3282 unsigned int width
= (unsigned int) floor(_pt
.scaled_width());
3284 renderer
->init_point_renderer(height
, width
, 3);
3286 _pt
.view_angle(_pt
.view_angle() * VIEW_ANGLE_MULTIPLIER
);
3288 std::vector
<space::node
*> mv
= most_visible_scaled(_pt
);
3290 for (unsigned int f
= 0; f
< d2::image_rw::count(); f
++) {
3292 if (tc_multiplier
== 0)
3295 pt _ptf
= al
->get(f
)->get_t(0);
3297 std::vector
<space::node
*> fmv
= most_visible_unscaled(_ptf
);
3298 std::sort(fmv
.begin(), fmv
.end());
3300 for (unsigned int i
= 0; i
< height
; i
++)
3301 for (unsigned int j
= 0; j
< width
; j
++) {
3303 ui::get()->d3_render_status(1, 0, f
, -1, i
, j
, -1);
3309 int n
= i
* width
+ j
;
3311 if (!visibility_search(fmv
, mv
[n
]))
3315 * Find depth and diff at this point, check for
3316 * undefined values, and generate projections
3317 * of the image corners on the estimated normal
3321 d2::pixel depth
= median_depths
->pix(i
, j
);
3322 d2::pixel diff
= median_diffs
->pix(i
, j
);
3323 // d2::pixel diff = d2::pixel(0, 0, 0);
3325 if (!depth
.finite() || !diff
.finite())
3328 point local_points
[3] = {
3329 point(i
, j
, depth
[0]),
3330 point(i
+ 1, j
, depth
[0] + diff
[0]),
3331 point(i
, j
+ 1, depth
[0] + diff
[1])
3335 * Determine transformation at (i, j). First
3336 * determine transformation from the output to
3337 * the input, then invert this, as we need the
3338 * inverse transformation for filtering.
3341 d2::point remote_points
[3] = {
3342 _ptf
.wp_unscaled(_pt
.pw_scaled(point(local_points
[0]))).xy(),
3343 _ptf
.wp_unscaled(_pt
.pw_scaled(point(local_points
[1]))).xy(),
3344 _ptf
.wp_unscaled(_pt
.pw_scaled(point(local_points
[2]))).xy()
3348 * Forward matrix for the linear component of the
3352 d2::point forward_matrix
[2] = {
3353 remote_points
[1] - remote_points
[0],
3354 remote_points
[2] - remote_points
[0]
3358 * Inverse matrix for the linear component of
3359 * the transformation. Calculate using the
3363 ale_pos D
= forward_matrix
[0][0] * forward_matrix
[1][1]
3364 - forward_matrix
[0][1] * forward_matrix
[1][0];
3369 d2::point inverse_matrix
[2] = {
3370 d2::point( forward_matrix
[1][1] / D
, -forward_matrix
[1][0] / D
),
3371 d2::point(-forward_matrix
[0][1] / D
, forward_matrix
[0][0] / D
)
3375 * Determine the projective transformation parameters for the
3376 * inverse transformation.
3379 const d2::image
*imf
= d2::image_rw::open(f
);
3381 d2::transformation inv_t
= d2::transformation::gpt_identity(imf
, 1);
3383 d2::point local_bounds
[4];
3385 for (int n
= 0; n
< 4; n
++) {
3386 d2::point remote_bound
= d2::point((n
== 1 || n
== 2) ? imf
->height() : 0,
3387 (n
== 2 || n
== 3) ? imf
->width() : 0)
3390 local_bounds
[n
] = local_points
[0].xy()
3391 + d2::point(remote_bound
[0] * inverse_matrix
[0][0]
3392 + remote_bound
[1] * inverse_matrix
[1][0],
3393 remote_bound
[0] * inverse_matrix
[0][1]
3394 + remote_bound
[1] * inverse_matrix
[1][1]);
3397 inv_t
.gpt_set(local_bounds
);
3399 d2::image_rw::close(f
);
3402 * Perform render step for the given frame,
3403 * transformation, and point.
3406 d2::image_rw::open(f
);
3407 renderer
->point_render(i
, j
, f
, inv_t
);
3408 d2::image_rw::close(f
);
3411 if (tc_multiplier
== 0)
3415 renderer
->finish_point_rendering();
3417 return renderer
->get_image();
/*
 * Render a view of the scene from viewpoint _pt.  A non-negative n
 * associates the viewpoint with input frame n; n == -1 indicates an
 * arbitrary (free) viewpoint.  Dispatches to the filtered or
 * unfiltered renderer.
 */
static const d2::image *view(pt _pt, int n = -1) {

	assert ((unsigned int) n < d2::image_rw::count() || n < 0);

	/* NOTE(review): the branch condition selecting between the two
	 * renderers (presumably testing use_filter) is not visible in
	 * this extract; only the two alternative returns are. */
	return view_filter(_pt, n);

	return view_nofilter(_pt, n);
3434 static void tcem(double _tcem
) {
3435 tc_multiplier
= _tcem
;
3438 static void oui(unsigned int _oui
) {
3439 ou_iterations
= _oui
;
3442 static void pa(unsigned int _pa
) {
3443 pairwise_ambiguity
= _pa
;
3446 static void pc(const char *_pc
) {
3447 pairwise_comparisons
= _pc
;
3450 static void d3px(int _d3px_count
, double *_d3px_parameters
) {
3451 d3px_count
= _d3px_count
;
3452 d3px_parameters
= _d3px_parameters
;
3455 static void fx(double _fx
) {
3456 falloff_exponent
= _fx
;
3460 normalize_weights
= 1;
3463 static void no_nw() {
3464 normalize_weights
= 0;
3467 static void nofilter() {
3471 static void filter() {
3475 static void set_filter_type(const char *type
) {
3476 d3chain_type
= type
;
3479 static void set_subspace_traverse() {
3480 subspace_traverse
= 1;
/*
 * Check whether point p lies inside any user-specified 3D exclusion
 * region.  Regions are stored in d3px_parameters as d3px_count flat
 * sextuples: [min_0 max_0 min_1 max_1 min_2 max_2].
 */
static int excluded(point p) {
	for (int n = 0; n < d3px_count; n++) {
		double *region = d3px_parameters + (6 * n);
		if (p[0] >= region[0]
		 && p[0] <= region[1]
		 && p[1] >= region[2]
		 && p[1] <= region[3]
		 && p[2] >= region[4]
		 && p[2] <= region[5])
			/* NOTE(review): the consequent of this conditional and
			 * the function's return statements are not visible in
			 * this extract. */
	}
}
/*
 * This function returns true if a space is completely excluded, i.e.,
 * if the bounding box of subdivision cell st falls entirely within
 * one of the user-specified exclusion regions (cf. excluded(point)).
 */
static int excluded(const space::traverse &st) {
	for (int n = 0; n < d3px_count; n++) {
		double *region = d3px_parameters + (6 * n);
		if (st.get_min()[0] >= region[0]
		 && st.get_max()[0] <= region[1]
		 && st.get_min()[1] >= region[2]
		 && st.get_max()[1] <= region[3]
		 && st.get_min()[2] >= region[4]
		 && st.get_max()[2] <= region[5])
			/* NOTE(review): the consequent of this conditional and
			 * the function's return statements are not visible in
			 * this extract. */
	}
}
3516 static const d2::image
*view(unsigned int n
) {
3518 assert (n
< d2::image_rw::count());
3520 pt _pt
= align::projective(n
);
3522 return view(_pt
, n
);
/*
 * Analytic description of a candidate scene point: iw is the world
 * coordinate, ip its projection into the primary frame, and is its
 * projection into the secondary frame.
 */
typedef struct {point iw; point ip, is;} analytic;

/*
 * Multimap from match scores to candidate points, iterated in
 * ascending score order.
 */
typedef std::multimap<ale_real,analytic> score_map;

/* A single (score, candidate) entry for insertion into a score_map. */
typedef std::pair<ale_real,analytic> score_map_element;
/*
 * Build the list of output viewpoints: the projective transformation
 * of every input frame for which a depth (d_out) or view (v_out)
 * output was requested, plus all explicitly specified 3D depth and
 * output viewpoints.
 */
static std::vector<pt> make_pt_list(const char *d_out[], const char *v_out[],
		std::map<const char *, pt> *d3_depth_pt,
		std::map<const char *, pt> *d3_output_pt) {

	std::vector<pt> result;

	/* Frames with a requested depth or view output. */
	for (unsigned int n = 0; n < d2::image_rw::count(); n++) {
		if (d_out[n] || v_out[n]) {
			result.push_back(align::projective(n));
		}
	}

	/* Explicitly specified depth viewpoints. */
	for (std::map<const char *, pt>::iterator i = d3_depth_pt->begin(); i != d3_depth_pt->end(); i++) {
		result.push_back(i->second);
	}

	/* Explicitly specified output viewpoints. */
	for (std::map<const char *, pt>::iterator i = d3_output_pt->begin(); i != d3_output_pt->end(); i++) {
		result.push_back(i->second);
	}

	/* NOTE(review): the return statement (presumably `return result;`)
	 * is not visible in this extract. */
/*
 * Get a trilinear coordinate for an anisotropic candidate cell: the
 * base-2 logarithm of the cell's projected 2D diameter in view _pt,
 * divided by sqrt(2).
 */
static ale_pos get_trilinear_coordinate(point min, point max, pt _pt) {

	d2::point local_min, local_max;

	/* Both extrema start from the projection of the `min' vertex
	 * (cell vertex 0); the loop below visits the other 7 vertices. */
	local_min = _pt.wp_unscaled(min).xy();
	local_max = _pt.wp_unscaled(min).xy();

	point cell[2] = {min, max};

	/*
	 * Determine the view-local extrema in 2 dimensions.
	 */
	for (int r = 1; r < 8; r++) {
		/* The three bits of r select the min (0) or max (1) corner
		 * along each axis. */
		point local = _pt.wp_unscaled(point(cell[r>>2][0], cell[(r>>1)%2][1], cell[r%2][2]));

		for (int d = 0; d < 2; d++) {
			if (local[d] < local_min[d])
				local_min[d] = local[d];
			if (local[d] > local_max[d])
				local_max[d] = local[d];
			if (isnan(local[d]))
				/* NOTE(review): the consequent of this NaN check
				 * is not visible in this extract. */
		}
	}

	ale_pos diameter = (local_max - local_min).norm();

	return log(diameter / sqrt(2)) / log(2);
3590 * Check whether a cell is visible from a given viewpoint. This function
3591 * is guaranteed to return 1 when a cell is visible, but it is not guaranteed
3592 * to return 0 when a cell is invisible.
3594 static int pt_might_be_visible(const pt
&viewpoint
, point min
, point max
) {
3596 int doc
= (rand() % 100000) ? 0 : 1;
3599 fprintf(stderr
, "checking visibility:\n");
3601 point cell
[2] = {min
, max
};
3604 * Cycle through all vertices of the cell to check certain
3607 int pos
[3] = {0, 0, 0};
3608 int neg
[3] = {0, 0, 0};
3609 for (int i
= 0; i
< 2; i
++)
3610 for (int j
= 0; j
< 2; j
++)
3611 for (int k
= 0; k
< 2; k
++) {
3612 point p
= viewpoint
.wp_unscaled(point(cell
[i
][0], cell
[j
][1], cell
[k
][2]));
3614 if (p
[2] < 0 && viewpoint
.unscaled_in_bounds(p
))
3623 for (int d
= 0; d
< 2; d
++)
3627 fprintf(stderr
, "\t[%f %f %f] --> [%f %f %f]\n",
3628 cell
[i
][0], cell
[j
][1], cell
[k
][2],
3631 for (int d
= 0; d
< 3; d
++)
3635 if (p
[0] <= viewpoint
.unscaled_height() - 1)
3638 if (p
[1] <= viewpoint
.unscaled_width() - 1)
/*
 * Check whether a cell is output-visible, i.e., whether it might be
 * visible from at least one output viewpoint.  Conservative in the
 * same sense as pt_might_be_visible().
 */
static int output_might_be_visible(const std::vector<pt> &pt_outputs, point min, point max) {
	for (unsigned int n = 0; n < pt_outputs.size(); n++)
		if (pt_might_be_visible(pt_outputs[n], min, max))
			/* NOTE(review): the function's return statements are not
			 * visible in this extract (presumably nonzero here and
			 * zero after the loop). */
3668 * Check whether a cell is input-visible.
3670 static int input_might_be_visible(unsigned int f
, point min
, point max
) {
3671 return pt_might_be_visible(align::projective(f
), min
, max
);
/*
 * Return true if a cell fails an output resolution bound, i.e., if its
 * trilinear coordinate in some output view falls below the preferred
 * output decimation level.
 */
static int fails_output_resolution_bound(point min, point max, const std::vector<pt> &pt_outputs) {
	for (unsigned int n = 0; n < pt_outputs.size(); n++) {

		point p = pt_outputs[n].centroid(min, max);
		/* NOTE(review): statements between the centroid calculation
		 * and the resolution test below are not visible in this
		 * extract. */

		if (get_trilinear_coordinate(min, max, pt_outputs[n]) < output_decimation_preferred)
			/* NOTE(review): the conditional's consequent and the
			 * function's return statements are not visible in this
			 * extract. */
	}
}
/*
 * Check lower-bound resolution constraints for the cell [min, max]
 * against the primary frame f1 and the output viewpoints.  (f2 is not
 * referenced in the code visible here — TODO confirm against the full
 * source.)
 */
static int exceeds_resolution_lower_bounds(unsigned int f1, unsigned int f2,
		point min, point max, const std::vector<pt> &pt_outputs) {

	pt _pt = al->get(f1)->get_t(0);

	if (get_trilinear_coordinate(min, max, _pt) < input_decimation_lower)
		/* NOTE(review): the consequents of these three conditionals
		 * and the function's return statements are not visible in
		 * this extract. */

	if (fails_output_resolution_bound(min, max, pt_outputs))

	if (get_trilinear_coordinate(min, max, _pt) < primary_decimation_upper)
}
3713 * Try the candidate nearest to the specified cell.
3715 static void try_nearest_candidate(unsigned int f1
, unsigned int f2
, candidates
*c
, point min
, point max
) {
3716 point centroid
= (max
+ min
) / 2;
3717 pt _pt
[2] = { al
->get(f1
)->get_t(0), al
->get(f2
)->get_t(0) };
3720 // fprintf(stderr, "[tnc n=%f %f %f x=%f %f %f]\n", min[0], min[1], min[2], max[0], max[1], max[2]);
3723 * Reject clipping plane violations.
3726 if (centroid
[2] > front_clip
3727 || centroid
[2] < rear_clip
)
3731 * Calculate projections.
3734 for (int n
= 0; n
< 2; n
++) {
3736 p
[n
] = _pt
[n
].wp_unscaled(centroid
);
3738 if (!_pt
[n
].unscaled_in_bounds(p
[n
]))
3741 // fprintf(stderr, ":");
3748 int tc
= (int) round(get_trilinear_coordinate(min
, max
, _pt
[0]));
3749 int stc
= (int) round(get_trilinear_coordinate(min
, max
, _pt
[1]));
3751 while (tc
< input_decimation_lower
|| stc
< input_decimation_lower
) {
3756 if (tc
> primary_decimation_upper
)
3760 * Calculate score from color match. Assume for now
3761 * that the transformation can be approximated locally
3762 * with a translation.
3766 ale_pos divisor
= 0;
3767 ale_pos l1_multiplier
= 0.125;
3768 lod_image
*if1
= al
->get(f1
);
3769 lod_image
*if2
= al
->get(f2
);
3771 if (if1
->in_bounds(p
[0].xy())
3772 && if2
->in_bounds(p
[1].xy())) {
3773 divisor
+= 1 - l1_multiplier
;
3774 score
+= (1 - l1_multiplier
)
3775 * (if1
->get_tl(p
[0].xy(), tc
) - if2
->get_tl(p
[1].xy(), stc
)).normsq();
3778 for (int iii
= -1; iii
<= 1; iii
++)
3779 for (int jjj
= -1; jjj
<= 1; jjj
++) {
3780 d2::point
t(iii
, jjj
);
3782 if (!if1
->in_bounds(p
[0].xy() + t
)
3783 || !if2
->in_bounds(p
[1].xy() + t
))
3786 divisor
+= l1_multiplier
;
3787 score
+= l1_multiplier
3788 * (if1
->get_tl(p
[0].xy() + t
, tc
) - if2
->get_tl(p
[1].xy() + t
, tc
)).normsq();
3793 * Include third-camera contributions in the score.
3796 if (tc_multiplier
!= 0)
3797 for (unsigned int n
= 0; n
< d2::image_rw::count(); n
++) {
3798 if (n
== f1
|| n
== f2
)
3801 lod_image
*ifn
= al
->get(n
);
3802 pt _ptn
= ifn
->get_t(0);
3803 point pn
= _ptn
.wp_unscaled(centroid
);
3805 if (!_ptn
.unscaled_in_bounds(pn
))
3811 ale_pos ttc
= get_trilinear_coordinate(min
, max
, _ptn
);
3813 divisor
+= tc_multiplier
;
3814 score
+= tc_multiplier
3815 * (if1
->get_tl(p
[0].xy(), tc
) - ifn
->get_tl(pn
.xy(), ttc
)).normsq();
3818 c
->add_candidate(p
[0], tc
, score
/ divisor
);
3822 * Check for cells that are completely clipped.
3824 static int completely_clipped(point min
, point max
) {
3825 return (min
[2] > front_clip
3826 || max
[2] < rear_clip
);
3830 * Update extremum variables for cell points mapped to a particular view.
3832 static void update_extrema(point min
, point max
, pt _pt
, int *extreme_dim
, ale_pos
*extreme_ratio
) {
3834 point local_min
, local_max
;
3836 local_min
= _pt
.wp_unscaled(min
);
3837 local_max
= _pt
.wp_unscaled(min
);
3839 point cell
[2] = {min
, max
};
3841 int near_vertex
= 0;
3844 * Determine the view-local extrema in all dimensions, and
3845 * determine the vertex of closest z coordinate.
3848 for (int r
= 1; r
< 8; r
++) {
3849 point local
= _pt
.wp_unscaled(point(cell
[r
>>2][0], cell
[(r
>>1)%2][1], cell
[r
%2][2]));
3851 for (int d
= 0; d
< 3; d
++) {
3852 if (local
[d
] < local_min
[d
])
3853 local_min
[d
] = local
[d
];
3854 if (local
[d
] > local_max
[d
])
3855 local_max
[d
] = local
[d
];
3858 if (local
[2] == local_max
[2])
3862 ale_pos diameter
= (local_max
.xy() - local_min
.xy()).norm();
3865 * Update extrema as necessary for each dimension.
3868 for (int d
= 0; d
< 3; d
++) {
3870 int r
= near_vertex
;
3872 int p1
[3] = {r
>>2, (r
>>1)%2, r
%2};
3873 int p2
[3] = {r
>>2, (r
>>1)%2, r
%2};
3877 ale_pos local_distance
= (_pt
.wp_unscaled(point(cell
[p1
[0]][0], cell
[p1
[1]][1], cell
[p1
[2]][2])).xy()
3878 - _pt
.wp_unscaled(point(cell
[p2
[0]][0], cell
[p2
[1]][1], cell
[p2
[2]][2])).xy()).norm();
3880 if (local_distance
/ diameter
> *extreme_ratio
) {
3881 *extreme_ratio
= local_distance
/ diameter
;
3888 * Get the next split dimension.
3890 static int get_next_split(int f1
, int f2
, point min
, point max
, const std::vector
<pt
> &pt_outputs
) {
3891 for (int d
= 0; d
< 3; d
++)
3892 if (isinf(min
[d
]) || isinf(max
[d
]))
3893 return space::traverse::get_next_split(min
, max
);
3895 int extreme_dim
= 0;
3896 ale_pos extreme_ratio
= 0;
3898 update_extrema(min
, max
, al
->get(f1
)->get_t(0), &extreme_dim
, &extreme_ratio
);
3899 update_extrema(min
, max
, al
->get(f2
)->get_t(0), &extreme_dim
, &extreme_ratio
);
3901 for (unsigned int n
= 0; n
< pt_outputs
.size(); n
++) {
3902 update_extrema(min
, max
, pt_outputs
[n
], &extreme_dim
, &extreme_ratio
);
3909 * Find candidates for subspace creation.
3911 static void find_candidates(unsigned int f1
, unsigned int f2
, candidates
*c
, point min
, point max
,
3912 const std::vector
<pt
> &pt_outputs
, int depth
= 0) {
3916 if (min
[0] < 20.0001 && max
[0] > 20.0001
3917 && min
[1] < 20.0001 && max
[1] > 20.0001
3918 && min
[2] < 0.0001 && max
[2] > 0.0001)
3922 for (int i
= depth
; i
> 0; i
--) {
3923 fprintf(stderr
, "+");
3925 fprintf(stderr
, "[fc n=%f %f %f x=%f %f %f]\n",
3926 min
[0], min
[1], min
[2], max
[0], max
[1], max
[2]);
3929 if (completely_clipped(min
, max
)) {
3931 fprintf(stderr
, "c");
3935 if (!input_might_be_visible(f1
, min
, max
)
3936 || !input_might_be_visible(f2
, min
, max
)) {
3938 fprintf(stderr
, "v");
3942 if (output_clip
&& !output_might_be_visible(pt_outputs
, min
, max
)) {
3944 fprintf(stderr
, "o");
3948 if (exceeds_resolution_lower_bounds(f1
, f2
, min
, max
, pt_outputs
)) {
3949 if (!(rand() % 100000))
3950 fprintf(stderr
, "([%f %f %f], [%f %f %f]) at %d\n",
3951 min
[0], min
[1], min
[2],
3952 max
[0], max
[1], max
[2],
3956 fprintf(stderr
, "t");
3958 try_nearest_candidate(f1
, f2
, c
, min
, max
);
3962 point new_cells
[2][2];
3964 if (!space::traverse::get_next_cells(get_next_split(f1
, f2
, min
, max
, pt_outputs
), min
, max
, new_cells
)) {
3966 fprintf(stderr
, "n");
3971 fprintf(stderr
, "nc[0][0]=%f %f %f nc[0][1]=%f %f %f nc[1][0]=%f %f %f nc[1][1]=%f %f %f\n",
3983 new_cells
[1][1][2]);
3986 find_candidates(f1
, f2
, c
, new_cells
[0][0], new_cells
[0][1], pt_outputs
, depth
+ 1);
3987 find_candidates(f1
, f2
, c
, new_cells
[1][0], new_cells
[1][1], pt_outputs
, depth
+ 1);
3991 * Generate a map from scores to 3D points for various depths at point (i, j) in f1, at
3992 * lowest resolution.
3994 static score_map
p2f_score_map(unsigned int f1
, unsigned int f2
, unsigned int i
, unsigned int j
) {
3998 pt _pt1
= al
->get(f1
)->get_t(primary_decimation_upper
);
3999 pt _pt2
= al
->get(f2
)->get_t(primary_decimation_upper
);
4001 const d2::image
*if1
= al
->get(f1
)->get_image(primary_decimation_upper
);
4002 const d2::image
*if2
= al
->get(f2
)->get_image(primary_decimation_upper
);
4003 ale_pos pdu_scale
= pow(2, primary_decimation_upper
);
4006 * Get the pixel color in the primary frame
4009 // d2::pixel color_primary = if1->get_pixel(i, j);
4012 * Map two depths to the secondary frame.
4015 point p1
= _pt2
.wp_unscaled(_pt1
.pw_unscaled(point(i
, j
, 1000)));
4016 point p2
= _pt2
.wp_unscaled(_pt1
.pw_unscaled(point(i
, j
, -1000)));
4018 // fprintf(stderr, "%d->%d (%d, %d) point pair: (%d, %d, %d -> %f, %f), (%d, %d, %d -> %f, %f)\n",
4019 // f1, f2, i, j, i, j, 1000, p1[0], p1[1], i, j, -1000, p2[0], p2[1]);
4020 // _pt1.debug_output();
4021 // _pt2.debug_output();
4025 * For cases where the mapped points define a
4026 * line and where points on the line fall
4027 * within the defined area of the frame,
4028 * determine the starting point for inspection.
4029 * In other cases, continue to the next pixel.
4032 ale_pos diff_i
= p2
[0] - p1
[0];
4033 ale_pos diff_j
= p2
[1] - p1
[1];
4034 ale_pos slope
= diff_j
/ diff_i
;
4038 fprintf(stderr
, "%d->%d (%d, %d) has undefined slope\n",
4044 * Make absurdly large/small slopes either infinity, negative infinity, or zero.
4047 if (fabs(slope
) > if2
->width() * 100) {
4050 double inf
= one
/ zero
;
4052 } else if (slope
< 1 / (double) if2
->height() / 100
4053 && slope
> -1/ (double) if2
->height() / 100) {
4057 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4059 ale_pos top_intersect
= p1
[1] - p1
[0] * slope
;
4060 ale_pos lef_intersect
= p1
[0] - p1
[1] / slope
;
4061 ale_pos rig_intersect
= p1
[0] - (p1
[1] - if2
->width() + 2) / slope
;
4064 // fprintf(stderr, "slope == %f\n", slope);
4068 // fprintf(stderr, "case 0\n");
4069 sp_i
= lef_intersect
;
4071 } else if (finite(slope
) && top_intersect
>= 0 && top_intersect
< if2
->width() - 1) {
4072 // fprintf(stderr, "case 1\n");
4074 sp_j
= top_intersect
;
4075 } else if (slope
> 0 && lef_intersect
>= 0 && lef_intersect
<= if2
->height() - 1) {
4076 // fprintf(stderr, "case 2\n");
4077 sp_i
= lef_intersect
;
4079 } else if (slope
< 0 && rig_intersect
>= 0 && rig_intersect
<= if2
->height() - 1) {
4080 // fprintf(stderr, "case 3\n");
4081 sp_i
= rig_intersect
;
4082 sp_j
= if2
->width() - 2;
4084 // fprintf(stderr, "case 4\n");
4085 // fprintf(stderr, "%d->%d (%d, %d) does not intersect the defined area\n",
4091 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4094 * Determine increment values for examining
4095 * point, ensuring that incr_i is always
4099 ale_pos incr_i
, incr_j
;
4101 if (fabs(diff_i
) > fabs(diff_j
)) {
4104 } else if (slope
> 0) {
4108 incr_i
= -1 / slope
;
4112 // fprintf(stderr, "%d->%d (%d, %d) increments are (%f, %f)\n",
4113 // f1, f2, i, j, incr_i, incr_j);
4116 * Examine regions near the projected line.
4119 for (ale_pos ii
= sp_i
, jj
= sp_j
;
4120 ii
<= if2
->height() - 1 && jj
<= if2
->width() - 1 && ii
>= 0 && jj
>= 0;
4121 ii
+= incr_i
, jj
+= incr_j
) {
4123 // fprintf(stderr, "%d->%d (%d, %d) checking (%f, %f)\n",
4124 // f1, f2, i, j, ii, jj);
4128 * Check for higher, lower, and nearby points.
4135 int higher
= 0, lower
= 0, nearby
= 0;
4137 for (int iii
= 0; iii
< 2; iii
++)
4138 for (int jjj
= 0; jjj
< 2; jjj
++) {
4139 d2::pixel p
= if2
->get_pixel((int) floor(ii
) + iii
, (int) floor(jj
) + jjj
);
4141 for (int k
= 0; k
< 3; k
++) {
4142 int bitmask
= (int) pow(2, k
);
4144 if (p
[k
] > color_primary
[k
])
4146 if (p
[k
] < color_primary
[k
])
4148 if (fabs(p
[k
] - color_primary
[k
]) < nearness
)
4154 * If this is not a region of interest,
4159 fprintf(stderr
, "score map (%u, %u) line %u\n", i
, j
, __LINE__
);
4161 // if (((higher & lower) | nearby) != 0x7)
4164 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4166 // fprintf(stderr, "%d->%d (%d, %d) accepted (%f, %f)\n",
4167 // f1, f2, i, j, ii, jj);
4170 * Create an orthonormal basis to
4171 * determine line intersection.
4174 point bp0
= _pt1
.pw_unscaled(point(i
, j
, 0));
4175 point bp1
= _pt1
.pw_unscaled(point(i
, j
, 10));
4176 point bp2
= _pt2
.pw_unscaled(point(ii
, jj
, 0));
4178 point foo
= _pt1
.wp_unscaled(bp0
);
4179 // fprintf(stderr, "(%d, %d, 0) transformed to world and back is: (%f, %f, %f)\n",
4180 // i, j, foo[0], foo[1], foo[2]);
4182 foo
= _pt1
.wp_unscaled(bp1
);
4183 // fprintf(stderr, "(%d, %d, 10) transformed to world and back is: (%f, %f, %f)\n",
4184 // i, j, foo[0], foo[1], foo[2]);
4186 point b0
= (bp1
- bp0
).normalize();
4187 point b1n
= bp2
- bp0
;
4188 point b1
= (b1n
- b1n
.dproduct(b0
) * b0
).normalize();
4189 point b2
= point(0, 0, 0).xproduct(b0
, b1
).normalize(); // Should already have norm=1
4192 foo
= _pt1
.wp_unscaled(bp0
+ 30 * b0
);
4195 * Select a fourth point to define a second line.
4198 point p3
= _pt2
.pw_unscaled(point(ii
, jj
, 10));
4201 * Representation in the new basis.
4204 d2::point nbp0
= d2::point(bp0
.dproduct(b0
), bp0
.dproduct(b1
));
4205 // d2::point nbp1 = d2::point(bp1.dproduct(b0), bp1.dproduct(b1));
4206 d2::point nbp2
= d2::point(bp2
.dproduct(b0
), bp2
.dproduct(b1
));
4207 d2::point np3
= d2::point( p3
.dproduct(b0
), p3
.dproduct(b1
));
4210 * Determine intersection of line
4211 * (nbp0, nbp1), which is parallel to
4212 * b0, with line (nbp2, np3).
4216 * XXX: a stronger check would be
4217 * better here, e.g., involving the
4218 * ratio (np3[0] - nbp2[0]) / (np3[1] -
4219 * nbp2[1]). Also, acceptance of these
4220 * cases is probably better than
4225 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4227 if (np3
[1] - nbp2
[1] == 0)
4231 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4233 d2::point intersection
= d2::point(nbp2
[0]
4234 + (nbp0
[1] - nbp2
[1]) * (np3
[0] - nbp2
[0]) / (np3
[1] - nbp2
[1]),
4237 ale_pos b2_offset
= b2
.dproduct(bp0
);
4240 * Map the intersection back to the world
4244 point iw
= intersection
[0] * b0
+ intersection
[1] * b1
+ b2_offset
* b2
;
4247 * Reject intersection points behind a
4251 point icp
= _pt1
.wc(iw
);
4252 point ics
= _pt2
.wc(iw
);
4255 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4257 if (icp
[2] >= 0 || ics
[2] >= 0)
4261 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4264 * Reject clipping plane violations.
4268 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4270 if (iw
[2] > front_clip
4271 || iw
[2] < rear_clip
)
4275 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4281 point ip
= _pt1
.wp_unscaled(iw
);
4283 point is
= _pt2
.wp_unscaled(iw
);
4285 analytic _a
= { iw
, ip
, is
};
4288 * Calculate score from color match. Assume for now
4289 * that the transformation can be approximated locally
4290 * with a translation.
4294 ale_pos divisor
= 0;
4295 ale_pos l1_multiplier
= 0.125;
4297 if (if1
->in_bounds(ip
.xy())
4298 && if2
->in_bounds(is
.xy())
4299 && !d2::render::is_excluded_f(ip
.xy() * pdu_scale
, f1
)
4300 && !d2::render::is_excluded_f(is
.xy() * pdu_scale
, f2
)) {
4301 divisor
+= 1 - l1_multiplier
;
4302 score
+= (1 - l1_multiplier
)
4303 * (if1
->get_bl(ip
.xy()) - if2
->get_bl(is
.xy())).normsq();
4307 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4309 for (int iii
= -1; iii
<= 1; iii
++)
4310 for (int jjj
= -1; jjj
<= 1; jjj
++) {
4311 d2::point
t(iii
, jjj
);
4313 if (!if1
->in_bounds(ip
.xy() + t
)
4314 || !if2
->in_bounds(is
.xy() + t
)
4315 || d2::render::is_excluded_f(ip
.xy() * pdu_scale
, f1
)
4316 || d2::render::is_excluded_f(is
.xy() * pdu_scale
, f2
))
4319 divisor
+= l1_multiplier
;
4320 score
+= l1_multiplier
4321 * (if1
->get_bl(ip
.xy() + t
) - if2
->get_bl(is
.xy() + t
)).normsq();
4326 * Include third-camera contributions in the score.
4329 if (tc_multiplier
!= 0)
4330 for (unsigned int f
= 0; f
< d2::image_rw::count(); f
++) {
4331 if (f
== f1
|| f
== f2
)
4334 const d2::image
*if3
= al
->get(f
)->get_image(primary_decimation_upper
);
4335 pt _pt3
= al
->get(f
)->get_t(primary_decimation_upper
);
4337 point p
= _pt3
.wp_unscaled(iw
);
4339 if (!if3
->in_bounds(p
.xy())
4340 || !if1
->in_bounds(ip
.xy())
4341 || d2::render::is_excluded_f(p
.xy() * pdu_scale
, f
)
4342 || d2::render::is_excluded_f(ip
.xy() * pdu_scale
, f1
))
4345 divisor
+= tc_multiplier
;
4346 score
+= tc_multiplier
4347 * (if1
->get_bl(ip
.xy()) - if3
->get_bl(p
.xy())).normsq();
4353 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4356 * Reject points with undefined score.
4360 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4362 if (!finite(score
/ divisor
))
4366 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4370 * XXX: reject points not on the z=-27.882252 plane.
4374 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4376 if (_a
.ip
[2] > -27 || _a
.ip
[2] < -28)
4381 // fprintf(stderr, "score map (%u, %u) line %u\n", i, j, __LINE__);
4384 * Add the point to the score map.
4387 // d2::pixel c_ip = if1->in_bounds(ip.xy()) ? if1->get_bl(ip.xy())
4389 // d2::pixel c_is = if2->in_bounds(is.xy()) ? if2->get_bl(is.xy())
4392 // fprintf(stderr, "Candidate subspace: f1=%u f2=%u i=%u j=%u ii=%f jj=%f"
4393 // "cp=[%f %f %f] cs=[%f %f %f]\n",
4394 // f1, f2, i, j, ii, jj, c_ip[0], c_ip[1], c_ip[2],
4395 // c_is[0], c_is[1], c_is[2]);
4397 result
.insert(score_map_element(score
/ divisor
, _a
));
4400 // fprintf(stderr, "Iterating through the score map:\n");
4402 // for (score_map::iterator smi = result.begin(); smi != result.end(); smi++) {
4403 // fprintf(stderr, "%f ", smi->first);
4406 // fprintf(stderr, "\n");
4413 * Attempt to refine space around a point, to high and low resolutions
4414 * resulting in two resolutions in total.
4417 static space::traverse
refine_space(point iw
, ale_pos target_dim
, int use_filler
) {
4419 space::traverse st
= space::traverse::root();
4421 if (!st
.includes(iw
)) {
4426 int lr_done
= !use_filler
;
4429 * Loop until all resolutions of interest have been generated.
4434 point p
[2] = { st
.get_min(), st
.get_max() };
4436 ale_pos dim_max
= 0;
4438 for (int d
= 0; d
< 3; d
++) {
4439 ale_pos d_value
= fabs(p
[0][d
] - p
[1][d
]);
4440 if (d_value
> dim_max
)
4445 * Generate any new desired spatial registers.
4448 for (int f
= 0; f
< 2; f
++) {
4454 if (dim_max
< 2 * target_dim
4456 if (spatial_info_map
.find(st
.get_node()) == spatial_info_map
.end()) {
4457 spatial_info_map
[st
.get_node()];
4458 ui::get()->d3_increment_spaces();
4467 if (dim_max
< target_dim
) {
4468 if (spatial_info_map
.find(st
.get_node()) == spatial_info_map
.end()) {
4469 spatial_info_map
[st
.get_node()];
4470 ui::get()->d3_increment_spaces();
4477 * Check precision before analyzing space further.
4480 if (st
.precision_wall()) {
4481 fprintf(stderr
, "\n\n*** Error: reached subspace precision wall ***\n\n");
4486 if (st
.positive().includes(iw
)) {
4489 } else if (st
.negative().includes(iw
)) {
4493 fprintf(stderr
, "failed iw = (%f, %f, %f)\n",
4494 iw
[0], iw
[1], iw
[2]);
4501 * Calculate target dimension
4504 static ale_pos
calc_target_dim(point iw
, pt _pt
, const char *d_out
[], const char *v_out
[],
4505 std::map
<const char *, pt
> *d3_depth_pt
,
4506 std::map
<const char *, pt
> *d3_output_pt
) {
4508 ale_pos result
= _pt
.distance_1d(iw
, primary_decimation_upper
);
4510 for (unsigned int n
= 0; n
< d2::image_rw::count(); n
++) {
4511 if (d_out
[n
] && align::projective(n
).distance_1d(iw
, 0) < result
)
4512 result
= align::projective(n
).distance_1d(iw
, 0);
4513 if (v_out
[n
] && align::projective(n
).distance_1d(iw
, 0) < result
)
4514 result
= align::projective(n
).distance_1d(iw
, 0);
4517 for (std::map
<const char *, pt
>::iterator i
= d3_output_pt
->begin(); i
!= d3_output_pt
->end(); i
++) {
4518 if (i
->second
.distance_1d(iw
, 0) < result
)
4519 result
= i
->second
.distance_1d(iw
, 0);
4522 for (std::map
<const char *, pt
>::iterator i
= d3_depth_pt
->begin(); i
!= d3_depth_pt
->end(); i
++) {
4523 if (i
->second
.distance_1d(iw
, 0) < result
)
4524 result
= i
->second
.distance_1d(iw
, 0);
4527 assert (result
> 0);
4533 * Calculate level of detail for a given viewpoint.
4536 static int calc_lod(ale_pos depth1
, pt _pt
, ale_pos target_dim
) {
4537 return (int) round(_pt
.trilinear_coordinate(depth1
, target_dim
* sqrt(2)));
4541 * Calculate depth range for a given pair of viewpoints.
4544 static ale_pos
calc_depth_range(point iw
, pt _pt1
, pt _pt2
) {
4546 point ip
= _pt1
.wp_unscaled(iw
);
4548 ale_pos reference_change
= fabs(ip
[2] / 1000);
4550 point iw1
= _pt1
.pw_scaled(ip
+ point(0, 0, reference_change
));
4551 point iw2
= _pt1
.pw_scaled(ip
- point(0, 0, reference_change
));
4553 point is
= _pt2
.wc(iw
);
4554 point is1
= _pt2
.wc(iw1
);
4555 point is2
= _pt2
.wc(iw2
);
4559 ale_pos d1
= (is1
.xy() - is
.xy()).norm();
4560 ale_pos d2
= (is2
.xy() - is
.xy()).norm();
4562 // assert (reference_change > 0);
4563 // assert (d1 > 0 || d2 > 0);
4565 if (is1
[2] < 0 && is2
[2] < 0) {
4568 return reference_change
/ d1
;
4570 return reference_change
/ d2
;
4574 return reference_change
/ d1
;
4577 return reference_change
/ d2
;
4583 * Calculate a refined point for a given set of parameters.
4586 static point
get_refined_point(pt _pt1
, pt _pt2
, int i
, int j
,
4587 int f1
, int f2
, int lod1
, int lod2
, ale_pos depth
,
4588 ale_pos depth_range
) {
4590 d2::pixel comparison_color
= al
->get(f1
)->get_image(lod1
)->get_pixel(i
, j
);
4593 ale_pos best_depth
= depth
;
4595 assert (depth_range
> 0);
4597 if (fabs(depth_range
) < fabs(depth
/ 10000))
4598 return _pt1
.pw_unscaled(point(i
, j
, depth
));
4600 for (ale_pos d
= depth
- depth_range
; d
< depth
+ depth_range
; d
+= depth_range
/ 10) {
4605 point iw
= _pt1
.pw_unscaled(point(i
, j
, d
));
4606 point is
= _pt2
.wp_unscaled(iw
);
4611 if (!al
->get(f2
)->get_image(lod2
)->in_bounds(is
.xy()))
4614 ale_pos error
= (comparison_color
- al
->get(f2
)->get_image(lod2
)->get_bl(is
.xy())).norm();
4616 if (error
< best
|| best
== -1) {
4622 return _pt1
.pw_unscaled(point(i
, j
, best_depth
));
4626 * Analyze space in a manner dependent on the score map.
4629 static void analyze_space_from_map(const char *d_out
[], const char *v_out
[],
4630 std::map
<const char *, pt
> *d3_depth_pt
,
4631 std::map
<const char *, pt
> *d3_output_pt
,
4632 unsigned int f1
, unsigned int f2
,
4633 unsigned int i
, unsigned int j
, score_map _sm
, int use_filler
) {
4635 int accumulated_ambiguity
= 0;
4636 int max_acc_amb
= pairwise_ambiguity
;
4638 pt _pt1
= al
->get(f1
)->get_t(0);
4639 pt _pt2
= al
->get(f2
)->get_t(0);
4641 if (_pt1
.scale_2d() != 1)
4644 for(score_map::iterator smi
= _sm
.begin(); smi
!= _sm
.end(); smi
++) {
4646 point iw
= smi
->second
.iw
;
4648 if (accumulated_ambiguity
++ >= max_acc_amb
)
4653 ale_pos depth1
= _pt1
.wc(iw
)[2];
4654 ale_pos depth2
= _pt2
.wc(iw
)[2];
4656 ale_pos target_dim
= calc_target_dim(iw
, _pt1
, d_out
, v_out
, d3_depth_pt
, d3_output_pt
);
4658 assert(target_dim
> 0);
4660 int lod1
= calc_lod(depth1
, _pt1
, target_dim
);
4661 int lod2
= calc_lod(depth2
, _pt2
, target_dim
);
4663 while (lod1
< input_decimation_lower
4664 || lod2
< input_decimation_lower
) {
4666 lod1
= calc_lod(depth1
, _pt1
, target_dim
);
4667 lod2
= calc_lod(depth2
, _pt2
, target_dim
);
4671 if (lod1
>= (int) al
->get(f1
)->count()
4672 || lod2
>= (int) al
->get(f2
)->count())
4675 int multiplier
= (unsigned int) floor(pow(2, primary_decimation_upper
- lod1
));
4677 ale_pos depth_range
= calc_depth_range(iw
, _pt1
, _pt2
);
4679 assert (depth_range
> 0);
4681 pt _pt1_lod
= al
->get(f1
)->get_t(lod1
);
4682 pt _pt2_lod
= al
->get(f2
)->get_t(lod2
);
4684 int im
= i
* multiplier
;
4685 int jm
= j
* multiplier
;
4687 for (int ii
= 0; ii
< multiplier
; ii
+= 1)
4688 for (int jj
= 0; jj
< multiplier
; jj
+= 1) {
4690 point refined_point
= get_refined_point(_pt1_lod
, _pt2_lod
, im
+ ii
, jm
+ jj
,
4691 f1
, f2
, lod1
, lod2
, depth1
, depth_range
);
4694 * Re-evaluate target dimension.
4697 ale_pos target_dim_
=
4698 calc_target_dim(refined_point
, _pt1
, d_out
, v_out
, d3_depth_pt
, d3_output_pt
);
4700 ale_pos depth1_
= _pt1
.wc(refined_point
)[2];
4701 ale_pos depth2_
= _pt2
.wc(refined_point
)[2];
4703 int lod1_
= calc_lod(depth1_
, _pt1
, target_dim_
);
4704 int lod2_
= calc_lod(depth2_
, _pt2
, target_dim_
);
4706 while (lod1_
< input_decimation_lower
4707 || lod2_
< input_decimation_lower
) {
4709 lod1_
= calc_lod(depth1_
, _pt1
, target_dim_
);
4710 lod2_
= calc_lod(depth2_
, _pt2
, target_dim_
);
4714 * Attempt to refine space around the intersection point.
4717 space::traverse st
=
4718 refine_space(refined_point
, target_dim_
, use_filler
|| _pt1
.scale_2d() != 1);
4720 // if (!resolution_ok(al->get(f1)->get_t(0), al->get(f1)->get_t(0).trilinear_coordinate(st))) {
4721 // pt transformation = al->get(f1)->get_t(0);
4722 // ale_pos tc = al->get(f1)->get_t(0).trilinear_coordinate(st);
4724 // fprintf(stderr, "Resolution not ok.\n");
4725 // fprintf(stderr, "pow(2, tc)=%f\n", pow(2, tc));
4726 // fprintf(stderr, "transformation.unscaled_height()=%f\n",
4727 // transformation.unscaled_height());
4728 // fprintf(stderr, "transformation.unscaled_width()=%f\n",
4729 // transformation.unscaled_width());
4730 // fprintf(stderr, "tc=%f", tc);
4731 // fprintf(stderr, "input_decimation_lower - 1.5 = %f\n",
4732 // input_decimation_lower - 1.5);
4736 // assert(resolution_ok(al->get(f1)->get_t(0), al->get(f1)->get_t(0).trilinear_coordinate(st)));
4737 // assert(resolution_ok(al->get(f2)->get_t(0), al->get(f2)->get_t(0).trilinear_coordinate(st)));
4745 * Initialize space and identify regions of interest for the adaptive
/*
 * make_space: initialize the 3D space structure and identify regions of
 * interest for adaptive subdivision, based on pairwise comparisons of the
 * 2D input frames.
 *
 * NOTE(review): this span is extraction-damaged.  The bare integers that
 * begin many lines (e.g. "4748") are line numbers from the original file
 * fused into the text, identifiers are split across line breaks, and
 * blank/brace-only lines (including some closing braces and the bodies of
 * several conditionals) were dropped.  The code is left byte-identical
 * below; only comments were added.  Restore from upstream before
 * attempting to build this fragment.
 */
4748 static void make_space(const char *d_out
[], const char *v_out
[],
4749 std::map
<const char *, pt
> *d3_depth_pt
,
4750 std::map
<const char *, pt
> *d3_output_pt
) {
// Report an initial space count of zero to the UI.
4752 ui::get()->d3_total_spaces(0);
4755 * Variable indicating whether low-resolution filler space
4756 * is desired to avoid aliased gaps in surfaces.
4759 int use_filler
= d3_depth_pt
->size() != 0
4760 || d3_output_pt
->size() != 0
4761 || output_decimation_preferred
> 0
4762 || input_decimation_lower
> 0
4763 || !focus::is_trivial()
4764 || !strcmp(pairwise_comparisons
, "all");
// Build the list of output view transformations via make_pt_list().
4766 std::vector
<pt
> pt_outputs
= make_pt_list(d_out
, v_out
, d3_depth_pt
, d3_output_pt
);
4769 * Initialize root space.
4775 * Special handling for experimental option 'subspace_traverse'.
4778 if (subspace_traverse
) {
4780 * Subdivide space to resolve intensity matches between pairs
4784 for (unsigned int f1
= 0; f1
< d2::image_rw::count(); f1
++) {
// Skip frame f1 when no depth/view output involving it was requested.
4786 if (d3_depth_pt
->size() == 0
4787 && d3_output_pt
->size() == 0
4788 && d_out
[f1
] == NULL
4789 && v_out
[f1
] == NULL
)
// NOTE(review): the statement bodies of the 'tc_multiplier == 0' guards
// below were dropped by extraction -- their effect cannot be determined
// from this fragment; confirm against the upstream source.
4792 if (tc_multiplier
== 0)
4795 for (unsigned int f2
= 0; f2
< d2::image_rw::count(); f2
++) {
4800 if (tc_multiplier
== 0)
// NOTE(review): 'c' is allocated with new; no matching delete is visible
// in this fragment -- confirm ownership (generate_subspaces() may take
// or release it) against the elided lines.
4803 candidates
*c
= new candidates(f1
);
// Find subdivision candidates for the (f1, f2) pair over all of space.
4805 find_candidates(f1
, f2
, c
, point::neginf(), point::posinf(), pt_outputs
);
4809 c
->generate_subspaces();
4811 if (tc_multiplier
== 0)
4815 if (tc_multiplier
== 0)
// Default (non-traverse) path: compare every ordered frame pair.
4823 * Subdivide space to resolve intensity matches between pairs
4827 for (unsigned int f1
= 0; f1
< d2::image_rw::count(); f1
++)
4828 for (unsigned int f2
= 0; f2
< d2::image_rw::count(); f2
++) {
// Skip this pair when no output involving f1 was requested and
// pairwise comparisons are not forced to "all".
4832 if (!d_out
[f1
] && !v_out
[f1
] && !d3_depth_pt
->size()
4833 && !d3_output_pt
->size() && strcmp(pairwise_comparisons
, "all"))
4836 if (tc_multiplier
== 0) {
4842 * Iterate over all points in the primary frame.
// Scale factor mapping decimated coordinates back to frame coordinates.
4845 ale_pos pdu_scale
= pow(2, primary_decimation_upper
);
4847 for (unsigned int i
= 0; i
< al
->get(f1
)->get_image(primary_decimation_upper
)->height(); i
++)
4848 for (unsigned int j
= 0; j
< al
->get(f1
)->get_image(primary_decimation_upper
)->width(); j
++) {
// Honor 2D exclusion regions for frame f1.
4850 if (d2::render::is_excluded_f(d2::point(i
, j
) * pdu_scale
, f1
))
4853 ui::get()->d3_subdivision_status(f1
, f2
, i
, j
);
4858 * Generate a map from scores to 3D points for
4859 * various depths in f1.
4862 score_map _sm
= p2f_score_map(f1
, f2
, i
, j
);
4865 * Analyze space in a manner dependent on the score map.
4868 analyze_space_from_map(d_out
, v_out
, d3_depth_pt
, d3_output_pt
,
4869 f1
, f2
, i
, j
, _sm
, use_filler
);
4874 * This ordering may encourage image f1 to be cached.
4877 if (tc_multiplier
== 0) {
4886 * Update spatial information structures.
4888 * XXX: the name of this function is horribly misleading. There isn't
4889 * even a 'search depth' any longer, since there is no longer any
4890 * bounded DFS occurring.
/*
 * reduce_cost_to_search_depth: run ou_iterations rounds of occupancy
 * updates via spatial_info_update().  (Per the original comment preceding
 * this function, the name is historical and misleading; no bounded-DFS
 * 'search depth' exists any longer.)
 *
 * NOTE(review): this span is extraction-damaged -- the bare integers are
 * original-file line numbers fused into the text, identifiers are split
 * across line breaks, and the closing braces were dropped.  Code is left
 * byte-identical; only comments were added.
 *
 * Parameters exp_out and inc_bit are not used anywhere in the visible
 * fragment; their role cannot be determined from here -- confirm against
 * the upstream source.
 */
4892 static void reduce_cost_to_search_depth(d2::exposure
*exp_out
, int inc_bit
) {
// Tell the UI how many steps (occupancy-update iterations) will run.
4898 ui::get()->set_steps(ou_iterations
);
4900 for (unsigned int i
= 0; i
< ou_iterations
; i
++) {
// Report progress before each iteration.
4901 ui::get()->set_steps_completed(i
);
// One round of spatial occupancy refinement.
4902 spatial_info_update();
4909 * Describe a scene to a renderer
4911 static void describe(render
*r
) {