Bullet 2.85 update
[Torque-3d.git] / Engine / lib / bullet / src / BulletCollision / BroadphaseCollision / btQuantizedBvh.cpp
blob93de4999885233019ac74d091569ae6a66da4b6e
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans  http://continuousphysics.com/Bullet/

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
16 #include "btQuantizedBvh.h"
18 #include "LinearMath/btAabbUtil2.h"
19 #include "LinearMath/btIDebugDraw.h"
20 #include "LinearMath/btSerializer.h"
22 #define RAYAABB2
24 btQuantizedBvh::btQuantizedBvh() :
25 m_bulletVersion(BT_BULLET_VERSION),
26 m_useQuantization(false),
27 //m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY)
28 m_traversalMode(TRAVERSAL_STACKLESS)
29 //m_traversalMode(TRAVERSAL_RECURSIVE)
30 ,m_subtreeHeaderCount(0) //PCK: add this line
32 m_bvhAabbMin.setValue(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY);
33 m_bvhAabbMax.setValue(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY);
40 void btQuantizedBvh::buildInternal()
42 ///assumes that caller filled in the m_quantizedLeafNodes
43 m_useQuantization = true;
44 int numLeafNodes = 0;
46 if (m_useQuantization)
48 //now we have an array of leafnodes in m_leafNodes
49 numLeafNodes = m_quantizedLeafNodes.size();
51 m_quantizedContiguousNodes.resize(2*numLeafNodes);
55 m_curNodeIndex = 0;
57 buildTree(0,numLeafNodes);
59 ///if the entire tree is small then subtree size, we need to create a header info for the tree
60 if(m_useQuantization && !m_SubtreeHeaders.size())
62 btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
63 subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]);
64 subtree.m_rootNodeIndex = 0;
65 subtree.m_subtreeSize = m_quantizedContiguousNodes[0].isLeafNode() ? 1 : m_quantizedContiguousNodes[0].getEscapeIndex();
68 //PCK: update the copy of the size
69 m_subtreeHeaderCount = m_SubtreeHeaders.size();
71 //PCK: clear m_quantizedLeafNodes and m_leafNodes, they are temporary
72 m_quantizedLeafNodes.clear();
73 m_leafNodes.clear();
///just for debugging, to visualize the individual patches/subtrees
#ifdef DEBUG_PATCH_COLORS
btVector3 color[4] =
	{
		btVector3(1, 0, 0),
		btVector3(0, 1, 0),
		btVector3(0, 0, 1),
		btVector3(0, 1, 1)};
#endif  //DEBUG_PATCH_COLORS
91 void btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin,const btVector3& bvhAabbMax,btScalar quantizationMargin)
93 //enlarge the AABB to avoid division by zero when initializing the quantization values
94 btVector3 clampValue(quantizationMargin,quantizationMargin,quantizationMargin);
95 m_bvhAabbMin = bvhAabbMin - clampValue;
96 m_bvhAabbMax = bvhAabbMax + clampValue;
97 btVector3 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
98 m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize;
100 m_useQuantization = true;
103 unsigned short vecIn[3];
104 btVector3 v;
106 quantize(vecIn,m_bvhAabbMin,false);
107 v = unQuantize(vecIn);
108 m_bvhAabbMin.setMin(v-clampValue);
110 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
111 m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize;
113 quantize(vecIn,m_bvhAabbMax,true);
114 v = unQuantize(vecIn);
115 m_bvhAabbMax.setMax(v+clampValue);
117 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
118 m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize;
125 btQuantizedBvh::~btQuantizedBvh()
129 #ifdef DEBUG_TREE_BUILDING
130 int gStackDepth = 0;
131 int gMaxStackDepth = 0;
132 #endif //DEBUG_TREE_BUILDING
134 void btQuantizedBvh::buildTree (int startIndex,int endIndex)
136 #ifdef DEBUG_TREE_BUILDING
137 gStackDepth++;
138 if (gStackDepth > gMaxStackDepth)
139 gMaxStackDepth = gStackDepth;
140 #endif //DEBUG_TREE_BUILDING
143 int splitAxis, splitIndex, i;
144 int numIndices =endIndex-startIndex;
145 int curIndex = m_curNodeIndex;
147 btAssert(numIndices>0);
149 if (numIndices==1)
151 #ifdef DEBUG_TREE_BUILDING
152 gStackDepth--;
153 #endif //DEBUG_TREE_BUILDING
155 assignInternalNodeFromLeafNode(m_curNodeIndex,startIndex);
157 m_curNodeIndex++;
158 return;
160 //calculate Best Splitting Axis and where to split it. Sort the incoming 'leafNodes' array within range 'startIndex/endIndex'.
162 splitAxis = calcSplittingAxis(startIndex,endIndex);
164 splitIndex = sortAndCalcSplittingIndex(startIndex,endIndex,splitAxis);
166 int internalNodeIndex = m_curNodeIndex;
168 //set the min aabb to 'inf' or a max value, and set the max aabb to a -inf/minimum value.
169 //the aabb will be expanded during buildTree/mergeInternalNodeAabb with actual node values
170 setInternalNodeAabbMin(m_curNodeIndex,m_bvhAabbMax);//can't use btVector3(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY)) because of quantization
171 setInternalNodeAabbMax(m_curNodeIndex,m_bvhAabbMin);//can't use btVector3(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY)) because of quantization
174 for (i=startIndex;i<endIndex;i++)
176 mergeInternalNodeAabb(m_curNodeIndex,getAabbMin(i),getAabbMax(i));
179 m_curNodeIndex++;
182 //internalNode->m_escapeIndex;
184 int leftChildNodexIndex = m_curNodeIndex;
186 //build left child tree
187 buildTree(startIndex,splitIndex);
189 int rightChildNodexIndex = m_curNodeIndex;
190 //build right child tree
191 buildTree(splitIndex,endIndex);
193 #ifdef DEBUG_TREE_BUILDING
194 gStackDepth--;
195 #endif //DEBUG_TREE_BUILDING
197 int escapeIndex = m_curNodeIndex - curIndex;
199 if (m_useQuantization)
201 //escapeIndex is the number of nodes of this subtree
202 const int sizeQuantizedNode =sizeof(btQuantizedBvhNode);
203 const int treeSizeInBytes = escapeIndex * sizeQuantizedNode;
204 if (treeSizeInBytes > MAX_SUBTREE_SIZE_IN_BYTES)
206 updateSubtreeHeaders(leftChildNodexIndex,rightChildNodexIndex);
208 } else
213 setInternalNodeEscapeIndex(internalNodeIndex,escapeIndex);
217 void btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex,int rightChildNodexIndex)
219 btAssert(m_useQuantization);
221 btQuantizedBvhNode& leftChildNode = m_quantizedContiguousNodes[leftChildNodexIndex];
222 int leftSubTreeSize = leftChildNode.isLeafNode() ? 1 : leftChildNode.getEscapeIndex();
223 int leftSubTreeSizeInBytes = leftSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));
225 btQuantizedBvhNode& rightChildNode = m_quantizedContiguousNodes[rightChildNodexIndex];
226 int rightSubTreeSize = rightChildNode.isLeafNode() ? 1 : rightChildNode.getEscapeIndex();
227 int rightSubTreeSizeInBytes = rightSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));
229 if(leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
231 btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
232 subtree.setAabbFromQuantizeNode(leftChildNode);
233 subtree.m_rootNodeIndex = leftChildNodexIndex;
234 subtree.m_subtreeSize = leftSubTreeSize;
237 if(rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
239 btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
240 subtree.setAabbFromQuantizeNode(rightChildNode);
241 subtree.m_rootNodeIndex = rightChildNodexIndex;
242 subtree.m_subtreeSize = rightSubTreeSize;
245 //PCK: update the copy of the size
246 m_subtreeHeaderCount = m_SubtreeHeaders.size();
250 int btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex,int endIndex,int splitAxis)
252 int i;
253 int splitIndex =startIndex;
254 int numIndices = endIndex - startIndex;
255 btScalar splitValue;
257 btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
258 for (i=startIndex;i<endIndex;i++)
260 btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
261 means+=center;
263 means *= (btScalar(1.)/(btScalar)numIndices);
265 splitValue = means[splitAxis];
267 //sort leafNodes so all values larger then splitValue comes first, and smaller values start from 'splitIndex'.
268 for (i=startIndex;i<endIndex;i++)
270 btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
271 if (center[splitAxis] > splitValue)
273 //swap
274 swapLeafNodes(i,splitIndex);
275 splitIndex++;
279 //if the splitIndex causes unbalanced trees, fix this by using the center in between startIndex and endIndex
280 //otherwise the tree-building might fail due to stack-overflows in certain cases.
281 //unbalanced1 is unsafe: it can cause stack overflows
282 //bool unbalanced1 = ((splitIndex==startIndex) || (splitIndex == (endIndex-1)));
284 //unbalanced2 should work too: always use center (perfect balanced trees)
285 //bool unbalanced2 = true;
287 //this should be safe too:
288 int rangeBalancedIndices = numIndices/3;
289 bool unbalanced = ((splitIndex<=(startIndex+rangeBalancedIndices)) || (splitIndex >=(endIndex-1-rangeBalancedIndices)));
291 if (unbalanced)
293 splitIndex = startIndex+ (numIndices>>1);
296 bool unbal = (splitIndex==startIndex) || (splitIndex == (endIndex));
297 (void)unbal;
298 btAssert(!unbal);
300 return splitIndex;
304 int btQuantizedBvh::calcSplittingAxis(int startIndex,int endIndex)
306 int i;
308 btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
309 btVector3 variance(btScalar(0.),btScalar(0.),btScalar(0.));
310 int numIndices = endIndex-startIndex;
312 for (i=startIndex;i<endIndex;i++)
314 btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
315 means+=center;
317 means *= (btScalar(1.)/(btScalar)numIndices);
319 for (i=startIndex;i<endIndex;i++)
321 btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
322 btVector3 diff2 = center-means;
323 diff2 = diff2 * diff2;
324 variance += diff2;
326 variance *= (btScalar(1.)/ ((btScalar)numIndices-1) );
328 return variance.maxAxis();
333 void btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
335 //either choose recursive traversal (walkTree) or stackless (walkStacklessTree)
337 if (m_useQuantization)
339 ///quantize query AABB
340 unsigned short int quantizedQueryAabbMin[3];
341 unsigned short int quantizedQueryAabbMax[3];
342 quantizeWithClamp(quantizedQueryAabbMin,aabbMin,0);
343 quantizeWithClamp(quantizedQueryAabbMax,aabbMax,1);
345 switch (m_traversalMode)
347 case TRAVERSAL_STACKLESS:
348 walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,0,m_curNodeIndex);
349 break;
350 case TRAVERSAL_STACKLESS_CACHE_FRIENDLY:
351 walkStacklessQuantizedTreeCacheFriendly(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
352 break;
353 case TRAVERSAL_RECURSIVE:
355 const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[0];
356 walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
358 break;
359 default:
360 //unsupported
361 btAssert(0);
363 } else
365 walkStacklessTree(nodeCallback,aabbMin,aabbMax);
370 int maxIterations = 0;
373 void btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
375 btAssert(!m_useQuantization);
377 const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
378 int escapeIndex, curIndex = 0;
379 int walkIterations = 0;
380 bool isLeafNode;
381 //PCK: unsigned instead of bool
382 unsigned aabbOverlap;
384 while (curIndex < m_curNodeIndex)
386 //catch bugs in tree data
387 btAssert (walkIterations < m_curNodeIndex);
389 walkIterations++;
390 aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg);
391 isLeafNode = rootNode->m_escapeIndex == -1;
393 //PCK: unsigned instead of bool
394 if (isLeafNode && (aabbOverlap != 0))
396 nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex);
399 //PCK: unsigned instead of bool
400 if ((aabbOverlap != 0) || isLeafNode)
402 rootNode++;
403 curIndex++;
404 } else
406 escapeIndex = rootNode->m_escapeIndex;
407 rootNode += escapeIndex;
408 curIndex += escapeIndex;
411 if (maxIterations < walkIterations)
412 maxIterations = walkIterations;
/*
///this was the original recursive traversal, before we optimized towards stackless traversal
///NOTE(review): kept disabled — it references m_leftChild/m_rightChild/m_aabbMin members
///that the btOptimizedBvhNode used elsewhere in this file (m_aabbMinOrg/m_escapeIndex)
///does not appear to have; confirm against btQuantizedBvh.h before re-enabling.
void btQuantizedBvh::walkTree(btOptimizedBvhNode* rootNode, btNodeOverlapCallback* nodeCallback, const btVector3& aabbMin, const btVector3& aabbMax) const
{
	bool isLeafNode, aabbOverlap = TestAabbAgainstAabb2(aabbMin, aabbMax, rootNode->m_aabbMin, rootNode->m_aabbMax);
	if (aabbOverlap)
	{
		isLeafNode = (!rootNode->m_leftChild && !rootNode->m_rightChild);
		if (isLeafNode)
		{
			nodeCallback->processNode(rootNode);
		}
		else
		{
			walkTree(rootNode->m_leftChild, nodeCallback, aabbMin, aabbMax);
			walkTree(rootNode->m_rightChild, nodeCallback, aabbMin, aabbMax);
		}
	}
}
*/
437 void btQuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode,btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
439 btAssert(m_useQuantization);
441 bool isLeafNode;
442 //PCK: unsigned instead of bool
443 unsigned aabbOverlap;
445 //PCK: unsigned instead of bool
446 aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,currentNode->m_quantizedAabbMin,currentNode->m_quantizedAabbMax);
447 isLeafNode = currentNode->isLeafNode();
449 //PCK: unsigned instead of bool
450 if (aabbOverlap != 0)
452 if (isLeafNode)
454 nodeCallback->processNode(currentNode->getPartId(),currentNode->getTriangleIndex());
455 } else
457 //process left and right children
458 const btQuantizedBvhNode* leftChildNode = currentNode+1;
459 walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
461 const btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? leftChildNode+1:leftChildNode+leftChildNode->getEscapeIndex();
462 walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
469 void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const
471 btAssert(!m_useQuantization);
473 const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
474 int escapeIndex, curIndex = 0;
475 int walkIterations = 0;
476 bool isLeafNode;
477 //PCK: unsigned instead of bool
478 unsigned aabbOverlap=0;
479 unsigned rayBoxOverlap=0;
480 btScalar lambda_max = 1.0;
482 /* Quick pruning by quantized box */
483 btVector3 rayAabbMin = raySource;
484 btVector3 rayAabbMax = raySource;
485 rayAabbMin.setMin(rayTarget);
486 rayAabbMax.setMax(rayTarget);
488 /* Add box cast extents to bounding box */
489 rayAabbMin += aabbMin;
490 rayAabbMax += aabbMax;
492 #ifdef RAYAABB2
493 btVector3 rayDir = (rayTarget-raySource);
494 rayDir.normalize ();
495 lambda_max = rayDir.dot(rayTarget-raySource);
496 ///what about division by zero? --> just set rayDirection[i] to 1.0
497 btVector3 rayDirectionInverse;
498 rayDirectionInverse[0] = rayDir[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[0];
499 rayDirectionInverse[1] = rayDir[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[1];
500 rayDirectionInverse[2] = rayDir[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[2];
501 unsigned int sign[3] = { rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0};
502 #endif
504 btVector3 bounds[2];
506 while (curIndex < m_curNodeIndex)
508 btScalar param = 1.0;
509 //catch bugs in tree data
510 btAssert (walkIterations < m_curNodeIndex);
512 walkIterations++;
514 bounds[0] = rootNode->m_aabbMinOrg;
515 bounds[1] = rootNode->m_aabbMaxOrg;
516 /* Add box cast extents */
517 bounds[0] -= aabbMax;
518 bounds[1] -= aabbMin;
520 aabbOverlap = TestAabbAgainstAabb2(rayAabbMin,rayAabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg);
521 //perhaps profile if it is worth doing the aabbOverlap test first
523 #ifdef RAYAABB2
524 ///careful with this check: need to check division by zero (above) and fix the unQuantize method
525 ///thanks Joerg/hiker for the reproduction case!
526 ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858
527 rayBoxOverlap = aabbOverlap ? btRayAabb2 (raySource, rayDirectionInverse, sign, bounds, param, 0.0f, lambda_max) : false;
529 #else
530 btVector3 normal;
531 rayBoxOverlap = btRayAabb(raySource, rayTarget,bounds[0],bounds[1],param, normal);
532 #endif
534 isLeafNode = rootNode->m_escapeIndex == -1;
536 //PCK: unsigned instead of bool
537 if (isLeafNode && (rayBoxOverlap != 0))
539 nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex);
542 //PCK: unsigned instead of bool
543 if ((rayBoxOverlap != 0) || isLeafNode)
545 rootNode++;
546 curIndex++;
547 } else
549 escapeIndex = rootNode->m_escapeIndex;
550 rootNode += escapeIndex;
551 curIndex += escapeIndex;
554 if (maxIterations < walkIterations)
555 maxIterations = walkIterations;
561 void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const
563 btAssert(m_useQuantization);
565 int curIndex = startNodeIndex;
566 int walkIterations = 0;
567 int subTreeSize = endNodeIndex - startNodeIndex;
568 (void)subTreeSize;
570 const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
571 int escapeIndex;
573 bool isLeafNode;
574 //PCK: unsigned instead of bool
575 unsigned boxBoxOverlap = 0;
576 unsigned rayBoxOverlap = 0;
578 btScalar lambda_max = 1.0;
580 #ifdef RAYAABB2
581 btVector3 rayDirection = (rayTarget-raySource);
582 rayDirection.normalize ();
583 lambda_max = rayDirection.dot(rayTarget-raySource);
584 ///what about division by zero? --> just set rayDirection[i] to 1.0
585 rayDirection[0] = rayDirection[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[0];
586 rayDirection[1] = rayDirection[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[1];
587 rayDirection[2] = rayDirection[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[2];
588 unsigned int sign[3] = { rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0};
589 #endif
591 /* Quick pruning by quantized box */
592 btVector3 rayAabbMin = raySource;
593 btVector3 rayAabbMax = raySource;
594 rayAabbMin.setMin(rayTarget);
595 rayAabbMax.setMax(rayTarget);
597 /* Add box cast extents to bounding box */
598 rayAabbMin += aabbMin;
599 rayAabbMax += aabbMax;
601 unsigned short int quantizedQueryAabbMin[3];
602 unsigned short int quantizedQueryAabbMax[3];
603 quantizeWithClamp(quantizedQueryAabbMin,rayAabbMin,0);
604 quantizeWithClamp(quantizedQueryAabbMax,rayAabbMax,1);
606 while (curIndex < endNodeIndex)
609 //#define VISUALLY_ANALYZE_BVH 1
610 #ifdef VISUALLY_ANALYZE_BVH
611 //some code snippet to debugDraw aabb, to visually analyze bvh structure
612 static int drawPatch = 0;
613 //need some global access to a debugDrawer
614 extern btIDebugDraw* debugDrawerPtr;
615 if (curIndex==drawPatch)
617 btVector3 aabbMin,aabbMax;
618 aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
619 aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
620 btVector3 color(1,0,0);
621 debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
623 #endif//VISUALLY_ANALYZE_BVH
625 //catch bugs in tree data
626 btAssert (walkIterations < subTreeSize);
628 walkIterations++;
629 //PCK: unsigned instead of bool
630 // only interested if this is closer than any previous hit
631 btScalar param = 1.0;
632 rayBoxOverlap = 0;
633 boxBoxOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
634 isLeafNode = rootNode->isLeafNode();
635 if (boxBoxOverlap)
637 btVector3 bounds[2];
638 bounds[0] = unQuantize(rootNode->m_quantizedAabbMin);
639 bounds[1] = unQuantize(rootNode->m_quantizedAabbMax);
640 /* Add box cast extents */
641 bounds[0] -= aabbMax;
642 bounds[1] -= aabbMin;
643 btVector3 normal;
644 #if 0
645 bool ra2 = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0, lambda_max);
646 bool ra = btRayAabb (raySource, rayTarget, bounds[0], bounds[1], param, normal);
647 if (ra2 != ra)
649 printf("functions don't match\n");
651 #endif
652 #ifdef RAYAABB2
653 ///careful with this check: need to check division by zero (above) and fix the unQuantize method
654 ///thanks Joerg/hiker for the reproduction case!
655 ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858
657 //BT_PROFILE("btRayAabb2");
658 rayBoxOverlap = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max);
660 #else
661 rayBoxOverlap = true;//btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
662 #endif
665 if (isLeafNode && rayBoxOverlap)
667 nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
670 //PCK: unsigned instead of bool
671 if ((rayBoxOverlap != 0) || isLeafNode)
673 rootNode++;
674 curIndex++;
675 } else
677 escapeIndex = rootNode->getEscapeIndex();
678 rootNode += escapeIndex;
679 curIndex += escapeIndex;
682 if (maxIterations < walkIterations)
683 maxIterations = walkIterations;
687 void btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const
689 btAssert(m_useQuantization);
691 int curIndex = startNodeIndex;
692 int walkIterations = 0;
693 int subTreeSize = endNodeIndex - startNodeIndex;
694 (void)subTreeSize;
696 const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
697 int escapeIndex;
699 bool isLeafNode;
700 //PCK: unsigned instead of bool
701 unsigned aabbOverlap;
703 while (curIndex < endNodeIndex)
706 //#define VISUALLY_ANALYZE_BVH 1
707 #ifdef VISUALLY_ANALYZE_BVH
708 //some code snippet to debugDraw aabb, to visually analyze bvh structure
709 static int drawPatch = 0;
710 //need some global access to a debugDrawer
711 extern btIDebugDraw* debugDrawerPtr;
712 if (curIndex==drawPatch)
714 btVector3 aabbMin,aabbMax;
715 aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
716 aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
717 btVector3 color(1,0,0);
718 debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
720 #endif//VISUALLY_ANALYZE_BVH
722 //catch bugs in tree data
723 btAssert (walkIterations < subTreeSize);
725 walkIterations++;
726 //PCK: unsigned instead of bool
727 aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
728 isLeafNode = rootNode->isLeafNode();
730 if (isLeafNode && aabbOverlap)
732 nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
735 //PCK: unsigned instead of bool
736 if ((aabbOverlap != 0) || isLeafNode)
738 rootNode++;
739 curIndex++;
740 } else
742 escapeIndex = rootNode->getEscapeIndex();
743 rootNode += escapeIndex;
744 curIndex += escapeIndex;
747 if (maxIterations < walkIterations)
748 maxIterations = walkIterations;
752 //This traversal can be called from Playstation 3 SPU
753 void btQuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
755 btAssert(m_useQuantization);
757 int i;
760 for (i=0;i<this->m_SubtreeHeaders.size();i++)
762 const btBvhSubtreeInfo& subtree = m_SubtreeHeaders[i];
764 //PCK: unsigned instead of bool
765 unsigned overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,subtree.m_quantizedAabbMin,subtree.m_quantizedAabbMax);
766 if (overlap != 0)
768 walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,
769 subtree.m_rootNodeIndex,
770 subtree.m_rootNodeIndex+subtree.m_subtreeSize);
776 void btQuantizedBvh::reportRayOverlappingNodex (btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const
778 reportBoxCastOverlappingNodex(nodeCallback,raySource,rayTarget,btVector3(0,0,0),btVector3(0,0,0));
782 void btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin,const btVector3& aabbMax) const
784 //always use stackless
786 if (m_useQuantization)
788 walkStacklessQuantizedTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
790 else
792 walkStacklessTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
796 //recursive traversal
797 btVector3 qaabbMin = raySource;
798 btVector3 qaabbMax = raySource;
799 qaabbMin.setMin(rayTarget);
800 qaabbMax.setMax(rayTarget);
801 qaabbMin += aabbMin;
802 qaabbMax += aabbMax;
803 reportAabbOverlappingNodex(nodeCallback,qaabbMin,qaabbMax);
810 void btQuantizedBvh::swapLeafNodes(int i,int splitIndex)
812 if (m_useQuantization)
814 btQuantizedBvhNode tmp = m_quantizedLeafNodes[i];
815 m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex];
816 m_quantizedLeafNodes[splitIndex] = tmp;
817 } else
819 btOptimizedBvhNode tmp = m_leafNodes[i];
820 m_leafNodes[i] = m_leafNodes[splitIndex];
821 m_leafNodes[splitIndex] = tmp;
825 void btQuantizedBvh::assignInternalNodeFromLeafNode(int internalNode,int leafNodeIndex)
827 if (m_useQuantization)
829 m_quantizedContiguousNodes[internalNode] = m_quantizedLeafNodes[leafNodeIndex];
830 } else
832 m_contiguousNodes[internalNode] = m_leafNodes[leafNodeIndex];
//PCK: include <new> for the placement-new used by serialize() below.
#include <new>

#if 0
//PCK: consts
static const unsigned BVH_ALIGNMENT = 16;
static const unsigned BVH_ALIGNMENT_MASK = BVH_ALIGNMENT-1;

static const unsigned BVH_ALIGNMENT_BLOCKS = 2;
#endif
848 unsigned int btQuantizedBvh::getAlignmentSerializationPadding()
850 // I changed this to 0 since the extra padding is not needed or used.
851 return 0;//BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT;
854 unsigned btQuantizedBvh::calculateSerializeBufferSize() const
856 unsigned baseSize = sizeof(btQuantizedBvh) + getAlignmentSerializationPadding();
857 baseSize += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;
858 if (m_useQuantization)
860 return baseSize + m_curNodeIndex * sizeof(btQuantizedBvhNode);
862 return baseSize + m_curNodeIndex * sizeof(btOptimizedBvhNode);
865 bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBufferSize */, bool i_swapEndian) const
867 btAssert(m_subtreeHeaderCount == m_SubtreeHeaders.size());
868 m_subtreeHeaderCount = m_SubtreeHeaders.size();
870 /* if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
872 ///check alignedment for buffer?
873 btAssert(0);
874 return false;
878 btQuantizedBvh *targetBvh = (btQuantizedBvh *)o_alignedDataBuffer;
880 // construct the class so the virtual function table, etc will be set up
881 // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
882 new (targetBvh) btQuantizedBvh;
884 if (i_swapEndian)
886 targetBvh->m_curNodeIndex = static_cast<int>(btSwapEndian(m_curNodeIndex));
889 btSwapVector3Endian(m_bvhAabbMin,targetBvh->m_bvhAabbMin);
890 btSwapVector3Endian(m_bvhAabbMax,targetBvh->m_bvhAabbMax);
891 btSwapVector3Endian(m_bvhQuantization,targetBvh->m_bvhQuantization);
893 targetBvh->m_traversalMode = (btTraversalMode)btSwapEndian(m_traversalMode);
894 targetBvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(m_subtreeHeaderCount));
896 else
898 targetBvh->m_curNodeIndex = m_curNodeIndex;
899 targetBvh->m_bvhAabbMin = m_bvhAabbMin;
900 targetBvh->m_bvhAabbMax = m_bvhAabbMax;
901 targetBvh->m_bvhQuantization = m_bvhQuantization;
902 targetBvh->m_traversalMode = m_traversalMode;
903 targetBvh->m_subtreeHeaderCount = m_subtreeHeaderCount;
906 targetBvh->m_useQuantization = m_useQuantization;
908 unsigned char *nodeData = (unsigned char *)targetBvh;
909 nodeData += sizeof(btQuantizedBvh);
911 unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
912 nodeData += sizeToAdd;
914 int nodeCount = m_curNodeIndex;
916 if (m_useQuantization)
918 targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
920 if (i_swapEndian)
922 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
924 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
925 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
926 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);
928 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
929 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
930 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);
932 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
935 else
937 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
940 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0];
941 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1];
942 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2];
944 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0];
945 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1];
946 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2];
948 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex;
953 nodeData += sizeof(btQuantizedBvhNode) * nodeCount;
955 // this clears the pointer in the member variable it doesn't really do anything to the data
956 // it does call the destructor on the contained objects, but they are all classes with no destructor defined
957 // so the memory (which is not freed) is left alone
958 targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(NULL, 0, 0);
960 else
962 targetBvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
964 if (i_swapEndian)
966 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
968 btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMinOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
969 btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMaxOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);
971 targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_escapeIndex));
972 targetBvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_subPart));
973 targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_triangleIndex));
976 else
978 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
980 targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg = m_contiguousNodes[nodeIndex].m_aabbMinOrg;
981 targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg = m_contiguousNodes[nodeIndex].m_aabbMaxOrg;
983 targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = m_contiguousNodes[nodeIndex].m_escapeIndex;
984 targetBvh->m_contiguousNodes[nodeIndex].m_subPart = m_contiguousNodes[nodeIndex].m_subPart;
985 targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = m_contiguousNodes[nodeIndex].m_triangleIndex;
988 nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
990 // this clears the pointer in the member variable it doesn't really do anything to the data
991 // it does call the destructor on the contained objects, but they are all classes with no destructor defined
992 // so the memory (which is not freed) is left alone
993 targetBvh->m_contiguousNodes.initializeFromBuffer(NULL, 0, 0);
996 sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
997 nodeData += sizeToAdd;
999 // Now serialize the subtree headers
1000 targetBvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, m_subtreeHeaderCount, m_subtreeHeaderCount);
1001 if (i_swapEndian)
1003 for (int i = 0; i < m_subtreeHeaderCount; i++)
1005 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
1006 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
1007 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
1009 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
1010 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
1011 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
1013 targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_rootNodeIndex));
1014 targetBvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_subtreeSize));
1017 else
1019 for (int i = 0; i < m_subtreeHeaderCount; i++)
1021 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = (m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
1022 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = (m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
1023 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = (m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
1025 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = (m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
1026 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = (m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
1027 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = (m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
1029 targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = (m_SubtreeHeaders[i].m_rootNodeIndex);
1030 targetBvh->m_SubtreeHeaders[i].m_subtreeSize = (m_SubtreeHeaders[i].m_subtreeSize);
1032 // need to clear padding in destination buffer
1033 targetBvh->m_SubtreeHeaders[i].m_padding[0] = 0;
1034 targetBvh->m_SubtreeHeaders[i].m_padding[1] = 0;
1035 targetBvh->m_SubtreeHeaders[i].m_padding[2] = 0;
1038 nodeData += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;
1040 // this clears the pointer in the member variable it doesn't really do anything to the data
1041 // it does call the destructor on the contained objects, but they are all classes with no destructor defined
1042 // so the memory (which is not freed) is left alone
1043 targetBvh->m_SubtreeHeaders.initializeFromBuffer(NULL, 0, 0);
1045 // this wipes the virtual function table pointer at the start of the buffer for the class
1046 *((void**)o_alignedDataBuffer) = NULL;
1048 return true;
///Deserialize a btQuantizedBvh directly inside the caller-supplied buffer (zero-copy).
///The buffer must contain data produced by the in-place serializer, be suitably aligned,
///and stay alive for the lifetime of the returned object.
///@param i_alignedDataBuffer buffer holding the serialized BVH; reused as the object's storage
///@param i_dataBufferSize    size of that buffer in bytes, validated against the computed size
///@param i_swapEndian        true if the data was written with the opposite endianness
///@return pointer aliasing i_alignedDataBuffer, or NULL on a NULL/too-small buffer
btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian)
{
	if (i_alignedDataBuffer == NULL)// || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
	{
		return NULL;
	}
	btQuantizedBvh *bvh = (btQuantizedBvh *)i_alignedDataBuffer;

	if (i_swapEndian)
	{
		// Fix up the scalar/vector header fields first; m_curNodeIndex and
		// m_subtreeHeaderCount are needed below to size the node arrays.
		bvh->m_curNodeIndex = static_cast<int>(btSwapEndian(bvh->m_curNodeIndex));

		btUnSwapVector3Endian(bvh->m_bvhAabbMin);
		btUnSwapVector3Endian(bvh->m_bvhAabbMax);
		btUnSwapVector3Endian(bvh->m_bvhQuantization);

		bvh->m_traversalMode = (btTraversalMode)btSwapEndian(bvh->m_traversalMode);
		bvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(bvh->m_subtreeHeaderCount));
	}

	// Must run after the endian fix-up so the counts it reads are valid.
	unsigned int calculatedBufSize = bvh->calculateSerializeBufferSize();
	btAssert(calculatedBufSize <= i_dataBufferSize);

	if (calculatedBufSize > i_dataBufferSize)
	{
		return NULL;
	}

	// Node data is packed immediately after the btQuantizedBvh header.
	unsigned char *nodeData = (unsigned char *)bvh;
	nodeData += sizeof(btQuantizedBvh);

	unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	// Read before the placement new below, which default-initializes m_curNodeIndex's siblings.
	int nodeCount = bvh->m_curNodeIndex;

	// Must call placement new to fill in virtual function table, etc, but we don't want to overwrite most data, so call a special version of the constructor
	// Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
	new (bvh) btQuantizedBvh(*bvh, false);

	if (bvh->m_useQuantization)
	{
		// Point the quantized-node array at the buffer bytes (no copy).
		bvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);

				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);

				bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
			}
		}
		nodeData += sizeof(btQuantizedBvhNode) * nodeCount;
	}
	else
	{
		// Unquantized path: same zero-copy wiring, full-precision node layout.
		bvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
				btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);

				bvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_escapeIndex));
				bvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_subPart));
				bvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_triangleIndex));
			}
		}
		nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
	}

	sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	// Hook up the subtree headers that follow the node array (deserialize path).
	bvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, bvh->m_subtreeHeaderCount, bvh->m_subtreeHeaderCount);
	if (i_swapEndian)
	{
		for (int i = 0; i < bvh->m_subtreeHeaderCount; i++)
		{
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

			bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

			bvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_rootNodeIndex));
			bvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_subtreeSize));
		}
	}

	return bvh;
}
// Constructor that prevents btVector3's default constructor from being called
// Used by deSerializeInPlace(), which placement-news a btQuantizedBvh over the raw
// serialized buffer: only the members listed in the initializer list are written,
// so every other (trivial) member keeps the bytes already present in the buffer.
// m_bulletVersion is stamped with the current library version, not the serialized one.
btQuantizedBvh::btQuantizedBvh(btQuantizedBvh &self, bool /* ownsMemory */) :
m_bvhAabbMin(self.m_bvhAabbMin),
m_bvhAabbMax(self.m_bvhAabbMax),
m_bvhQuantization(self.m_bvhQuantization),
m_bulletVersion(BT_BULLET_VERSION)
{

}
1167 void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedBvhFloatData)
1169 m_bvhAabbMax.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMax);
1170 m_bvhAabbMin.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMin);
1171 m_bvhQuantization.deSerializeFloat(quantizedBvhFloatData.m_bvhQuantization);
1173 m_curNodeIndex = quantizedBvhFloatData.m_curNodeIndex;
1174 m_useQuantization = quantizedBvhFloatData.m_useQuantization!=0;
1177 int numElem = quantizedBvhFloatData.m_numContiguousLeafNodes;
1178 m_contiguousNodes.resize(numElem);
1180 if (numElem)
1182 btOptimizedBvhNodeFloatData* memPtr = quantizedBvhFloatData.m_contiguousNodesPtr;
1184 for (int i=0;i<numElem;i++,memPtr++)
1186 m_contiguousNodes[i].m_aabbMaxOrg.deSerializeFloat(memPtr->m_aabbMaxOrg);
1187 m_contiguousNodes[i].m_aabbMinOrg.deSerializeFloat(memPtr->m_aabbMinOrg);
1188 m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
1189 m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
1190 m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
1196 int numElem = quantizedBvhFloatData.m_numQuantizedContiguousNodes;
1197 m_quantizedContiguousNodes.resize(numElem);
1199 if (numElem)
1201 btQuantizedBvhNodeData* memPtr = quantizedBvhFloatData.m_quantizedContiguousNodesPtr;
1202 for (int i=0;i<numElem;i++,memPtr++)
1204 m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
1205 m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1206 m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1207 m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1208 m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1209 m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1210 m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1215 m_traversalMode = btTraversalMode(quantizedBvhFloatData.m_traversalMode);
1218 int numElem = quantizedBvhFloatData.m_numSubtreeHeaders;
1219 m_SubtreeHeaders.resize(numElem);
1220 if (numElem)
1222 btBvhSubtreeInfoData* memPtr = quantizedBvhFloatData.m_subTreeInfoPtr;
1223 for (int i=0;i<numElem;i++,memPtr++)
1225 m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0] ;
1226 m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1227 m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1228 m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1229 m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1230 m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1231 m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
1232 m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
1238 void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantizedBvhDoubleData)
1240 m_bvhAabbMax.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMax);
1241 m_bvhAabbMin.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMin);
1242 m_bvhQuantization.deSerializeDouble(quantizedBvhDoubleData.m_bvhQuantization);
1244 m_curNodeIndex = quantizedBvhDoubleData.m_curNodeIndex;
1245 m_useQuantization = quantizedBvhDoubleData.m_useQuantization!=0;
1248 int numElem = quantizedBvhDoubleData.m_numContiguousLeafNodes;
1249 m_contiguousNodes.resize(numElem);
1251 if (numElem)
1253 btOptimizedBvhNodeDoubleData* memPtr = quantizedBvhDoubleData.m_contiguousNodesPtr;
1255 for (int i=0;i<numElem;i++,memPtr++)
1257 m_contiguousNodes[i].m_aabbMaxOrg.deSerializeDouble(memPtr->m_aabbMaxOrg);
1258 m_contiguousNodes[i].m_aabbMinOrg.deSerializeDouble(memPtr->m_aabbMinOrg);
1259 m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
1260 m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
1261 m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
1267 int numElem = quantizedBvhDoubleData.m_numQuantizedContiguousNodes;
1268 m_quantizedContiguousNodes.resize(numElem);
1270 if (numElem)
1272 btQuantizedBvhNodeData* memPtr = quantizedBvhDoubleData.m_quantizedContiguousNodesPtr;
1273 for (int i=0;i<numElem;i++,memPtr++)
1275 m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
1276 m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1277 m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1278 m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1279 m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1280 m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1281 m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1286 m_traversalMode = btTraversalMode(quantizedBvhDoubleData.m_traversalMode);
1289 int numElem = quantizedBvhDoubleData.m_numSubtreeHeaders;
1290 m_SubtreeHeaders.resize(numElem);
1291 if (numElem)
1293 btBvhSubtreeInfoData* memPtr = quantizedBvhDoubleData.m_subTreeInfoPtr;
1294 for (int i=0;i<numElem;i++,memPtr++)
1296 m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0] ;
1297 m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1298 m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1299 m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1300 m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1301 m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1302 m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
1303 m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
///fills the dataBuffer and returns the struct name (and 0 on failure)
///Serializes this BVH through a btSerializer: scalar/vector fields go into the
///caller-provided btQuantizedBvhData, while each array (unquantized nodes,
///quantized nodes, subtree headers) is written into its own serializer chunk
///finalized with BT_ARRAY_CODE.
const char* btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer) const
{
	btQuantizedBvhData* quantizedData = (btQuantizedBvhData*)dataBuffer;

	m_bvhAabbMax.serialize(quantizedData->m_bvhAabbMax);
	m_bvhAabbMin.serialize(quantizedData->m_bvhAabbMin);
	m_bvhQuantization.serialize(quantizedData->m_bvhQuantization);

	quantizedData->m_curNodeIndex = m_curNodeIndex;
	quantizedData->m_useQuantization = m_useQuantization;

	// Unquantized node array: pointer is the serializer's unique id, 0 when empty.
	quantizedData->m_numContiguousLeafNodes = m_contiguousNodes.size();
	quantizedData->m_contiguousNodesPtr = (btOptimizedBvhNodeData*) (m_contiguousNodes.size() ? serializer->getUniquePointer((void*)&m_contiguousNodes[0]) : 0);
	if (quantizedData->m_contiguousNodesPtr)
	{
		int sz = sizeof(btOptimizedBvhNodeData);
		int numElem = m_contiguousNodes.size();
		btChunk* chunk = serializer->allocate(sz,numElem);
		btOptimizedBvhNodeData* memPtr = (btOptimizedBvhNodeData*)chunk->m_oldPtr;
		for (int i=0;i<numElem;i++,memPtr++)
		{
			m_contiguousNodes[i].m_aabbMaxOrg.serialize(memPtr->m_aabbMaxOrg);
			m_contiguousNodes[i].m_aabbMinOrg.serialize(memPtr->m_aabbMinOrg);
			memPtr->m_escapeIndex = m_contiguousNodes[i].m_escapeIndex;
			memPtr->m_subPart = m_contiguousNodes[i].m_subPart;
			memPtr->m_triangleIndex = m_contiguousNodes[i].m_triangleIndex;
		}
		serializer->finalizeChunk(chunk,"btOptimizedBvhNodeData",BT_ARRAY_CODE,(void*)&m_contiguousNodes[0]);
	}

	// Quantized node array, same chunk pattern.
	quantizedData->m_numQuantizedContiguousNodes = m_quantizedContiguousNodes.size();
//	printf("quantizedData->m_numQuantizedContiguousNodes=%d\n",quantizedData->m_numQuantizedContiguousNodes);
	quantizedData->m_quantizedContiguousNodesPtr =(btQuantizedBvhNodeData*) (m_quantizedContiguousNodes.size() ? serializer->getUniquePointer((void*)&m_quantizedContiguousNodes[0]) : 0);
	if (quantizedData->m_quantizedContiguousNodesPtr)
	{
		int sz = sizeof(btQuantizedBvhNodeData);
		int numElem = m_quantizedContiguousNodes.size();
		btChunk* chunk = serializer->allocate(sz,numElem);
		btQuantizedBvhNodeData* memPtr = (btQuantizedBvhNodeData*)chunk->m_oldPtr;
		for (int i=0;i<numElem;i++,memPtr++)
		{
			memPtr->m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex;
			memPtr->m_quantizedAabbMax[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[0];
			memPtr->m_quantizedAabbMax[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[1];
			memPtr->m_quantizedAabbMax[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[2];
			memPtr->m_quantizedAabbMin[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[0];
			memPtr->m_quantizedAabbMin[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[1];
			memPtr->m_quantizedAabbMin[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[2];
		}
		serializer->finalizeChunk(chunk,"btQuantizedBvhNodeData",BT_ARRAY_CODE,(void*)&m_quantizedContiguousNodes[0]);
	}

	quantizedData->m_traversalMode = int(m_traversalMode);
	quantizedData->m_numSubtreeHeaders = m_SubtreeHeaders.size();

	// Subtree headers, same chunk pattern.
	quantizedData->m_subTreeInfoPtr = (btBvhSubtreeInfoData*) (m_SubtreeHeaders.size() ? serializer->getUniquePointer((void*)&m_SubtreeHeaders[0]) : 0);
	if (quantizedData->m_subTreeInfoPtr)
	{
		int sz = sizeof(btBvhSubtreeInfoData);
		int numElem = m_SubtreeHeaders.size();
		btChunk* chunk = serializer->allocate(sz,numElem);
		btBvhSubtreeInfoData* memPtr = (btBvhSubtreeInfoData*)chunk->m_oldPtr;
		for (int i=0;i<numElem;i++,memPtr++)
		{
			memPtr->m_quantizedAabbMax[0] = m_SubtreeHeaders[i].m_quantizedAabbMax[0];
			memPtr->m_quantizedAabbMax[1] = m_SubtreeHeaders[i].m_quantizedAabbMax[1];
			memPtr->m_quantizedAabbMax[2] = m_SubtreeHeaders[i].m_quantizedAabbMax[2];
			memPtr->m_quantizedAabbMin[0] = m_SubtreeHeaders[i].m_quantizedAabbMin[0];
			memPtr->m_quantizedAabbMin[1] = m_SubtreeHeaders[i].m_quantizedAabbMin[1];
			memPtr->m_quantizedAabbMin[2] = m_SubtreeHeaders[i].m_quantizedAabbMin[2];

			memPtr->m_rootNodeIndex = m_SubtreeHeaders[i].m_rootNodeIndex;
			memPtr->m_subtreeSize = m_SubtreeHeaders[i].m_subtreeSize;
		}
		serializer->finalizeChunk(chunk,"btBvhSubtreeInfoData",BT_ARRAY_CODE,(void*)&m_SubtreeHeaders[0]);
	}
	return btQuantizedBvhDataName;
}