# SPDX-FileCopyrightText: 2013 Campbell Barton
# SPDX-FileCopyrightText: 2014 Bastien Montagne
#
# SPDX-License-Identifier: GPL-2.0-or-later

import math
import time

from collections import namedtuple
from collections.abc import Iterable
from itertools import zip_longest, chain
from dataclasses import dataclass, field
from typing import Callable
import numpy as np

import bpy
import bpy_extras
from bpy.types import Object, Bone, PoseBone, DepsgraphObjectInstance
from mathutils import Vector, Matrix

from . import encode_bin, data_types


# "Constants"
FBX_VERSION = 7400
# 1004 adds use of "OtherFlags"->"TCDefinition" to control the FBX_KTIME opt-in in FBX version 7700.
FBX_HEADER_VERSION = 1003
FBX_SCENEINFO_VERSION = 100
FBX_TEMPLATES_VERSION = 100

FBX_MODELS_VERSION = 232

FBX_GEOMETRY_VERSION = 124
# Revert normals back to 101 (simple 3D values) for now; 102 (4D + weights) seems not well supported by most apps
# currently, apart from some AD products.
FBX_GEOMETRY_NORMAL_VERSION = 101
FBX_GEOMETRY_BINORMAL_VERSION = 101
FBX_GEOMETRY_TANGENT_VERSION = 101
FBX_GEOMETRY_SMOOTHING_VERSION = 102
FBX_GEOMETRY_CREASE_VERSION = 101
FBX_GEOMETRY_VCOLOR_VERSION = 101
FBX_GEOMETRY_UV_VERSION = 101
FBX_GEOMETRY_MATERIAL_VERSION = 101
FBX_GEOMETRY_LAYER_VERSION = 100
FBX_GEOMETRY_SHAPE_VERSION = 100
FBX_DEFORMER_SHAPE_VERSION = 100
FBX_DEFORMER_SHAPECHANNEL_VERSION = 100
FBX_POSE_BIND_VERSION = 100
FBX_DEFORMER_SKIN_VERSION = 101
FBX_DEFORMER_CLUSTER_VERSION = 100
FBX_MATERIAL_VERSION = 102
FBX_TEXTURE_VERSION = 202
FBX_ANIM_KEY_VERSION = 4008

FBX_NAME_CLASS_SEP = b"\x00\x01"
FBX_ANIM_PROPSGROUP_NAME = "d"

FBX_KTIME_V7 = 46186158000  # This is the number of "ktimes" in one second (yep, precision over the nanosecond...)
# FBX 2019.5 (FBX version 7700) changed the number of "ktimes" per second; however, the new value is opt-in until FBX
# version 8000, where it will probably become opt-out.
FBX_KTIME_V8 = 141120000
# To explicitly use the V7 value in FBX versions 7700-7XXX:
# fbx_root->"FBXHeaderExtension"->"OtherFlags"->"TCDefinition" is set to 127.
# To opt in to the V8 value in FBX versions 7700-7XXX: "TCDefinition" is set to 0.
FBX_TIMECODE_DEFINITION_TO_KTIME_PER_SECOND = {
    0: FBX_KTIME_V8,
    127: FBX_KTIME_V7,
}
# The "ktimes" per second for Blender exported FBX is constant because the exported `FBX_VERSION` is constant.
FBX_KTIME = FBX_KTIME_V8 if FBX_VERSION >= 8000 else FBX_KTIME_V7
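
# Illustrative example (not part of the exporter): converting a time in seconds to the integer
# "ktime" units stored in FBX files, using the FBX_KTIME selected above:
#   >>> int(round(0.5 * FBX_KTIME))  # half a second, with FBX_VERSION = 7400
#   23093079000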


MAT_CONVERT_LIGHT = Matrix.Rotation(math.pi / 2.0, 4, 'X')  # Blender is -Z, FBX is -Y.
MAT_CONVERT_CAMERA = Matrix.Rotation(math.pi / 2.0, 4, 'Y')  # Blender is -Z, FBX is +X.
# XXX I can't get this working :(
# MAT_CONVERT_BONE = Matrix.Rotation(math.pi / 2.0, 4, 'Z')  # Blender is +Y, FBX is -X.
MAT_CONVERT_BONE = Matrix()


BLENDER_OTHER_OBJECT_TYPES = {'CURVE', 'SURFACE', 'FONT', 'META'}
BLENDER_OBJECT_TYPES_MESHLIKE = {'MESH'} | BLENDER_OTHER_OBJECT_TYPES

SHAPE_KEY_SLIDER_HARD_MIN = bpy.types.ShapeKey.bl_rna.properties["slider_min"].hard_min
SHAPE_KEY_SLIDER_HARD_MAX = bpy.types.ShapeKey.bl_rna.properties["slider_max"].hard_max


# Lamps.
FBX_LIGHT_TYPES = {
    'POINT': 0,  # Point.
    'SUN': 1,    # Directional.
    'SPOT': 2,   # Spot.
    'HEMI': 1,   # Directional.
    'AREA': 3,   # Area.
}
FBX_LIGHT_DECAY_TYPES = {
    'CONSTANT': 0,                   # None.
    'INVERSE_LINEAR': 1,             # Linear.
    'INVERSE_SQUARE': 2,             # Quadratic.
    'INVERSE_COEFFICIENTS': 2,       # Quadratic...
    'CUSTOM_CURVE': 2,               # Quadratic.
    'LINEAR_QUADRATIC_WEIGHTED': 2,  # Quadratic.
}


RIGHT_HAND_AXES = {
    # Up, Forward -> FBX values (tuples of (axis, sign), Up, Front, Coord).
    ( 'X', '-Y'): ((0, 1), (1, 1), (2, 1)),
    ( 'X', 'Y'): ((0, 1), (1, -1), (2, -1)),
    ( 'X', '-Z'): ((0, 1), (2, 1), (1, -1)),
    ( 'X', 'Z'): ((0, 1), (2, -1), (1, 1)),
    ('-X', '-Y'): ((0, -1), (1, 1), (2, -1)),
    ('-X', 'Y'): ((0, -1), (1, -1), (2, 1)),
    ('-X', '-Z'): ((0, -1), (2, 1), (1, 1)),
    ('-X', 'Z'): ((0, -1), (2, -1), (1, -1)),
    ( 'Y', '-X'): ((1, 1), (0, 1), (2, -1)),
    ( 'Y', 'X'): ((1, 1), (0, -1), (2, 1)),
    ( 'Y', '-Z'): ((1, 1), (2, 1), (0, 1)),
    ( 'Y', 'Z'): ((1, 1), (2, -1), (0, -1)),
    ('-Y', '-X'): ((1, -1), (0, 1), (2, 1)),
    ('-Y', 'X'): ((1, -1), (0, -1), (2, -1)),
    ('-Y', '-Z'): ((1, -1), (2, 1), (0, -1)),
    ('-Y', 'Z'): ((1, -1), (2, -1), (0, 1)),
    ( 'Z', '-X'): ((2, 1), (0, 1), (1, 1)),
    ( 'Z', 'X'): ((2, 1), (0, -1), (1, -1)),
    ( 'Z', '-Y'): ((2, 1), (1, 1), (0, -1)),
    ( 'Z', 'Y'): ((2, 1), (1, -1), (0, 1)),  # Blender system!
    ('-Z', '-X'): ((2, -1), (0, 1), (1, -1)),
    ('-Z', 'X'): ((2, -1), (0, -1), (1, 1)),
    ('-Z', '-Y'): ((2, -1), (1, 1), (0, 1)),
    ('-Z', 'Y'): ((2, -1), (1, -1), (0, -1)),
}
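
# Illustrative example: the FBX axis triple for given Up/Forward axes; Blender's own system is
# Up='Z', Forward='Y':
#   >>> RIGHT_HAND_AXES[('Z', 'Y')]
#   ((2, 1), (1, -1), (0, 1))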


# NOTE: Not fully in enum value order, since when exporting, the first entry matching the framerate value is used
# (e.g. better to have NTSC full frame than NTSC drop frame for a 29.97 framerate).
FBX_FRAMERATES = (
    # (-1.0, 0),  # Default framerate.
    (-1.0, 14),  # Custom framerate.
    (120.0, 1),
    (100.0, 2),
    (60.0, 3),
    (50.0, 4),
    (48.0, 5),
    (30.0, 6),  # BW NTSC, full frame.
    (30.0, 7),  # Drop frame.
    (30.0 / 1.001, 9),  # Color NTSC, full frame.
    (30.0 / 1.001, 8),  # Color NTSC, drop frame.
    (25.0, 10),
    (24.0, 11),
    # (1.0, 12),  # 1000 milli/s (use for date time?).
    (24.0 / 1.001, 13),
    (96.0, 15),
    (72.0, 16),
    (60.0 / 1.001, 17),
    (120.0 / 1.001, 18),
)
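
# Illustrative sketch (hypothetical `fps` value, not exporter code): because the first match wins,
# a 29.97 scene framerate resolves to the full-frame Color NTSC enum (9), not the drop-frame one (8):
#   >>> fps = 29.97
#   >>> next(fbx_fr for ref_fps, fbx_fr in FBX_FRAMERATES[1:] if abs(ref_fps - fps) < 1e-3)
#   9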


# ##### Misc utilities #####

# Enable performance reports (measuring time used to perform various steps of importing or exporting).
DO_PERFMON = False

if DO_PERFMON:
    class PerfMon():
        def __init__(self):
            self.level = -1
            self.ref_time = []

        def level_up(self, message=""):
            self.level += 1
            self.ref_time.append(None)
            if message:
                print("\t" * self.level, message, sep="")

        def level_down(self, message=""):
            if not self.ref_time:
                if message:
                    print(message)
                return
            ref_time = self.ref_time[self.level]
            print("\t" * self.level,
                  "\tDone (%f sec)\n" % ((time.process_time() - ref_time) if ref_time is not None else 0.0),
                  sep="")
            if message:
                print("\t" * self.level, message, sep="")
            del self.ref_time[self.level]
            self.level -= 1

        def step(self, message=""):
            ref_time = self.ref_time[self.level]
            curr_time = time.process_time()
            if ref_time is not None:
                print("\t" * self.level, "\tDone (%f sec)\n" % (curr_time - ref_time), sep="")
            self.ref_time[self.level] = curr_time
            print("\t" * self.level, message, sep="")
else:
    class PerfMon():
        def __init__(self):
            pass

        def level_up(self, message=""):
            pass

        def level_down(self, message=""):
            pass

        def step(self, message=""):
            pass


# Scale/unit mess. FBX can store the 'reference' unit of a file in its UnitScaleFactor property
# (1.0 meaning centimeter, afaik). We use that to reflect the user's default unit as set in Blender with scale_length.
# However, we always get values in BU (i.e. meters), so we have to reverse-apply that scale in the global matrix...
# Note that when no default unit is available, we assume 'meters' (and hence scale by 100).
def units_blender_to_fbx_factor(scene):
    return 100.0 if (scene.unit_settings.system == 'NONE') else (100.0 * scene.unit_settings.scale_length)


# Note: this could be in a utility (math.units e.g.)...
UNITS = {
    "meter": 1.0,  # Ref unit!
    "kilometer": 0.001,
    "millimeter": 1000.0,
    "foot": 1.0 / 0.3048,
    "inch": 1.0 / 0.0254,
    "turn": 1.0,  # Ref unit!
    "degree": 360.0,
    "radian": math.pi * 2.0,
    "second": 1.0,  # Ref unit!
    "ktime": FBX_KTIME,  # For export use only, because the imported "ktimes" per second may vary.
}


def units_convertor(u_from, u_to):
    """Return a convertor between specified units."""
    conv = UNITS[u_to] / UNITS[u_from]
    return lambda v: v * conv


def units_convertor_iter(u_from, u_to):
    """Return an iterable convertor between specified units."""
    conv = units_convertor(u_from, u_to)

    def convertor(it):
        for v in it:
            yield conv(v)

    return convertor
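
# Illustrative example (not exporter code): factors come from the UNITS table, so converting
# 2 meters to millimeters multiplies by UNITS["millimeter"] / UNITS["meter"] == 1000.0:
#   >>> mm_from_m = units_convertor("meter", "millimeter")
#   >>> mm_from_m(2.0)
#   2000.0
#   >>> list(units_convertor_iter("degree", "radian")([90.0, 180.0]))
#   [1.5707963267948966, 3.141592653589793]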


def matrix4_to_array(mat):
    """Concatenate matrix's columns into a single, flat tuple."""
    # Blender matrix is row major, FBX is col major, so transpose on write.
    return tuple(f for v in mat.transposed() for f in v)


def array_to_matrix4(arr):
    """Convert a single 16-len tuple into a valid 4D Blender matrix."""
    # Blender matrix is row major, FBX is col major, so transpose on read.
    return Matrix(tuple(zip(*[iter(arr)] * 4))).transposed()
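
# Illustrative example (requires Blender's mathutils): the two helpers are inverses of each other,
# so a matrix survives a round trip through the flat FBX layout:
#   >>> m = Matrix.Translation((1.0, 2.0, 3.0))
#   >>> array_to_matrix4(matrix4_to_array(m)) == m
#   True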


def parray_as_ndarray(arr):
    """Convert an array.array into an np.ndarray that shares the same memory."""
    return np.frombuffer(arr, dtype=arr.typecode)


def similar_values(v1, v2, e=1e-6):
    """Return True if v1 and v2 are nearly the same."""
    if v1 == v2:
        return True
    return ((abs(v1 - v2) / max(abs(v1), abs(v2))) <= e)


def similar_values_iter(v1, v2, e=1e-6):
    """Return True if iterables v1 and v2 are nearly the same."""
    if v1 == v2:
        return True
    for v1, v2 in zip(v1, v2):
        if (v1 != v2) and ((abs(v1 - v2) / max(abs(v1), abs(v2))) > e):
            return False
    return True


def shape_difference_exclude_similar(sv_cos, ref_cos, e=1e-6):
    """Return a tuple of:
    the difference between the vertex cos in sv_cos and ref_cos, excluding any that are nearly the same,
    and the indices of the vertices that are not nearly the same."""
    assert(sv_cos.size == ref_cos.size)

    # Create views of 1 co per row of the arrays, only making copies if needed.
    sv_cos = sv_cos.reshape(-1, 3)
    ref_cos = ref_cos.reshape(-1, 3)

    # Quick check for equality.
    if np.array_equal(sv_cos, ref_cos):
        # There's no difference between the two arrays.
        empty_cos = np.empty((0, 3), dtype=sv_cos.dtype)
        empty_indices = np.empty(0, dtype=np.int32)
        return empty_cos, empty_indices

    # Note that unlike math.isclose(a, b), np.isclose(a, b) is not symmetrical; the second argument 'b' is
    # considered to be the reference value.
    # Note that atol=0 means that if only one co component being compared is zero, they won't be considered close.
    similar_mask = np.isclose(sv_cos, ref_cos, atol=0, rtol=e)

    # A co is only similar if every component in it is similar.
    co_similar_mask = np.all(similar_mask, axis=1)

    # Get the indices of cos that are not similar.
    not_similar_verts_idx = np.flatnonzero(~co_similar_mask)

    # Subtracting first over the entire arrays and then indexing seems faster than indexing both arrays first and then
    # subtracting, until less than about 3% of the cos are being indexed.
    difference_cos = (sv_cos - ref_cos)[not_similar_verts_idx]
    return difference_cos, not_similar_verts_idx
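
# Illustrative example: only the second vertex differs, so only its delta and index are returned:
#   >>> sv = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
#   >>> ref = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 2.0]])
#   >>> diff, idx = shape_difference_exclude_similar(sv, ref)
#   >>> diff.tolist(), idx.tolist()
#   ([[0.0, 0.0, 1.0]], [1])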


def _mat4_vec3_array_multiply(mat4, vec3_array, dtype=None, return_4d=False):
    """Multiply a 4d matrix by each 3d vector in an array and return as an array of either 3d or 4d vectors.

    A view of the input array is returned if return_4d=False, the dtype matches the input array and either the matrix
    is None or, ignoring the last row, is a 3x3 identity matrix with no translation:
    ┌1, 0, 0, 0┐
    │0, 1, 0, 0│
    └0, 0, 1, 0┘

    When dtype=None, it defaults to the dtype of the input array."""
    return_dtype = dtype if dtype is not None else vec3_array.dtype
    vec3_array = vec3_array.reshape(-1, 3)

    # Multiplying a 4d mathutils.Matrix by a 3d mathutils.Vector implicitly extends the Vector to 4d during the
    # calculation by appending 1.0 to the Vector and then the 4d result is truncated back to 3d.
    # Numpy does not do an implicit extension to 4d, so it would have to be done explicitly by extending the entire
    # vec3_array to 4d.
    # However, since the w component of the vectors is always 1.0, the last column can be excluded from the
    # multiplication and then added to every multiplied vector afterwards, which avoids having to make a 4d copy of
    # vec3_array beforehand.
    # For a single column vector:
    # ┌a, b, c, d┐   ┌x┐   ┌ax+by+cz+d┐
    # │e, f, g, h│ @ │y│ = │ex+fy+gz+h│
    # │i, j, k, l│   │z│   │ix+jy+kz+l│
    # └m, n, o, p┘   └1┘   └mx+ny+oz+p┘
    #
    # ┌a, b, c┐   ┌x┐   ┌d┐   ┌ax+by+cz┐   ┌d┐   ┌ax+by+cz+d┐
    # │e, f, g│ @ │y│ + │h│ = │ex+fy+gz│ + │h│ = │ex+fy+gz+h│
    # │i, j, k│   └z┘   │l│   │ix+jy+kz│   │l│   │ix+jy+kz+l│
    # └m, n, o┘         └p┘   └mx+ny+oz┘   └p┘   └mx+ny+oz+p┘

    # column_vector_multiplication in mathutils_Vector.c uses double precision math for Matrix @ Vector by casting the
    # matrix's values to double precision and then casts back to single precision when returning the result, so at
    # least double precision math is always used here to match standard Blender behaviour.
    math_precision = np.result_type(np.double, vec3_array)

    to_multiply = None
    to_add = None
    w_to_set = 1.0
    if mat4 is not None:
        mat_np = np.array(mat4, dtype=math_precision)
        # The identity matrix is compared against to check if any matrix multiplication is required.
        identity = np.identity(4, dtype=math_precision)
        if not return_4d:
            # If returning 3d, the entire last row of the matrix can be ignored because it only affects the w
            # component.
            mat_np = mat_np[:3]
            identity = identity[:3]

        # Split mat_np into the columns to multiply and the column to add afterwards.
        # First 3 columns.
        multiply_columns = mat_np[:, :3]
        multiply_identity = identity[:, :3]
        # Last column only.
        add_column = mat_np.T[3]

        # Analyze the split parts of the matrix to figure out if there is anything to multiply and anything to add.
        if not np.array_equal(multiply_columns, multiply_identity):
            to_multiply = multiply_columns

        if return_4d and to_multiply is None:
            # When there's nothing to multiply, the w component of add_column can be set directly into the array
            # because mx+ny+oz+p becomes 0x+0y+0z+p where p is add_column[3].
            w_to_set = add_column[3]
            # Replace add_column with a view of only the translation.
            add_column = add_column[:3]

        if add_column.any():
            to_add = add_column

    if to_multiply is None:
        # If there's anything to add, ensure it's added using the precision being used for math.
        array_dtype = math_precision if to_add is not None else return_dtype
        if return_4d:
            multiplied_vectors = np.empty((len(vec3_array), 4), dtype=array_dtype)
            multiplied_vectors[:, :3] = vec3_array
            multiplied_vectors[:, 3] = w_to_set
        else:
            # If there's anything to add, ensure a copy is made so that the input vec3_array isn't modified.
            multiplied_vectors = vec3_array.astype(array_dtype, copy=to_add is not None)
    else:
        # Matrix multiplication has the signature (n,k) @ (k,m) -> (n,m).
        # Where v is the number of vectors in vec3_array and d is the number of vector dimensions to return:
        # to_multiply has shape (d,3), vec3_array has shape (v,3) and the result should have shape (v,d).
        # Either vec3_array or to_multiply must be transposed:
        # Can transpose vec3_array and then transpose the result:
        #   (v,3).T -> (3,v); (d,3) @ (3,v) -> (d,v); (d,v).T -> (v,d)
        # Or transpose to_multiply and swap the order of multiplication:
        #   (d,3).T -> (3,d); (v,3) @ (3,d) -> (v,d)
        # There's no, or negligible, performance difference between the two options, however, the result of the latter
        # will be C contiguous in memory, making it faster to convert to flattened bytes with .tobytes().
        multiplied_vectors = vec3_array @ to_multiply.T

    if to_add is not None:
        for axis, to_add_to_axis in zip(multiplied_vectors.T, to_add):
            if to_add_to_axis != 0:
                axis += to_add_to_axis

    # Cast to the desired return type before returning.
    return multiplied_vectors.astype(return_dtype, copy=False)


def vcos_transformed(raw_cos, m=None, dtype=None):
    return _mat4_vec3_array_multiply(m, raw_cos, dtype)


def nors_transformed(raw_nors, m=None, dtype=None):
    # Great, now normals are also expected 4D!
    # XXX Back to 3D normals for now!
    # return _mat4_vec3_array_multiply(m, raw_nors, dtype, return_4d=True)
    return _mat4_vec3_array_multiply(m, raw_nors, dtype)
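
# Illustrative example (requires Blender's mathutils; not exporter code): applying a pure translation
# takes the fast path in _mat4_vec3_array_multiply that skips the matrix multiplication entirely:
#   >>> cos = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
#   >>> vcos_transformed(cos, Matrix.Translation((10.0, 0.0, 0.0))).tolist()
#   [[10.0, 0.0, 0.0], [11.0, 1.0, 1.0]]
# With m=None, the input array is returned unchanged (possibly as a view).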


def astype_view_signedness(arr, new_dtype):
    """Unsafely views arr as new_dtype if the itemsize and byteorder of arr matches but the signedness does not.

    Safely views arr as new_dtype if both arr and new_dtype have the same itemsize, byteorder and signedness, but
    could have a different character code, e.g. 'i' and 'l'. np.ndarray.astype with copy=False does not normally
    create this view, but Blender can be picky about the character code used, so this function will create the view.

    Otherwise, calls np.ndarray.astype with copy=False.

    The benefit of copy=False is that if the array can be safely viewed as the new type, then a view is made, instead
    of a copy with the new type.

    Unsigned types can't be viewed safely as signed or vice-versa, meaning that a copy would always be made by
    .astype(..., copy=False).

    This is intended for viewing uintc data (a common Blender C type with variable itemsize, though usually 4 bytes,
    so uint32) as int32 (a common FBX type), when the itemsizes match."""
    arr_dtype = arr.dtype

    if not isinstance(new_dtype, np.dtype):
        # new_dtype could be a type instance or a string, but it needs to be a dtype to compare its itemsize,
        # byteorder and kind.
        new_dtype = np.dtype(new_dtype)

    # For simplicity, only dtypes of the same itemsize and byteorder, but opposite signedness, are handled. Everything
    # else is left to .astype.
    arr_kind = arr_dtype.kind
    new_kind = new_dtype.kind
    # Signed and unsigned int are opposite in terms of signedness. Other types don't have signedness.
    integer_kinds = {'i', 'u'}
    if (
            arr_kind in integer_kinds and new_kind in integer_kinds
            and arr_dtype.itemsize == new_dtype.itemsize
            and arr_dtype.byteorder == new_dtype.byteorder
    ):
        # arr and new_dtype have signedness and matching itemsize and byteorder, so return a view of the new type.
        return arr.view(new_dtype)
    else:
        return arr.astype(new_dtype, copy=False)
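
# Illustrative example (assumes a platform where np.uintc is 4 bytes): viewing unsigned data as int32
# avoids the copy that .astype(np.int32, copy=False) would otherwise make:
#   >>> a = np.array([1, 2, 3], dtype=np.uintc)
#   >>> b = astype_view_signedness(a, np.int32)
#   >>> b.dtype == np.dtype(np.int32) and np.shares_memory(a, b)
#   True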


def fast_first_axis_flat(ar):
    """Get a flat view (or a copy if a view is not possible) of the input array whereby each element is a single
    element of a dtype that is fast to sort, sorts according to individual bytes and contains the data for an entire
    row (and any further dimensions) of the input array.

    Since the dtype of the view could sort in a different order to the dtype of the input array, this isn't typically
    useful for actual sorting, but it is useful for sorting-based uniqueness, such as np.unique."""
    # If there are no rows, each element will be viewed as the new dtype.
    elements_per_row = math.prod(ar.shape[1:])
    row_itemsize = ar.itemsize * elements_per_row

    # Get a dtype with itemsize that equals row_itemsize.
    # Integer types sort the fastest, but are only available for specific itemsizes.
    uint_dtypes_by_itemsize = {1: np.uint8, 2: np.uint16, 4: np.uint32, 8: np.uint64}
    # Signed/unsigned makes no noticeable speed difference, but using unsigned will result in ordering according to
    # individual bytes like the other, non-integer types.
    if row_itemsize in uint_dtypes_by_itemsize:
        entire_row_dtype = uint_dtypes_by_itemsize[row_itemsize]
    else:
        # When using kind='stable' sorting, numpy only uses radix sort with integer types, but it's still
        # significantly faster to sort by a single item per row instead of multiple row elements or multiple
        # structured type fields.
        # Construct a flexible size dtype with matching itemsize.
        # Should always be 4 because each character in a unicode string is UCS4.
        str_itemsize = np.dtype((np.str_, 1)).itemsize
        if row_itemsize % str_itemsize == 0:
            # Unicode strings seem to be slightly faster to sort than bytes.
            entire_row_dtype = np.dtype((np.str_, row_itemsize // str_itemsize))
        else:
            # Bytes seem to be slightly faster to sort than raw bytes (np.void).
            entire_row_dtype = np.dtype((np.bytes_, row_itemsize))

    # View each element along the first axis as a single element.
    # View (or copy if a view is not possible) as flat.
    ar = ar.reshape(-1)
    # To view as a dtype of different size, the last axis (entire array in NumPy 1.22 and earlier) must be
    # C-contiguous.
    if row_itemsize != ar.itemsize and not ar.flags.c_contiguous:
        ar = np.ascontiguousarray(ar)
    return ar.view(entire_row_dtype)


def fast_first_axis_unique(ar, return_unique=True, return_index=False, return_inverse=False, return_counts=False):
    """np.unique with axis=0, but optimised for when the input array has multiple elements per row, and the returned
    unique array doesn't need to be sorted.

    Arrays with more than one element per row are more costly to sort in np.unique due to being compared one
    row-element at a time, like comparing tuples.

    By viewing each entire row as a single non-structured element, much faster sorting can be achieved. Since the
    values are viewed as a different type to their original, this means that the returned array of unique values may
    not be sorted according to their original type.

    The array of unique values can be excluded from the returned tuple by specifying return_unique=False.

    Float type caveats:
    All elements of -0.0 in the input array will be replaced with 0.0 to ensure that both values are collapsed into
    one.
    NaN values can have lots of different byte representations (e.g. signalling/quiet and custom payloads). Only the
    duplicates of each unique byte representation will be collapsed into one."""
    # At least something should always be returned.
    assert(return_unique or return_index or return_inverse or return_counts)
    # Only signed integer, unsigned integer and floating-point kinds of data are allowed. Other kinds of data have not
    # been tested.
    assert(ar.dtype.kind in "iuf")

    # Floating-point types have different byte representations for -0.0 and 0.0. Collapse them together by replacing
    # all -0.0 in the input array with 0.0.
    if ar.dtype.kind == 'f':
        ar[ar == -0.0] = 0.0

    # It's a bit annoying that the unique array is always calculated even when it might not be needed, but it is
    # generally insignificant compared to the cost of sorting.
    result = np.unique(fast_first_axis_flat(ar), return_index=return_index,
                       return_inverse=return_inverse, return_counts=return_counts)

    if return_unique:
        unique = result[0] if isinstance(result, tuple) else result
        # View in the original dtype.
        unique = unique.view(ar.dtype)
        # Return the same number of elements per row and any extra dimensions per row as the input array.
        unique.shape = (-1, *ar.shape[1:])
        if isinstance(result, tuple):
            return (unique,) + result[1:]
        else:
            return unique
    else:
        # Remove the first element, the unique array.
        result = result[1:]
        if len(result) == 1:
            # Unpack single element tuples.
            return result[0]
        else:
            return result
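
# Illustrative example: duplicate rows are collapsed by viewing each whole row as one sortable element:
#   >>> edges = np.array([[0, 1], [1, 2], [0, 1]], dtype=np.uint32)
#   >>> fast_first_axis_unique(edges).tolist()
#   [[0, 1], [1, 2]]
# Note that, unlike np.unique(..., axis=0), the unique rows are ordered by their byte representation.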


def ensure_object_not_in_edit_mode(context, obj):
    """Objects in Edit mode usually cannot be exported, because much of the API used when exporting is not available
    for Objects in Edit mode.

    Exiting the currently active Object (and any other Objects opened in multi-editing) from Edit mode is simple and
    should be done with `bpy.ops.object.mode_set(mode='OBJECT')` instead of using this function.

    This function is for the rare case where an Object is in Edit mode, but the current context mode is not Edit mode.
    This can occur from a state where the current context mode is Edit mode, but then the active Object of the current
    View Layer is changed to a different Object that is not in Edit mode. This changes the current context mode, but
    leaves the other Object(s) in Edit mode."""
    if obj.mode != 'EDIT':
        return True

    # Get the active View Layer.
    view_layer = context.view_layer

    # A View Layer belongs to a scene.
    scene = view_layer.id_data

    # Get the current active Object of this View Layer, so we can restore it once done.
    orig_active = view_layer.objects.active

    # Check if obj is in the View Layer. If obj is not in the View Layer, it cannot be set as the active Object.
    # We don't use `obj.name in view_layer.objects` because an Object from a Library could have the same name.
    is_in_view_layer = any(o == obj for o in view_layer.objects)

    do_unlink_from_scene_collection = False
    try:
        if not is_in_view_layer:
            # There might not be any enabled collections in the View Layer, so link obj into the Scene Collection
            # instead, which is always available to all View Layers of that Scene.
            scene.collection.objects.link(obj)
            do_unlink_from_scene_collection = True
        view_layer.objects.active = obj

        # Now we're finally ready to attempt to change obj's mode.
        if bpy.ops.object.mode_set.poll():
            bpy.ops.object.mode_set(mode='OBJECT')
        if obj.mode == 'EDIT':
            # The Object could not be set out of EDIT mode and therefore cannot be exported.
            return False
    finally:
        # Always restore the original active Object and unlink obj from the Scene Collection if it had to be linked.
        view_layer.objects.active = orig_active
        if do_unlink_from_scene_collection:
            scene.collection.objects.unlink(obj)

    return True


def expand_shape_key_range(shape_key, value_to_fit):
    """Attempt to expand the slider_min/slider_max of a shape key to fit `value_to_fit` within the slider range,
    expanding slightly beyond `value_to_fit` if possible, so that the new slider_min/slider_max is not the same as
    `value_to_fit`. Blender has a hard minimum and maximum for slider values, so it may not be possible to fit the
    value within the slider range.

    If `value_to_fit` is already within the slider range, no changes are made.

    First tries setting slider_min/slider_max to double `value_to_fit`, otherwise, expands the range in the direction
    of `value_to_fit` by double the distance to `value_to_fit`.

    The new slider_min/slider_max is rounded down/up to the nearest whole number for a more visually pleasing result.

    Returns whether it was possible to expand the slider range to fit `value_to_fit`."""
    if value_to_fit < (slider_min := shape_key.slider_min):
        if value_to_fit < 0.0:
            # For the most common case, set slider_min to double value_to_fit.
            target_slider_min = value_to_fit * 2.0
        else:
            # Doubling value_to_fit would make it larger, so instead decrease slider_min by double the distance
            # between slider_min and value_to_fit.
            target_slider_min = slider_min - (slider_min - value_to_fit) * 2.0
        # Set slider_min to the first whole number less than or equal to target_slider_min.
        shape_key.slider_min = math.floor(target_slider_min)

        return value_to_fit >= SHAPE_KEY_SLIDER_HARD_MIN
    elif value_to_fit > (slider_max := shape_key.slider_max):
        if value_to_fit > 0.0:
            # For the most common case, set slider_max to double value_to_fit.
            target_slider_max = value_to_fit * 2.0
        else:
            # Doubling value_to_fit would make it smaller, so instead increase slider_max by double the distance
            # between slider_max and value_to_fit.
            target_slider_max = slider_max + (value_to_fit - slider_max) * 2.0
        # Set slider_max to the first whole number greater than or equal to target_slider_max.
        shape_key.slider_max = math.ceil(target_slider_max)

        return value_to_fit <= SHAPE_KEY_SLIDER_HARD_MAX
    else:
        # Value is already within the range.
        return True
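
# Worked example (hypothetical values): fitting value_to_fit=2.5 with the default slider_max=1.0 sets
# slider_max to math.ceil(2.5 * 2.0) == 5, while fitting value_to_fit=-0.5 with slider_min=0.0 sets
# slider_min to math.floor(-0.5 * 2.0) == -1. Both are well within Blender's hard limits, so both
# calls would return True.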


# ##### Attribute utils. #####
AttributeDataTypeInfo = namedtuple("AttributeDataTypeInfo", ["dtype", "foreach_attribute", "item_size"])
_attribute_data_type_info_lookup = {
    'FLOAT': AttributeDataTypeInfo(np.single, "value", 1),
    'INT': AttributeDataTypeInfo(np.intc, "value", 1),
    'FLOAT_VECTOR': AttributeDataTypeInfo(np.single, "vector", 3),
    'FLOAT_COLOR': AttributeDataTypeInfo(np.single, "color", 4),  # color_srgb is an alternative
    'BYTE_COLOR': AttributeDataTypeInfo(np.single, "color", 4),  # color_srgb is an alternative
    'STRING': AttributeDataTypeInfo(None, "value", 1),  # Not usable with foreach_get/set
    'BOOLEAN': AttributeDataTypeInfo(bool, "value", 1),
    'FLOAT2': AttributeDataTypeInfo(np.single, "vector", 2),
    'INT8': AttributeDataTypeInfo(np.intc, "value", 1),
    'INT32_2D': AttributeDataTypeInfo(np.intc, "value", 2),
}


def attribute_get(attributes, name, data_type, domain):
    """Get an attribute by its name, data_type and domain.

    Returns None if no attribute with this name, data_type and domain exists."""
    attr = attributes.get(name)
    if not attr:
        return None
    if attr.data_type == data_type and attr.domain == domain:
        return attr
    # It shouldn't normally happen, but it's possible there are multiple attributes with the same name, but different
    # data_types or domains.
    for attr in attributes:
        if attr.name == name and attr.data_type == data_type and attr.domain == domain:
            return attr
    return None


def attribute_foreach_set(attribute, array_or_list, foreach_attribute=None):
    """Set every value of an attribute with foreach_set."""
    if foreach_attribute is None:
        foreach_attribute = _attribute_data_type_info_lookup[attribute.data_type].foreach_attribute
    attribute.data.foreach_set(foreach_attribute, array_or_list)


def attribute_to_ndarray(attribute, foreach_attribute=None):
    """Create a NumPy ndarray from an attribute."""
    data = attribute.data
    data_type_info = _attribute_data_type_info_lookup[attribute.data_type]
    ndarray = np.empty(len(data) * data_type_info.item_size, dtype=data_type_info.dtype)
    if foreach_attribute is None:
        foreach_attribute = data_type_info.foreach_attribute
    data.foreach_get(foreach_attribute, ndarray)
    return ndarray
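
# Illustrative example (assumes `mesh` is a bpy.types.Mesh, whose "position" attribute always
# exists): the returned array is flat, with item_size elements per attribute value:
#   >>> flat_cos = attribute_to_ndarray(mesh.attributes["position"])
#   >>> per_vertex_cos = flat_cos.reshape(-1, 3)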


@dataclass
class AttributeDescription:
    """Helper class to reduce duplicate code for handling built-in Blender attributes."""
    name: str
    # Valid identifiers can be found in bpy.types.Attribute.bl_rna.properties["data_type"].enum_items
    data_type: str
    # Valid identifiers can be found in bpy.types.Attribute.bl_rna.properties["domain"].enum_items
    domain: str
    # Some attributes are required to exist if certain conditions are met. If a required attribute does not exist when
    # attempting to get it, an AssertionError is raised.
    is_required_check: Callable[[bpy.types.AttributeGroup], bool] = None
    # NumPy dtype that matches the internal C data of this attribute.
    dtype: np.dtype = field(init=False)
    # The default attribute name to use with foreach_get and foreach_set.
    foreach_attribute: str = field(init=False)
    # The number of elements per value of the attribute when flattened into a 1-dimensional list/array.
    item_size: int = field(init=False)

    def __post_init__(self):
        data_type_info = _attribute_data_type_info_lookup[self.data_type]
        self.dtype = data_type_info.dtype
        self.foreach_attribute = data_type_info.foreach_attribute
        self.item_size = data_type_info.item_size

    def is_required(self, attributes):
        """Check if the attribute is required to exist in the provided attributes."""
        is_required_check = self.is_required_check
        return is_required_check and is_required_check(attributes)

    def get(self, attributes):
        """Get the attribute.

        If the attribute is required, but does not exist, an AssertionError is raised, otherwise None is returned."""
        attr = attribute_get(attributes, self.name, self.data_type, self.domain)
        if not attr and self.is_required(attributes):
            raise AssertionError("Required attribute '%s' with type '%s' and domain '%s' not found in %r"
                                 % (self.name, self.data_type, self.domain, attributes))
        return attr

    def ensure(self, attributes):
        """Get the attribute, creating it if it does not exist.

        Raises a RuntimeError if the attribute could not be created, which should only happen when attempting to
        create an attribute with a reserved name, but with the wrong data_type or domain. See usage of
        BuiltinCustomDataLayerProvider in Blender source for most reserved names.

        There is no guarantee that the returned attribute has the desired name because the name could already be in
        use by another attribute with a different data_type and/or domain."""
        attr = self.get(attributes)
        if attr:
            return attr

        attr = attributes.new(self.name, self.data_type, self.domain)
        if not attr:
            raise RuntimeError("Could not create attribute '%s' with type '%s' and domain '%s' in %r"
                               % (self.name, self.data_type, self.domain, attributes))
        return attr

    def foreach_set(self, attributes, array_or_list, foreach_attribute=None):
        """Get the attribute, creating it if it does not exist, and then set every value in the attribute."""
        attribute_foreach_set(self.ensure(attributes), array_or_list, foreach_attribute)

    def get_ndarray(self, attributes, foreach_attribute=None):
        """Get the attribute and, if it exists, return a NumPy ndarray containing its data, otherwise return None."""
        attr = self.get(attributes)
        return attribute_to_ndarray(attr, foreach_attribute) if attr else None

    def to_ndarray(self, attributes, foreach_attribute=None):
        """Get the attribute and, if it exists, return a NumPy ndarray containing its data, otherwise return a
        zero-length ndarray."""
        ndarray = self.get_ndarray(attributes, foreach_attribute)
        return ndarray if ndarray is not None else np.empty(0, dtype=self.dtype)


# Built-in Blender attributes.
# Only attributes used by the importer/exporter are included here.
# See usage of BuiltinCustomDataLayerProvider in Blender source to find most built-in attributes.
MESH_ATTRIBUTE_MATERIAL_INDEX = AttributeDescription("material_index", 'INT', 'FACE')
MESH_ATTRIBUTE_POSITION = AttributeDescription("position", 'FLOAT_VECTOR', 'POINT',
                                               is_required_check=lambda attributes: bool(attributes.id_data.vertices))
MESH_ATTRIBUTE_SHARP_EDGE = AttributeDescription("sharp_edge", 'BOOLEAN', 'EDGE')
MESH_ATTRIBUTE_EDGE_VERTS = AttributeDescription(".edge_verts", 'INT32_2D', 'EDGE',
                                                 is_required_check=lambda attributes: bool(attributes.id_data.edges))
MESH_ATTRIBUTE_CORNER_VERT = AttributeDescription(".corner_vert", 'INT', 'CORNER',
                                                  is_required_check=lambda attributes: bool(attributes.id_data.loops))
MESH_ATTRIBUTE_CORNER_EDGE = AttributeDescription(".corner_edge", 'INT', 'CORNER',
                                                  is_required_check=lambda attributes: bool(attributes.id_data.loops))
MESH_ATTRIBUTE_SHARP_FACE = AttributeDescription("sharp_face", 'BOOLEAN', 'FACE')
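
# Illustrative example (assumes `mesh` is a bpy.types.Mesh): reading and writing built-in
# attributes through their descriptions:
#   >>> cos = MESH_ATTRIBUTE_POSITION.to_ndarray(mesh.attributes).reshape(-1, 3)
#   >>> MESH_ATTRIBUTE_SHARP_EDGE.foreach_set(mesh.attributes, np.zeros(len(mesh.edges), dtype=bool))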


# ##### UIDs code. #####

# ID class (mere int).
class UUID(int):
    pass


# UIDs storage.
_keys_to_uuids = {}
_uuids_to_keys = {}


def _key_to_uuid(uuids, key):
    # TODO: Check this is robust enough for our needs!
    # Note: We assume we have already checked the related key wasn't yet in _keys_to_uuids!
    # As int64 is signed in FBX, we keep uids below 2**63...
    if isinstance(key, int) and 0 <= key < 2**63:
        # We can use the value directly as id!
        uuid = key
    else:
        uuid = hash(key)
        if uuid < 0:
            uuid = -uuid
        if uuid >= 2**63:
            uuid //= 2
    # Try to make our uid shorter!
    if uuid > int(1e9):
        t_uuid = uuid % int(1e9)
        if t_uuid not in uuids:
            uuid = t_uuid
    # Make sure our uuid *is* unique.
    if uuid in uuids:
        inc = 1 if uuid < 2**62 else -1
        while uuid in uuids:
            uuid += inc
            if not (0 <= uuid < 2**63):
                # Note that this is more than unlikely, but does not harm anyway...
                raise ValueError("Unable to generate a UUID for key {}".format(key))
    return UUID(uuid)


def get_fbx_uuid_from_key(key):
    """
    Return a UUID for the given key, which is assumed to be hashable.
    """
    uuid = _keys_to_uuids.get(key, None)
    if uuid is None:
        uuid = _key_to_uuid(_uuids_to_keys, key)
        _keys_to_uuids[key] = uuid
        _uuids_to_keys[uuid] = key
    return uuid
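
# Illustrative example: the mapping is stable within a session, so the same key always yields the
# same id (the key string below is hypothetical):
#   >>> uid = get_fbx_uuid_from_key("BObject#Cube|Model")
#   >>> get_fbx_uuid_from_key("BObject#Cube|Model") == uid
#   True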


# XXX Not sure we'll actually need this one?
def get_key_from_fbx_uuid(uuid):
    """
    Return the key which generated this uid.
    """
    assert(uuid.__class__ == UUID)
    return _uuids_to_keys.get(uuid, None)


# Blender-specific key generators
def get_bid_name(bid):
    library = getattr(bid, "library", None)
    if library is not None:
        return "%s_L_%s" % (bid.name, library.name)
    else:
        return bid.name


def get_blenderID_key(bid):
    if isinstance(bid, Iterable):
        return "|".join("B" + e.rna_type.name + "#" + get_bid_name(e) for e in bid)
    else:
        return "B" + bid.rna_type.name + "#" + get_bid_name(bid)


def get_blenderID_name(bid):
    if isinstance(bid, Iterable):
        return "|".join(get_bid_name(e) for e in bid)
    else:
        return get_bid_name(bid)


def get_blender_empty_key(obj):
    """Return the Empty object's key (Model and NodeAttribute)."""
    return "|".join((get_blenderID_key(obj), "Empty"))


def get_blender_mesh_shape_key(me):
    """Return main shape deformer's key."""
    return "|".join((get_blenderID_key(me), "Shape"))


def get_blender_mesh_shape_channel_key(me, shape):
    """Return shape channel and geometry shape keys."""
    return ("|".join((get_blenderID_key(me), "Shape", get_blenderID_key(shape))),
            "|".join((get_blenderID_key(me), "Geometry", get_blenderID_key(shape))))


def get_blender_bone_key(armature, bone):
    """Return bone's keys (Model and NodeAttribute)."""
    return "|".join((get_blenderID_key((armature, bone)), "Data"))


def get_blender_bindpose_key(obj, mesh):
    """Return object's bindpose key."""
    return "|".join((get_blenderID_key(obj), get_blenderID_key(mesh), "BindPose"))


def get_blender_armature_skin_key(armature, mesh):
    """Return armature's skin key."""
    return "|".join((get_blenderID_key(armature), get_blenderID_key(mesh), "DeformerSkin"))


def get_blender_bone_cluster_key(armature, mesh, bone):
    """Return bone's cluster key."""
    return "|".join((get_blenderID_key(armature), get_blenderID_key(mesh),
                     get_blenderID_key(bone), "SubDeformerCluster"))


def get_blender_anim_id_base(scene, ref_id):
    if ref_id is not None:
        return get_blenderID_key(scene) + "|" + get_blenderID_key(ref_id)
    else:
        return get_blenderID_key(scene)


def get_blender_anim_stack_key(scene, ref_id):
    """Return single anim stack key."""
    return get_blender_anim_id_base(scene, ref_id) + "|AnimStack"


def get_blender_anim_layer_key(scene, ref_id):
    """Return ID's anim layer key."""
    return get_blender_anim_id_base(scene, ref_id) + "|AnimLayer"


def get_blender_anim_curve_node_key(scene, ref_id, obj_key, fbx_prop_name):
    """Return (stack/layer, ID, fbxprop) curve node key."""
    return "|".join((get_blender_anim_id_base(scene, ref_id), obj_key, fbx_prop_name, "AnimCurveNode"))


def get_blender_anim_curve_key(scene, ref_id, obj_key, fbx_prop_name, fbx_prop_item_name):
    """Return (stack/layer, ID, fbxprop, item) curve key."""
    return "|".join((get_blender_anim_id_base(scene, ref_id), obj_key, fbx_prop_name,
                     fbx_prop_item_name, "AnimCurve"))


def get_blender_nodetexture_key(ma, socket_names):
    return "|".join((get_blenderID_key(ma), *socket_names))


# ##### Element generators. #####

# Note: elem may be None, in this case the element is not added to any parent.
def elem_empty(elem, name):
    sub_elem = encode_bin.FBXElem(name)
    if elem is not None:
        elem.elems.append(sub_elem)
    return sub_elem


def _elem_data_single(elem, name, value, func_name):
    sub_elem = elem_empty(elem, name)
    getattr(sub_elem, func_name)(value)
    return sub_elem


def _elem_data_vec(elem, name, value, func_name):
    sub_elem = elem_empty(elem, name)
    func = getattr(sub_elem, func_name)
    for v in value:
        func(v)
    return sub_elem


def elem_data_single_bool(elem, name, value):
    return _elem_data_single(elem, name, value, "add_bool")


def elem_data_single_char(elem, name, value):
    return _elem_data_single(elem, name, value, "add_char")


def elem_data_single_int8(elem, name, value):
    return _elem_data_single(elem, name, value, "add_int8")


def elem_data_single_int16(elem, name, value):
    return _elem_data_single(elem, name, value, "add_int16")


def elem_data_single_int32(elem, name, value):
    return _elem_data_single(elem, name, value, "add_int32")


def elem_data_single_int64(elem, name, value):
    return _elem_data_single(elem, name, value, "add_int64")


def elem_data_single_float32(elem, name, value):
    return _elem_data_single(elem, name, value, "add_float32")


def elem_data_single_float64(elem, name, value):
    return _elem_data_single(elem, name, value, "add_float64")


def elem_data_single_bytes(elem, name, value):
    return _elem_data_single(elem, name, value, "add_bytes")


def elem_data_single_string(elem, name, value):
    return _elem_data_single(elem, name, value, "add_string")


def elem_data_single_string_unicode(elem, name, value):
    return _elem_data_single(elem, name, value, "add_string_unicode")


def elem_data_single_bool_array(elem, name, value):
    return _elem_data_single(elem, name, value, "add_bool_array")


def elem_data_single_int32_array(elem, name, value):
    return _elem_data_single(elem, name, value, "add_int32_array")


def elem_data_single_int64_array(elem, name, value):
    return _elem_data_single(elem, name, value, "add_int64_array")


def elem_data_single_float32_array(elem, name, value):
    return _elem_data_single(elem, name, value, "add_float32_array")


def elem_data_single_float64_array(elem, name, value):
    return _elem_data_single(elem, name, value, "add_float64_array")


def elem_data_single_byte_array(elem, name, value):
    return _elem_data_single(elem, name, value, "add_byte_array")


def elem_data_vec_float64(elem, name, value):
    return _elem_data_vec(elem, name, value, "add_float64")
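
# Illustrative sketch of how these generators nest (the element names are real FBX node names, but
# this is not exporter code):
#   >>> objects = elem_empty(None, b"Objects")  # with elem=None, the new element has no parent
#   >>> model = elem_data_single_int64(objects, b"Model", get_fbx_uuid_from_key("Cube"))
#   >>> elem_data_single_int32(model, b"Version", FBX_MODELS_VERSION)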


# ##### Generators for standard FBXProperties70 properties. #####

def elem_properties(elem):
    return elem_empty(elem, b"Properties70")


# Properties definitions, format: (b"type_1", b"label(???)", "name_set_value_1", "name_set_value_2", ...)
# XXX Looks like there can be various variations of formats here... Will have to be checked ultimately!
# Also, those "custom" types like 'FieldOfView' or 'Lcl Translation' are pure nonsense,
# these are just Vector3D ultimately... *sigh* (again).
FBX_PROPERTIES_DEFINITIONS = {
    # Generic types.
    "p_bool": (b"bool", b"", "add_int32"),  # Yes, int32 for a bool (and they do have a core bool type)!!!
    "p_integer": (b"int", b"Integer", "add_int32"),
    "p_ulonglong": (b"ULongLong", b"", "add_int64"),
    "p_double": (b"double", b"Number", "add_float64"),  # Non-animatable?
    "p_number": (b"Number", b"", "add_float64"),  # Animatable-only?
    "p_enum": (b"enum", b"", "add_int32"),
    "p_vector_3d": (b"Vector3D", b"Vector", "add_float64", "add_float64", "add_float64"),  # Non-animatable?
    "p_vector": (b"Vector", b"", "add_float64", "add_float64", "add_float64"),  # Animatable-only?
    "p_color_rgb": (b"ColorRGB", b"Color", "add_float64", "add_float64", "add_float64"),  # Non-animatable?
    "p_color": (b"Color", b"", "add_float64", "add_float64", "add_float64"),  # Animatable-only?
    "p_string": (b"KString", b"", "add_string_unicode"),
    "p_string_url": (b"KString", b"Url", "add_string_unicode"),
    "p_timestamp": (b"KTime", b"Time", "add_int64"),
    "p_datetime": (b"DateTime", b"", "add_string_unicode"),
    # Special types.
    "p_object": (b"object", b""),  # XXX Check this! No value for this prop??? Would really like to know how it works!
    "p_compound": (b"Compound", b""),
    # Specific types (sic).
    # ## Objects (Models).
    "p_lcl_translation": (b"Lcl Translation", b"", "add_float64", "add_float64", "add_float64"),
    "p_lcl_rotation": (b"Lcl Rotation", b"", "add_float64", "add_float64", "add_float64"),
    "p_lcl_scaling": (b"Lcl Scaling", b"", "add_float64", "add_float64", "add_float64"),
    "p_visibility": (b"Visibility", b"", "add_float64"),
    "p_visibility_inheritance": (b"Visibility Inheritance", b"", "add_int32"),
    # ## Cameras!!!
    "p_roll": (b"Roll", b"", "add_float64"),
    "p_opticalcenterx": (b"OpticalCenterX", b"", "add_float64"),
    "p_opticalcentery": (b"OpticalCenterY", b"", "add_float64"),
    "p_fov": (b"FieldOfView", b"", "add_float64"),
    "p_fov_x": (b"FieldOfViewX", b"", "add_float64"),
    "p_fov_y": (b"FieldOfViewY", b"", "add_float64"),
}


def _elem_props_set(elem, ptype, name, value, flags):
    p = elem_data_single_string(elem, b"P", name)
    for t in ptype[:2]:
        p.add_string(t)
    p.add_string(flags)
    if len(ptype) == 3:
        getattr(p, ptype[2])(value)
    elif len(ptype) > 3:
        # We assume value is iterable, else it's a bug!
        for callback, val in zip(ptype[2:], value):
            getattr(p, callback)(val)


def _elem_props_flags(animatable, animated, custom):
    # XXX: There are way more flags, see
    #      http://help.autodesk.com/view/FBX/2015/ENU/?guid=__cpp_ref_class_fbx_property_flags_html
    #      Unfortunately, as usual, no doc at all about their 'translation' in the actual FBX file format.
    #      Curse you-know-who.
    if animatable:
        if animated:
            if custom:
                return b"A+U"
            return b"A+"
        if custom:
            # Seems that customprops always need those 'flags', see T69554. Go figure...
            return b"A+U"
        return b"A"
    if custom:
        # Seems that customprops always need those 'flags', see T69554. Go figure...
        return b"A+U"
    return b""


def elem_props_set(elem, ptype, name, value=None, animatable=False, animated=False, custom=False):
    ptype = FBX_PROPERTIES_DEFINITIONS[ptype]
    _elem_props_set(elem, ptype, name, value, _elem_props_flags(animatable, animated, custom))


def elem_props_compound(elem, cmpd_name, custom=False):
    def _setter(ptype, name, value, animatable=False, animated=False, custom=False):
        name = cmpd_name + b"|" + name
        elem_props_set(elem, ptype, name, value, animatable=animatable, animated=animated, custom=custom)

    elem_props_set(elem, "p_compound", cmpd_name, custom=custom)
    return _setter
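
# Illustrative example (assumes `elem` is an FBX element; the compound name is hypothetical):
# write a couple of properties, including a compound one whose children are named "Parent|Child":
#   >>> props = elem_properties(elem)
#   >>> elem_props_set(props, "p_double", b"FocalLength", 35.0, animatable=True)
#   >>> set_shadow = elem_props_compound(props, b"Shadows")
#   >>> set_shadow("p_color_rgb", b"Color", (0.0, 0.0, 0.0))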


def elem_props_template_init(templates, template_type):
    """
    Init a writing template of given type, for *one* element's properties.
    """
    ret = {}
    tmpl = templates.get(template_type)
    if tmpl is not None:
        written = tmpl.written[0]
        props = tmpl.properties
        ret = {name: [val, ptype, anim, written] for name, (val, ptype, anim) in props.items()}
    return ret


def elem_props_template_set(template, elem, ptype_name, name, value, animatable=False, animated=False):
    """
    Only add a prop if the same value is not already defined in the given template.
    Note it is important to not give iterators as value, here!
    """
    ptype = FBX_PROPERTIES_DEFINITIONS[ptype_name]
    if len(ptype) > 3:
        value = tuple(value)
    tmpl_val, tmpl_ptype, tmpl_animatable, tmpl_written = template.get(name, (None, None, False, False))
    # Note the animatable flag from the template takes precedence over the given one, if applicable.
    # However, animated properties are always written, since they cannot match their template!
    if tmpl_ptype is not None and not animated:
        if (tmpl_written and
                ((len(ptype) == 3 and (tmpl_val, tmpl_ptype) == (value, ptype_name)) or
                 (len(ptype) > 3 and (tuple(tmpl_val), tmpl_ptype) == (value, ptype_name)))):
            return  # Already in template and same value.
        _elem_props_set(elem, ptype, name, value, _elem_props_flags(tmpl_animatable, animated, False))
        template[name][3] = True
    else:
        _elem_props_set(elem, ptype, name, value, _elem_props_flags(animatable, animated, False))


def elem_props_template_finalize(template, elem):
    """
    Finalize one element's template/props.
    Issue is, some templates might be "needed" by different types (e.g. NodeAttribute is for lights, cameras, etc.),
    but values for only *one* subtype can be written as template. So we have to be sure we write those for the other
    subtypes in each and every element, if they are not overridden by that element.
    Yes, hairy, FBX that is to say. When they could easily support several subtypes per template... :(
    """
    for name, (value, ptype_name, animatable, written) in template.items():
        if written:
            continue
        ptype = FBX_PROPERTIES_DEFINITIONS[ptype_name]
        _elem_props_set(elem, ptype, name, value, _elem_props_flags(animatable, False, False))


# ##### Templates #####
# TODO: check all those "default" values, they should match Blender's defaults as much as possible, I guess?

FBXTemplate = namedtuple("FBXTemplate", ("type_name", "prop_type_name", "properties", "nbr_users", "written"))


def fbx_templates_generate(root, fbx_templates):
    # We may have to gather different templates in the same node (e.g. NodeAttribute template gathers properties
    # for Lights, Cameras, LibNodes, etc.).
    ref_templates = {(tmpl.type_name, tmpl.prop_type_name): tmpl for tmpl in fbx_templates.values()}

    templates = {}
    for type_name, prop_type_name, properties, nbr_users, _written in fbx_templates.values():
        tmpl = templates.setdefault(type_name, [{}, 0])
        tmpl[0][prop_type_name] = (properties, nbr_users)
        tmpl[1] += nbr_users

    for type_name, (subprops, nbr_users) in templates.items():
        template = elem_data_single_string(root, b"ObjectType", type_name)
        elem_data_single_int32(template, b"Count", nbr_users)

        if len(subprops) == 1:
            prop_type_name, (properties, _nbr_sub_type_users) = next(iter(subprops.items()))
            subprops = (prop_type_name, properties)
            ref_templates[(type_name, prop_type_name)].written[0] = True
        else:
            # Ack! Even though this could/should work, looks like it is not supported. So we have to choose one. :|
            max_users = max_props = -1
            written_prop_type_name = None
            for prop_type_name, (properties, nbr_sub_type_users) in subprops.items():
                if nbr_sub_type_users > max_users or (nbr_sub_type_users == max_users and len(properties) > max_props):
                    max_users = nbr_sub_type_users
                    max_props = len(properties)
                    written_prop_type_name = prop_type_name
            subprops = (written_prop_type_name, properties)
            ref_templates[(type_name, written_prop_type_name)].written[0] = True

        prop_type_name, properties = subprops
        if prop_type_name and properties:
            elem = elem_data_single_string(template, b"PropertyTemplate", prop_type_name)
            props = elem_properties(elem)
            for name, (value, ptype, animatable) in properties.items():
                try:
                    elem_props_set(props, ptype, name, value, animatable=animatable)
                except Exception as e:
                    print("Failed to write template prop (%r)" % e)
                    print(props, ptype, name, value, animatable)


# ##### FBX animation helpers. #####


class AnimationCurveNodeWrapper:
    """
    This class provides a common interface for all (FBX-wise) AnimationCurveNode and AnimationCurve elements,
    and an easy API to handle those.
    """
    __slots__ = (
        'elem_keys', 'default_values', 'fbx_group', 'fbx_gname', 'fbx_props',
        'force_keying', 'force_startend_keying',
        '_frame_times_array', '_frame_values_array', '_frame_write_mask_array',
    )

    kinds = {
        'LCL_TRANSLATION': ("Lcl Translation", "T", ("X", "Y", "Z")),
        'LCL_ROTATION': ("Lcl Rotation", "R", ("X", "Y", "Z")),
        'LCL_SCALING': ("Lcl Scaling", "S", ("X", "Y", "Z")),
        'SHAPE_KEY': ("DeformPercent", "DeformPercent", ("DeformPercent",)),
        'CAMERA_FOCAL': ("FocalLength", "FocalLength", ("FocalLength",)),
        'CAMERA_FOCUS_DISTANCE': ("FocusDistance", "FocusDistance", ("FocusDistance",)),
    }

    def __init__(self, elem_key, kind, force_keying, force_startend_keying, default_values=...):
        self.elem_keys = [elem_key]
        assert(kind in self.kinds)
        self.fbx_group = [self.kinds[kind][0]]
        self.fbx_gname = [self.kinds[kind][1]]
        self.fbx_props = [self.kinds[kind][2]]
        self.force_keying = force_keying
        self.force_startend_keying = force_startend_keying
        self._frame_times_array = None
        self._frame_values_array = None
        self._frame_write_mask_array = None
        if default_values is not ...:
            assert(len(default_values) == len(self.fbx_props[0]))
            self.default_values = default_values
        else:
            self.default_values = (0.0,) * len(self.fbx_props[0])

    def __bool__(self):
        # We are 'True' if we do have some validated keyframes...
        return self._frame_write_mask_array is not None and bool(np.any(self._frame_write_mask_array))

    def add_group(self, elem_key, fbx_group, fbx_gname, fbx_props):
        """
        Add another whole group (curve node, animated item/prop + curve node/curve identifiers).
        E.g. shape animations are written twice, hooray!
        """
        assert(len(fbx_props) == len(self.fbx_props[0]))
        self.elem_keys.append(elem_key)
        self.fbx_group.append(fbx_group)
        self.fbx_gname.append(fbx_gname)
        self.fbx_props.append(fbx_props)

    def set_keyframes(self, keyframe_times, keyframe_values):
        """
        Set all keyframe times and values of the group.
        Values can be a 2D array where each row is the values for a separate curve.
        """
        # View 1D keyframe_values as 2D with a single row, so that the same code can be used for both 1D and
        # 2D inputs.
        if len(keyframe_values.shape) == 1:
            keyframe_values = keyframe_values[np.newaxis]
        # There must be a time for each column of values.
        assert(len(keyframe_times) == keyframe_values.shape[1])
        # There must be as many rows of values as there are properties.
        assert(len(self.fbx_props[0]) == len(keyframe_values))
        write_mask = np.full_like(keyframe_values, True, dtype=bool)  # Write everything by default.
        self._frame_times_array = keyframe_times
        self._frame_values_array = keyframe_values
        self._frame_write_mask_array = write_mask
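
    # Illustrative usage sketch (`obj_key` is a hypothetical element key; not exporter code):
    #   >>> anim = AnimationCurveNodeWrapper(obj_key, 'LCL_TRANSLATION', False, True, (0.0, 0.0, 0.0))
    #   >>> times = np.array([0.0, 1.0, 2.0])
    #   >>> values = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 2.0]])  # rows: X, Y, Z curves
    #   >>> anim.set_keyframes(times, values)
    #   >>> anim.simplify(1.0, 1)  # leaves the constant X/Y rows disabled, aside from any forced keying
    #   >>> bool(anim)             # True while at least one sample is still enabled for writing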
1318 def simplify(self, fac, step, force_keep=False):
1320 Simplifies sampled curves by only enabling samples when:
1321 * their values relatively differ from the previous sample ones.
1323 if self._frame_times_array is None:
1324 # Keyframes have not been added yet.
1325 return
1327 if fac == 0.0:
1328 return
1330 # So that, with default factor and step values (1), we get:
1331 min_reldiff_fac = fac * 1.0e-3 # min relative value evolution: 0.1% of current 'order of magnitude'.
1332 min_absdiff_fac = 0.1 # A tenth of reldiff...
1334 # Initialise to no values enabled for writing.
1335 self._frame_write_mask_array[:] = False
1337 # Values are enabled for writing if they differ enough from either of their adjacent values or if they differ
1338 # enough from the closest previous value that is enabled due to either of these conditions.
1339 for sampled_values, enabled_mask in zip(self._frame_values_array, self._frame_write_mask_array):
1340 # Create overlapping views of the 'previous' (all but the last) and 'current' (all but the first)
1341 # `sampled_values` and `enabled_mask`.
1342 # Calculate absolute values from `sampled_values` so that the 'previous' and 'current' absolute arrays can
1343 # be views into the same array instead of separately calculated arrays.
1344 abs_sampled_values = np.abs(sampled_values)
1345 # 'previous' views.
1346 p_val_view = sampled_values[:-1]
1347 p_abs_val_view = abs_sampled_values[:-1]
1348 p_enabled_mask_view = enabled_mask[:-1]
1349 # 'current' views.
1350 c_val_view = sampled_values[1:]
1351 c_abs_val_view = abs_sampled_values[1:]
1352 c_enabled_mask_view = enabled_mask[1:]

            # If enough difference from the previous sampled value, enable the current value *and* the previous one!
            # The difference check is symmetrical, so this will compare each value to both of its adjacent values.
            # Unless it is forcefully enabled later, this is the only way that the first value can be enabled.
            # This is a contracted form of relative + absolute-near-zero difference:
            #     def is_different(a, b):
            #         abs_diff = abs(a - b)
            #         if abs_diff < min_reldiff_fac * min_absdiff_fac:
            #             return False
            #         return (abs_diff / ((abs(a) + abs(b)) / 2)) > min_reldiff_fac
            # Note that we ignore the '/ 2' part here, since it's not very significant for us.
            # Contracted form using only builtin Python functions:
            #     return abs(a - b) > (min_reldiff_fac * max(abs(a) + abs(b), min_absdiff_fac))
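            # Two illustrative cases (hypothetical values) with the defaults min_reldiff_fac = 1e-3 and
            # min_absdiff_fac = 0.1: a step from 1000.0 to 1000.5 gives abs_diff = 0.5, below
            # 1e-3 * max(2000.5, 0.1) ≈ 2.0, so it is *not* different enough; a step from 0.0 to 0.01 gives
            # abs_diff = 0.01, above 1e-3 * max(0.01, 0.1) = 1e-4, so it *is* different enough.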
            abs_diff = np.abs(c_val_view - p_val_view)
            different_if_greater_than = min_reldiff_fac * np.maximum(c_abs_val_view + p_abs_val_view, min_absdiff_fac)
            enough_diff_p_val_mask = abs_diff > different_if_greater_than
            # Enable both the current values *and* the previous values where `enough_diff_p_val_mask` is True. Some
            # values may get set to True twice because the views overlap, but this is not a problem.
            p_enabled_mask_view[enough_diff_p_val_mask] = True
            c_enabled_mask_view[enough_diff_p_val_mask] = True

            # Else, if enough difference from the previous enabled value, enable the current value only!
            # For each 'current' value, get the index of the nearest previous enabled value in `sampled_values` (or
            # itself if the value is enabled).
            # Start with an array that is the index of the 'current' value in `sampled_values`. The 'current' values
            # are all but the first value, so the indices will be from 1 to `len(sampled_values)` exclusive.
            # Let len(sampled_values) == 9:
            # [1, 2, 3, 4, 5, 6, 7, 8]
            p_enabled_idx_in_sampled_values = np.arange(1, len(sampled_values))
            # Replace the indices of all disabled values with 0 in preparation for filling them in with the index of
            # the nearest previous enabled value. We choose to replace with 0 so that if there is no nearest previous
            # enabled value, we instead default to `sampled_values[0]`.
            c_val_disabled_mask = ~c_enabled_mask_view
            # Let `c_val_disabled_mask` be:
            # [F, F, T, F, F, T, T, T]
            # Set indices to 0 where `c_val_disabled_mask` is True:
            # [1, 2, 3, 4, 5, 6, 7, 8]
            #        v        v  v  v
            # [1, 2, 0, 4, 5, 0, 0, 0]
            p_enabled_idx_in_sampled_values[c_val_disabled_mask] = 0
            # Accumulative maximum travels across the array from left to right, filling in the zeroed indices with the
            # maximum value so far, which will be the closest previous enabled index because the non-zero indices are
            # strictly increasing.
            # [1, 2, 0, 4, 5, 0, 0, 0]
            #        v        v  v  v
            # [1, 2, 2, 4, 5, 5, 5, 5]
            p_enabled_idx_in_sampled_values = np.maximum.accumulate(p_enabled_idx_in_sampled_values)
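            # (Equivalently, as a standalone NumPy check:
            #     np.maximum.accumulate(np.array([1, 2, 0, 4, 5, 0, 0, 0]))
            # returns array([1, 2, 2, 4, 5, 5, 5, 5]).)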
            # Only disabled values need to be checked against their nearest previous enabled values.
            # We can additionally ignore all values which equal their immediately previous value because those values
            # will never be enabled if they were not enabled by the earlier difference check against immediately
            # previous values.
            p_enabled_diff_to_check_mask = np.logical_and(c_val_disabled_mask, p_val_view != c_val_view)
            # Convert from a mask to indices because we need the indices later and because the array of indices will
            # usually be smaller than the mask array, making it faster to index other arrays with.
            p_enabled_diff_to_check_idx = np.flatnonzero(p_enabled_diff_to_check_mask)
            # `p_enabled_idx_in_sampled_values` from earlier:
            # [1, 2, 2, 4, 5, 5, 5, 5]
            # `p_enabled_diff_to_check_mask` assuming no values equal their immediately previous value:
            # [F, F, T, F, F, T, T, T]
            # `p_enabled_diff_to_check_idx`:
            # [      2,       5, 6, 7]
            # `p_enabled_idx_in_sampled_values_to_check`:
            # [      2,       5, 5, 5]
            p_enabled_idx_in_sampled_values_to_check = p_enabled_idx_in_sampled_values[p_enabled_diff_to_check_idx]
            # Get the 'current' disabled values that need to be checked.
            c_val_to_check = c_val_view[p_enabled_diff_to_check_idx]
            c_abs_val_to_check = c_abs_val_view[p_enabled_diff_to_check_idx]
            # Get the nearest previous enabled value for each value to be checked.
            nearest_p_enabled_val = sampled_values[p_enabled_idx_in_sampled_values_to_check]
            abs_nearest_p_enabled_val = np.abs(nearest_p_enabled_val)
            # Check the relative + absolute-near-zero difference again, but against the nearest previous enabled value
            # this time.
            abs_diff = np.abs(c_val_to_check - nearest_p_enabled_val)
            different_if_greater_than = (min_reldiff_fac
                                         * np.maximum(c_abs_val_to_check + abs_nearest_p_enabled_val, min_absdiff_fac))
            enough_diff_p_enabled_val_mask = abs_diff > different_if_greater_than
            # If there are any that are different enough from the previous enabled value, then we have to check them
            # all iteratively, because enabling a new value can change the nearest previous enabled value of some
            # elements, which changes their relative + absolute-near-zero difference:
            # `p_enabled_diff_to_check_idx`:
            # [2, 5, 6, 7]
            # `p_enabled_idx_in_sampled_values_to_check`:
            # [2, 5, 5, 5]
            # Let `enough_diff_p_enabled_val_mask` be:
            # [F, F, T, T]
            # The first index that is newly enabled is 6:
            # [2, 5,>6<,5]
            # But 6 > 5, so the next value's nearest previous enabled index is also affected:
            # [2, 5, 6,>6<]
            # We had calculated a newly enabled index of 7 too, but that was calculated against the old nearest
            # previous enabled index of 5, which has now been updated to 6, so whether 7 is enabled or not needs to be
            # recalculated:
            # [F, F, T, ?]
            if np.any(enough_diff_p_enabled_val_mask):
                # Accessing .data, the memoryview of the array, iteratively or by individual index is faster than
                # doing the same with the array itself.
                zipped = zip(p_enabled_diff_to_check_idx.data,
                             c_val_to_check.data,
                             c_abs_val_to_check.data,
                             p_enabled_idx_in_sampled_values_to_check.data,
                             enough_diff_p_enabled_val_mask.data)
                # While iterating, we could set updated values into `enough_diff_p_enabled_val_mask` as we go and then
                # update `enabled_mask` in bulk after the iteration, but if we're going to update an array while
                # iterating, we may as well update `enabled_mask` directly instead and skip the bulk update.
                # Additionally, the number of `True` writes to `enabled_mask` is usually much less than the number of
                # updates that would be required to `enough_diff_p_enabled_val_mask`.
                c_enabled_mask_view_mv = c_enabled_mask_view.data

                # While iterating, keep track of the most recent newly enabled index, so we can tell when we need to
                # recalculate whether the current value needs to be enabled.
                new_p_enabled_idx = -1
                # Keep track of its value too for performance.
                new_p_enabled_val = -1
                new_abs_p_enabled_val = -1
                for cur_idx, c_val, c_abs_val, old_p_enabled_idx, enough_diff in zipped:
                    if new_p_enabled_idx > old_p_enabled_idx:
                        # The nearest previous enabled value is newly enabled and was not included when
                        # `enough_diff_p_enabled_val_mask` was calculated, so whether the current value is different
                        # enough needs to be recalculated using the newly enabled value.
                        # Check if the relative + absolute-near-zero difference is enough to enable this value.
                        enough_diff = (abs(c_val - new_p_enabled_val)
                                       > (min_reldiff_fac * max(c_abs_val + new_abs_p_enabled_val, min_absdiff_fac)))
                    if enough_diff:
                        # The current value needs to be enabled.
                        c_enabled_mask_view_mv[cur_idx] = True
                        # Update the index and values for this newly enabled value.
                        new_p_enabled_idx = cur_idx
                        new_p_enabled_val = c_val
                        new_abs_p_enabled_val = c_abs_val

        # If we would write nothing (the action does nothing) and are in 'force_keep' mode, we key everything! :P
        # See T41766.
        # Also, it seems some importers (e.g. UE4) do not correctly handle armatures where some bones
        # are not animated but are children of animated ones, so we added an option to systematically force writing
        # one key in this case.
        # See T41719, T41605, T41254...
        if self.force_keying or (force_keep and not self):
            are_keyed = [True] * len(self._frame_write_mask_array)
        else:
            are_keyed = np.any(self._frame_write_mask_array, axis=1)

        # If we did key something, ensure first and last sampled values are keyed as well.
        if self.force_startend_keying:
            for is_keyed, frame_write_mask in zip(are_keyed, self._frame_write_mask_array):
                if is_keyed:
                    frame_write_mask[:1] = True
                    frame_write_mask[-1:] = True
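
    # A minimal call-order sketch (hypothetical values; the exporter drives `fac` and `step` from its
    # bake/simplify settings):
    #     wrapper.set_keyframes(times, values)
    #     wrapper.simplify(fac=1.0, step=1)   # default-strength simplification
    #     for data in wrapper.get_final_data(scene, ref_id):
    #         ...                             # write the remaining keyframes out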

    def get_final_data(self, scene, ref_id, force_keep=False):
        """
        Yield final anim data for this 'curvenode' (for all curvenodes defined).
        force_keep forces keeping a curve even if it only has one valid keyframe.
        """
        curves = [
            (self._frame_times_array[write_mask], values[write_mask])
            for values, write_mask in zip(self._frame_values_array, self._frame_write_mask_array)
        ]

        force_keep = force_keep or self.force_keying
        for elem_key, fbx_group, fbx_gname, fbx_props in \
                zip(self.elem_keys, self.fbx_group, self.fbx_gname, self.fbx_props):
            group_key = get_blender_anim_curve_node_key(scene, ref_id, elem_key, fbx_group)
            group = {}
            for c, def_val, fbx_item in zip(curves, self.default_values, fbx_props):
                fbx_item = FBX_ANIM_PROPSGROUP_NAME + "|" + fbx_item
                curve_key = get_blender_anim_curve_key(scene, ref_id, elem_key, fbx_group, fbx_item)
                # (curve key, default value, keyframes, write flag).
                times = c[0]
                write_flag = len(times) > (0 if force_keep else 1)
                group[fbx_item] = (curve_key, def_val, c, write_flag)
            yield elem_key, group_key, group, fbx_group, fbx_gname


# ##### FBX objects generators. #####

# FBX Model-like data (i.e. Blender objects, depsgraph instances and bones) are wrapped in ObjectWrapper.
# This allows us to use (nearly) the same FBX-wise code for all those types.
# The wrapper tries to stay as small as possible, mostly using callbacks (property(get...))
# into the actual Blender data it contains.
# Note it caches its instances, so that you may call ObjectWrapper(your_object) several times
# with minimal cost (just re-computing the key).
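#
# A minimal sketch of that caching behaviour (assuming `ob` is a bpy.types.Object):
#     w1 = ObjectWrapper(ob)
#     w2 = ObjectWrapper(ob)
#     assert w1 is w2  # same cached instance; only the key was re-computed
#     ObjectWrapper.cache_clear()  # drops the cache, e.g. once an export is done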

class MetaObjectWrapper(type):
    def __call__(cls, bdata, armature=None):
        if bdata is None:
            return None
        dup_mat = None
        if isinstance(bdata, Object):
            key = get_blenderID_key(bdata)
        elif isinstance(bdata, DepsgraphObjectInstance):
            if bdata.is_instance:
                key = "|".join((get_blenderID_key((bdata.parent.original, bdata.instance_object.original)),
                                cls._get_dup_num_id(bdata)))
                dup_mat = bdata.matrix_world.copy()
            else:
                key = get_blenderID_key(bdata.object.original)
        else:  # isinstance(bdata, (Bone, PoseBone)):
            if isinstance(bdata, PoseBone):
                bdata = armature.data.bones[bdata.name]
            key = get_blenderID_key((armature, bdata))

        cache = getattr(cls, "_cache", None)
        if cache is None:
            cache = cls._cache = {}
        instance = cache.get(key)
        if instance is not None:
            # Duplis hack: since dupli instances are not persistent in Blender (we have to re-create them to get
            # updated info like matrix...), we *always* need to reset that matrix when calling ObjectWrapper()
            # (all other data is assumed valid for the whole cache lifespan, so we can skip resetting it).
            instance._dupli_matrix = dup_mat
            return instance

        instance = cls.__new__(cls, bdata, armature)
        instance.__init__(bdata, armature)
        instance.key = key
        instance._dupli_matrix = dup_mat
        cache[key] = instance
        return instance


class ObjectWrapper(metaclass=MetaObjectWrapper):
    """
    This class provides a common interface for all (FBX-wise) object-like elements:
    * Blender Object
    * Blender Bone and PoseBone
    * Blender DepsgraphObjectInstance (for duplis).
    Note that since the same Blender object might be 'mapped' to several FBX models (esp. with duplis),
    we need to use a key to identify each.
    """
    __slots__ = (
        'name', 'key', 'bdata', 'parented_to_armature', 'override_materials',
        '_tag', '_ref', '_dupli_matrix',
    )

    @classmethod
    def cache_clear(cls):
        if hasattr(cls, "_cache"):
            del cls._cache

    @staticmethod
    def _get_dup_num_id(bdata):
        INVALID_IDS = {2147483647, 0}
        pids = tuple(bdata.persistent_id)
        idx_valid = 0
        prev_i = ...
        for idx, i in enumerate(pids[::-1]):
            if i not in INVALID_IDS or (idx == len(pids) and i == 0 and prev_i != 0):
                idx_valid = len(pids) - idx
                break
            prev_i = i
        return ".".join(str(i) for i in pids[:idx_valid])
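
    # For illustration (hypothetical ids): a persistent_id of (3, 5, 2147483647, ..., 2147483647) yields
    # "3.5": trailing invalid ids are stripped, and the remaining ones are joined with '.'.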

    def __init__(self, bdata, armature=None):
        """
        bdata might be an Object (deprecated), DepsgraphObjectInstance, Bone or PoseBone.
        If Bone or PoseBone, the armature Object must be provided.
        """
        # Note: DepsgraphObjectInstance is purely runtime data; it becomes invalid as soon as we step to the next
        # item! Hence we have to immediately copy *all* needed data...
        if isinstance(bdata, Object):  # DEPRECATED
            self._tag = 'OB'
            self.name = get_blenderID_name(bdata)
            self.bdata = bdata
            self._ref = None
        elif isinstance(bdata, DepsgraphObjectInstance):
            if bdata.is_instance:
                # Note that the dupli instance matrix is set by meta-class initialization.
                self._tag = 'DP'
                self.name = "|".join((get_blenderID_name((bdata.parent.original, bdata.instance_object.original)),
                                      "Dupli", self._get_dup_num_id(bdata)))
                self.bdata = bdata.instance_object.original
                self._ref = bdata.parent.original
            else:
                self._tag = 'OB'
                self.name = get_blenderID_name(bdata)
                self.bdata = bdata.object.original
                self._ref = None
        else:  # isinstance(bdata, (Bone, PoseBone)):
            if isinstance(bdata, PoseBone):
                bdata = armature.data.bones[bdata.name]
            self._tag = 'BO'
            self.name = get_blenderID_name(bdata)
            self.bdata = bdata
            self._ref = armature
        self.parented_to_armature = False
        self.override_materials = None

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.key == other.key

    def __hash__(self):
        return hash(self.key)

    def __repr__(self):
        return self.key

    # #### Common to all _tag values.
    def get_fbx_uuid(self):
        return get_fbx_uuid_from_key(self.key)
    fbx_uuid = property(get_fbx_uuid)

    # XXX Not sure how much that’s useful now... :/
    def get_hide(self):
        return self.bdata.hide_viewport if self._tag in {'OB', 'DP'} else self.bdata.hide
    hide = property(get_hide)

    def get_parent(self):
        if self._tag == 'OB':
            if (self.bdata.parent and self.bdata.parent.type == 'ARMATURE' and
                    self.bdata.parent_type == 'BONE' and self.bdata.parent_bone):
                # Try to parent to a bone.
                bo_par = self.bdata.parent.pose.bones.get(self.bdata.parent_bone, None)
                if bo_par:
                    return ObjectWrapper(bo_par, self.bdata.parent)
                else:  # Fall back to mere object parenting.
                    return ObjectWrapper(self.bdata.parent)
            else:
                # Mere object parenting.
                return ObjectWrapper(self.bdata.parent)
        elif self._tag == 'DP':
            return ObjectWrapper(self._ref)
        else:  # self._tag == 'BO'
            return ObjectWrapper(self.bdata.parent, self._ref) or ObjectWrapper(self._ref)
    parent = property(get_parent)

    def get_bdata_pose_bone(self):
        if self._tag == 'BO':
            return self._ref.pose.bones[self.bdata.name]
        return None
    bdata_pose_bone = property(get_bdata_pose_bone)

    def get_matrix_local(self):
        if self._tag == 'OB':
            return self.bdata.matrix_local.copy()
        elif self._tag == 'DP':
            return self._ref.matrix_world.inverted_safe() @ self._dupli_matrix
        else:  # 'BO', current pose
            # PoseBone.matrix is in armature space, bring it back into real local space!
            par = self.bdata.parent
            par_mat_inv = self._ref.pose.bones[par.name].matrix.inverted_safe() if par else Matrix()
            return par_mat_inv @ self._ref.pose.bones[self.bdata.name].matrix
    matrix_local = property(get_matrix_local)

    def get_matrix_global(self):
        if self._tag == 'OB':
            return self.bdata.matrix_world.copy()
        elif self._tag == 'DP':
            return self._dupli_matrix
        else:  # 'BO', current pose
            return self._ref.matrix_world @ self._ref.pose.bones[self.bdata.name].matrix
    matrix_global = property(get_matrix_global)

    def get_matrix_rest_local(self):
        if self._tag == 'BO':
            # Bone.matrix_local is in armature space, bring it back into real local space!
            par = self.bdata.parent
            par_mat_inv = par.matrix_local.inverted_safe() if par else Matrix()
            return par_mat_inv @ self.bdata.matrix_local
        else:
            return self.matrix_local.copy()
    matrix_rest_local = property(get_matrix_rest_local)

    def get_matrix_rest_global(self):
        if self._tag == 'BO':
            return self._ref.matrix_world @ self.bdata.matrix_local
        else:
            return self.matrix_global.copy()
    matrix_rest_global = property(get_matrix_rest_global)

    # #### Transform and helpers
    def has_valid_parent(self, objects):
        par = self.parent
        if par in objects:
            if self._tag == 'OB':
                par_type = self.bdata.parent_type
                if par_type in {'OBJECT', 'BONE'}:
                    return True
                else:
                    print("Sorry, “{}” parenting type is not supported".format(par_type))
                    return False
            return True
        return False

    def use_bake_space_transform(self, scene_data):
        # NOTE: Only applies to object types supporting this!!! Currently, only meshes and the like...
        # TODO: Check whether this can work for bones too...
        return (scene_data.settings.bake_space_transform and self._tag in {'OB', 'DP'} and
                self.bdata.type in BLENDER_OBJECT_TYPES_MESHLIKE | {'EMPTY'})

    def fbx_object_matrix(self, scene_data, rest=False, local_space=False, global_space=False):
        """
        Generate the object transform matrix (*always* in matching *FBX* space!).
        If local_space is True, the returned matrix is *always* in local space.
        Else, if global_space is True, the returned matrix is always in world space.
        If both local_space and global_space are False, the returned matrix is in parent space if the parent is
        valid, else in world space.
        Note local_space has precedence over global_space.
        If rest is True and the object is a Bone, returns the matching rest pose transform instead of the current
        pose one.
        Applies the specific rotation to bones, lamps and cameras (conversion Blender -> FBX).
        """
        # Objects which are not bones and do not have any parent are *always* in global space
        # (unless local_space is True!).
        is_global = (not local_space and
                     (global_space or not (self._tag in {'DP', 'BO'} or self.has_valid_parent(scene_data.objects))))

        # Objects (meshes!) parented to an armature are not parented to anything in FBX, hence we need them
        # in global space, which is their 'virtual' local space...
        is_global = is_global or self.parented_to_armature

        # Since we have to apply corrections to some types of object, we always need local Blender space here...
        matrix = self.matrix_rest_local if rest else self.matrix_local
        parent = self.parent

        # Bones, lamps and cameras need to be rotated (in local space!).
        if self._tag == 'BO':
            # If we have a bone parent we need to undo the parent correction.
            if not is_global and scene_data.settings.bone_correction_matrix_inv and parent and parent.is_bone:
                matrix = scene_data.settings.bone_correction_matrix_inv @ matrix
            # Apply the bone correction.
            if scene_data.settings.bone_correction_matrix:
                matrix = matrix @ scene_data.settings.bone_correction_matrix
        elif self.bdata.type == 'LIGHT':
            matrix = matrix @ MAT_CONVERT_LIGHT
        elif self.bdata.type == 'CAMERA':
            matrix = matrix @ MAT_CONVERT_CAMERA

        if self._tag in {'DP', 'OB'} and parent:
            if parent._tag == 'BO':
                # In the bone parent case, we get the transformation in **bone tip** space (sigh).
                # We have to bring it back into bone root space, which is the value FBX expects.
                matrix = Matrix.Translation((0, (parent.bdata.tail - parent.bdata.head).length, 0)) @ matrix

        # Our matrix is in local space, time to bring it into its final desired space.
        if parent:
            if is_global:
                # Move matrix to global Blender space.
                matrix = (parent.matrix_rest_global if rest else parent.matrix_global) @ matrix
            elif parent.use_bake_space_transform(scene_data):
                # Blender's and FBX's local space of the parent may differ if we use bake_space_transform...
                # Apply the parent's *Blender* local space...
                matrix = (parent.matrix_rest_local if rest else parent.matrix_local) @ matrix
                # ...and move it back into the parent's *FBX* local space.
                par_mat = parent.fbx_object_matrix(scene_data, rest=rest, local_space=True)
                matrix = par_mat.inverted_safe() @ matrix

        if self.use_bake_space_transform(scene_data):
            # If we bake the transforms we need to post-multiply the inverse global transform.
            # This means that the global transform will not apply to children of this transform.
            matrix = matrix @ scene_data.settings.global_matrix_inv
        if is_global:
            # In any case, pre-multiply the global matrix to get it in FBX global space!
            matrix = scene_data.settings.global_matrix @ matrix

        return matrix
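
    # A minimal call sketch (hypothetical names; `scene_data` is the exporter's gathered FBXExportData):
    #     mat_parent = wrapper.fbx_object_matrix(scene_data)                    # parent space if parent is valid
    #     mat_world = wrapper.fbx_object_matrix(scene_data, global_space=True)  # always FBX world space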

    def fbx_object_tx(self, scene_data, rest=False, rot_euler_compat=None):
        """
        Generate object transform data (always in local space when possible).
        """
        matrix = self.fbx_object_matrix(scene_data, rest=rest)
        loc, rot, scale = matrix.decompose()
        matrix_rot = rot.to_matrix()
        # quat -> euler, we always use 'XYZ' order, use ref rotation if given.
        if rot_euler_compat is not None:
            rot = rot.to_euler('XYZ', rot_euler_compat)
        else:
            rot = rot.to_euler('XYZ')
        return loc, rot, scale, matrix, matrix_rot

    # #### _tag dependent...
    def get_is_object(self):
        return self._tag == 'OB'
    is_object = property(get_is_object)

    def get_is_dupli(self):
        return self._tag == 'DP'
    is_dupli = property(get_is_dupli)

    def get_is_bone(self):
        return self._tag == 'BO'
    is_bone = property(get_is_bone)

    def get_type(self):
        if self._tag in {'OB', 'DP'}:
            return self.bdata.type
        return ...
    type = property(get_type)

    def get_armature(self):
        if self._tag == 'BO':
            return ObjectWrapper(self._ref)
        return None
    armature = property(get_armature)

    def get_bones(self):
        if self._tag == 'OB' and self.bdata.type == 'ARMATURE':
            return (ObjectWrapper(bo, self.bdata) for bo in self.bdata.data.bones)
        return ()
    bones = property(get_bones)

    def get_materials(self):
        override_materials = self.override_materials
        if override_materials is not None:
            return override_materials
        if self._tag in {'OB', 'DP'}:
            return tuple(slot.material for slot in self.bdata.material_slots)
        return ()
    materials = property(get_materials)

    def is_deformed_by_armature(self, arm_obj):
        if not (self.is_object and self.type == 'MESH'):
            return False
        if self.parent == arm_obj and self.bdata.parent_type == 'ARMATURE':
            return True
        for mod in self.bdata.modifiers:
            if mod.type == 'ARMATURE' and mod.object == arm_obj.bdata:
                return True
        return False

    # #### Duplis...
    def dupli_list_gen(self, depsgraph):
        if self._tag == 'OB' and self.bdata.is_instancer:
            return (ObjectWrapper(dup) for dup in depsgraph.object_instances
                    if dup.parent and ObjectWrapper(dup.parent.original) == self)
        return ()
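
    # A minimal usage sketch (assuming `depsgraph` comes from bpy.context.evaluated_depsgraph_get() and
    # `wrapper` wraps an instancer Object):
    #     for dup in wrapper.dupli_list_gen(depsgraph):
    #         ...  # each `dup` is an ObjectWrapper around one generated instance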


def fbx_name_class(name, cls):
    return FBX_NAME_CLASS_SEP.join((name, cls))
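
# For example, fbx_name_class(b"Cube", b"Model") returns b"Cube\x00\x01Model", i.e. the
# 'Name\x00\x01Class' form used by binary FBX.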


# ##### Top-level FBX data container. #####

# Helper sub-container gathering all exporter settings related to media (texture files).
FBXExportSettingsMedia = namedtuple("FBXExportSettingsMedia", (
    "path_mode", "base_src", "base_dst", "subdir",
    "embed_textures", "copy_set", "embedded_set",
))

# Helper container gathering all exporter settings.
FBXExportSettings = namedtuple("FBXExportSettings", (
    "report", "to_axes", "global_matrix", "global_scale", "apply_unit_scale", "unit_scale",
    "bake_space_transform", "global_matrix_inv", "global_matrix_inv_transposed",
    "context_objects", "object_types", "use_mesh_modifiers", "use_mesh_modifiers_render",
    "mesh_smooth_type", "use_subsurf", "use_mesh_edges", "use_tspace", "use_triangles",
    "armature_nodetype", "use_armature_deform_only", "add_leaf_bones",
    "bone_correction_matrix", "bone_correction_matrix_inv",
    "bake_anim", "bake_anim_use_all_bones", "bake_anim_use_nla_strips", "bake_anim_use_all_actions",
    "bake_anim_step", "bake_anim_simplify_factor", "bake_anim_force_startend_keying",
    "use_metadata", "media_settings", "use_custom_props", "colors_type", "prioritize_active_color",
))

# Helper container gathering some data we need multiple times:
# * templates.
# * settings, scene.
# * objects.
# * object data.
# * skinning data (binding armature/mesh).
# * animations.
FBXExportData = namedtuple("FBXExportData", (
    "templates", "templates_users", "connections",
    "settings", "scene", "depsgraph", "objects", "animations", "animated", "frame_start", "frame_end",
    "data_empties", "data_lights", "data_cameras", "data_meshes", "mesh_material_indices",
    "data_bones", "data_leaf_bones", "data_deformers_skin", "data_deformers_shape",
    "data_world", "data_materials", "data_textures", "data_videos",
))

# Helper container gathering all importer settings.
FBXImportSettings = namedtuple("FBXImportSettings", (
    "report", "to_axes", "global_matrix", "global_scale",
    "bake_space_transform", "global_matrix_inv", "global_matrix_inv_transposed",
    "use_custom_normals", "use_image_search",
    "use_alpha_decals", "decal_offset",
    "use_anim", "anim_offset",
    "use_subsurf",
    "use_custom_props", "use_custom_props_enum_as_string",
    "nodal_material_wrap_map", "image_cache",
    "ignore_leaf_bones", "force_connect_children", "automatic_bone_orientation", "bone_correction_matrix",
    "use_prepost_rot", "colors_type",