|
import pandas as pd |
|
import numpy as np |
|
from pathlib import Path |
|
from typing import List, Union, Tuple |
|
import h5py |
|
|
|
|
|
import fbx |
|
import itertools |
|
|
|
|
|
import globals |
|
import utils |
|
|
|
|
|
def center_axis(a: Union[List[float], np.array]) -> np.array: |
|
""" |
|
    Centers a list of floats around zero by subtracting the midpoint of its value range.
|
:param a: List of floats to center. |
|
:return: The centered list as a `np.array`. |
|
""" |
|
|
|
if not isinstance(a, np.ndarray): |
|
a = np.array(a) |
|
|
|
_min = np.min(a) |
|
_max = np.max(a) |
|
_c = _max - (_max - _min) * 0.5 |
|
a -= _c |
|
return a |
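
# Example sketch (illustrative values, not from the original data): the range midpoint of
# [1.0, 2.0, 5.0] is 3.0, so center_axis([1.0, 2.0, 5.0]) returns array([-2., -1., 2.]).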
|
|
|
|
|
def append_zero(arr: np.ndarray) -> np.ndarray: |
|
""" |
|
    Appends a column of zeros along the last axis of the given 3D array.
    :param arr: `np.array` to extend.
    :return: `np.array` with an extra zero column appended along the last axis.
|
""" |
|
zeros = np.zeros((arr.shape[0], arr.shape[1], 1), dtype=float) |
|
return np.concatenate((arr, zeros), axis=-1) |
|
|
|
|
|
def append_one(arr: np.ndarray) -> np.ndarray: |
|
""" |
|
    Appends a column of ones along the last axis of the given 3D array.
    :param arr: `np.array` to extend.
    :return: `np.array` with an extra one column appended along the last axis.
|
""" |
|
ones = np.ones((arr.shape[0], arr.shape[1], 1), dtype=float) |
|
return np.concatenate((arr, ones), axis=-1) |
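
# Shape sketch (illustrative only): both helpers pad the last axis of a 3D array by one,
# e.g. append_zero(np.zeros((2, 4, 3))).shape == (2, 4, 4), and the last slice of
# append_one(np.zeros((2, 4, 3))) is all ones.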
|
|
|
|
|
def merge_tdc(actor_classes: np.array, |
|
marker_classes: np.array, |
|
translation_vectors: np.array, |
|
rotation_vectors: np.array, |
|
scale_vectors: np.array, |
|
ordered: bool = True) -> np.array: |
|
""" |
|
Merges the given arrays into a single array of shape (n_frames, pc_size, 14). |
|
:param actor_classes: `np.array` of actor class labels. |
|
:param marker_classes: `np.array` of marker class labels. |
|
:param translation_vectors: `np.array` of translation vectors (3 values each). |
|
:param rotation_vectors: `np.array` of Euler rotation angles (3 values each). |
|
:param scale_vectors: `np.array` of scale factors (3 values each). |
|
:param ordered: Whether to sort the cloud by actor and marker classes. |
|
:return: Merged `np.array` of shape (n_frames, pc_size, 14). |
|
""" |
|
|
|
|
|
|
|
tdc = np.concatenate((np.expand_dims(actor_classes, -1), |
|
np.expand_dims(marker_classes, -1), |
|
append_zero(translation_vectors), |
|
append_zero(rotation_vectors), |
|
append_one(scale_vectors)), axis=2) |
|
if ordered: |
|
tdc = sort_cloud(tdc) |
|
|
|
return tdc |
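
# Shape sketch (illustrative values): with 2 frames and a 4-point cloud,
#   merge_tdc(np.zeros((2, 4)), np.zeros((2, 4)),
#             np.zeros((2, 4, 3)), np.zeros((2, 4, 3)), np.ones((2, 4, 3)))
# returns an array of shape (2, 4, 14): 1 actor + 1 marker + 4 translation +
# 4 rotation + 4 scale channels per point.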
|
|
|
|
|
def shuffle_tdc(tdc: np.array) -> np.array: |
|
""" |
|
    Shuffles the given timeline dense cloud along its second dimension, the marker rows.
    Each row is shuffled as a whole, so the per-marker transforms stay intact.
|
:param tdc: `np.array` to shuffle. |
|
:return: shuffled `np.array`. |
|
""" |
|
|
|
if tdc.ndim != 3: |
|
raise ValueError(f'Array does not have 3 dimensions: {tdc.ndim}/3.') |
|
|
|
|
|
for i in range(tdc.shape[0]): |
|
np.random.shuffle(tdc[i]) |
|
return tdc |
|
|
|
|
|
def sort_cloud(cloud: np.array) -> np.array: |
|
""" |
|
Convenience function to sort a timeline dense cloud by actor and marker classes. |
|
Not required. |
|
:param cloud: `np.array` point cloud to sort. |
|
:return: Sorted `np.array` point cloud. |
|
""" |
|
|
|
actor_classes = cloud[:, :, 0] |
|
marker_classes = cloud[:, :, 1] |
|
|
|
|
|
sorted_tdc = np.empty_like(cloud) |
|
|
|
|
|
for i in range(cloud.shape[0]): |
|
|
|
sorted_indices = np.lexsort((marker_classes[i], actor_classes[i])) |
|
|
|
|
|
sorted_tdc[i] = cloud[i, sorted_indices] |
|
|
|
return sorted_tdc |
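
# Ordering sketch (illustrative): for a single frame with (actor, marker) pairs
# [(2, 1), (1, 2), (1, 1)], np.lexsort sorts by actor class first, then marker class,
# giving [(1, 1), (1, 2), (2, 1)] while keeping each row's transform values attached.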
|
|
|
|
|
def create_keyframe(anim_curve: fbx.FbxAnimCurve, frame: int, value: float) -> None: |
|
""" |
|
Creates a keyframe at the given frame number on the given animation curve. |
|
:param anim_curve: `fbx.FbxAnimCurve` node to add the keyframe to. |
|
:param frame: `int` frame number at which to add the keyframe. |
|
:param value: `float` value that the keyframe will have. |
|
    :return: None.
|
""" |
|
|
|
t = fbx.FbxTime() |
|
t.SetFrame(frame) |
|
|
|
|
|
key_index = anim_curve.KeyAdd(t)[0] |
|
anim_curve.KeySetValue(key_index, value) |
|
return |
|
|
|
|
|
def get_child_node_by_name(parent_node: fbx.FbxNode, name: str, ignore_namespace: bool = False) \ |
|
-> Union[fbx.FbxNode, None]: |
|
""" |
|
Gets the child node with the given name. |
|
:param parent_node: `fbx.FbxNode` to get the child node from. |
|
:param name: `str` name of the child node to get. |
|
:param ignore_namespace: `bool` whether to ignore the namespace in the node name. |
|
:return: `fbx.FbxNode` child node with the given name, if it exists, else None. |
|
""" |
|
|
|
for c in range(parent_node.GetChildCount()): |
|
|
|
child = parent_node.GetChild(c) |
|
|
|
if match_name(child, name, ignore_namespace): |
|
|
|
return child |
|
|
|
return None |
|
|
|
|
|
def match_name(node: fbx.FbxNode, name: str, ignore_namespace: bool = True) -> bool: |
|
""" |
|
Checks if the given node's name matches the given name. |
|
:param node: `fbx.FbxNode` to check. |
|
:param name: `str` name to match. |
|
    :param ignore_namespace: `bool` whether to ignore the namespace in the node name.
    :return: `bool` True if the node name (optionally namespace-stripped) equals the given name.
    """
|
|
|
node_name = node.GetName() |
|
|
|
if ignore_namespace: |
|
node_name = node_name.split(':')[-1] |
|
|
|
return node_name == name |
|
|
|
|
|
def array_to_dict(tsc: np.array, start_frame: int = 0) -> dict: |
|
""" |
|
Converts an `np.array` timeline sparse cloud to a dictionary structured for keyframed animation. |
|
:param tsc: `np.array` timeline sparse cloud to process. |
|
:param start_frame: Optional `int` frame at which the animation starts, useful for timecode. |
|
:return: `dict` optimized for retrieving keyframe info. |
|
""" |
|
|
|
result = {} |
|
|
|
|
|
for frame, node in itertools.product(range(tsc.shape[0]), range(tsc.shape[1])): |
|
|
|
|
|
actor_class = int(tsc[frame, node, 0]) |
|
marker_class = int(tsc[frame, node, 1]) |
|
|
|
|
|
if actor_class == 0 or marker_class == 0: |
|
continue |
|
|
|
|
|
|
|
translations = tsc[frame, node, 2:5] + np.array([0.0]) |
|
rotations = tsc[frame, node, 6:9] + np.array([0.0]) |
|
scales = tsc[frame, node, 10:13] + np.array([1.0]) |
|
|
|
|
|
world_matrix = fbx.FbxAMatrix() |
|
world_matrix.SetT(fbx.FbxVector4(*translations)) |
|
world_matrix.SetR(fbx.FbxVector4(*rotations)) |
|
world_matrix.SetS(fbx.FbxVector4(*scales)) |
|
|
|
|
|
if actor_class not in result: |
|
result[actor_class] = {} |
|
|
|
|
|
if marker_class not in result[actor_class]: |
|
result[actor_class][marker_class] = {} |
|
|
|
|
|
result[actor_class][marker_class][frame + start_frame] = world_matrix |
|
|
|
return result |
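
# Result structure sketch (keys are illustrative):
# {
#     1: {                       # actor class
#         5: {                   # marker class
#             120: FbxAMatrix,   # frame number -> world transform matrix
#             121: FbxAMatrix,
#         },
#     },
# }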
|
|
|
|
|
def world_to_local_transform(node: fbx.FbxNode, world_transform: fbx.FbxAMatrix, frame: int) -> \ |
|
Tuple[List[float], List[float], List[float]]: |
|
""" |
|
Takes a world transform and uses the node's parent world transform to calculate this node's |
|
local transform at the given frame. |
|
:param node: `fbx.FbxNode` that the given world transform belongs to. |
|
:param world_transform: `fbx.FbxAMatrix` world transform to convert to local transform. |
|
:param frame: `int` frame number at which to evaluate the parent's world transform. |
|
    :return: Tuple of (translation, rotation, scaling) for the local transform, each a list of 3 floats.
|
""" |
|
t = fbx.FbxTime() |
|
t.SetFrame(frame) |
|
if node.GetParent(): |
|
|
|
parent_world_transform = node.GetParent().EvaluateGlobalTransform(t) |
|
|
|
|
|
parent_world_transform_inv = parent_world_transform.Inverse() |
|
|
|
|
|
lcl = parent_world_transform_inv * world_transform |
|
else: |
|
|
|
lcl = world_transform |
|
|
|
    return [lcl.GetT()[i] for i in range(3)], [lcl.GetR()[i] for i in range(3)], [lcl.GetS()[i] for i in range(3)]
|
|
|
|
|
def isolate_actor_from_tdc(tdc: np.array, actor: int) -> np.array: |
|
""" |
|
Returns all markers of the given actor in the timeline dense cloud. |
|
:param tdc: `np.array` timeline dense cloud to filter. |
|
:param actor: `int` actor class, starting at 1. |
|
:return: `np.array` that contains only the markers of the given actor. |
|
""" |
|
    if actor <= 0:
        raise ValueError('Second argument (actor) must be 1 or higher.')
|
mask = tdc[:, :, 0] == float(actor) |
|
return tdc[mask] |
|
|
|
|
|
def split_tdc_into_actors(tdc: np.array) -> List[np.array]: |
|
""" |
|
Uses isolate_actor_from_tdc() to isolate all unique actors in the timeline dense cloud. |
|
:param tdc: Timeline dense cloud to filter. |
|
:return: List of isolated actor `np.array`. |
|
""" |
|
actor_count = len([x for x in np.unique(tdc[:, :, 0]) if x != 0.]) |
|
return [isolate_actor_from_tdc(tdc, i) for i in range(1, actor_count + 1)] |
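
# Masking sketch (illustrative): for a tdc of shape (1, 3, 14) whose actor channel is
# [1., 2., 0.], isolate_actor_from_tdc(tdc, 1) boolean-masks the first two dimensions,
# so the result collapses to shape (1, 14) and contains only actor 1's rows.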
|
|
|
|
|
def get_keyed_frames_from_curve(curve: fbx.FbxAnimCurve, length: int = -1) -> List[int]:
|
""" |
|
Returns a list of all the frames on the given curve. |
|
:param curve: `fbx.FbxAnimCurve` to get frames from. |
|
:param length: Desired amount of frame numbers to return. If this is more than there are keyframes on the curve, |
|
it pads 0s to the end. Default -1, which will not add any 0s. |
|
:return: List of all the frames on the given curve. |
|
""" |
|
|
|
frames = [curve.KeyGet(i).GetTime().GetFrameCount() for i in range(curve.KeyGetCount())] |
|
|
|
dif = length - len(frames) |
|
|
|
if dif > 0 and length != -1: |
|
frames += [0.] * dif |
|
|
|
return frames |
|
|
|
|
|
def get_world_transforms(actor_idx: int, marker_idx: int, m: fbx.FbxNode, |
|
r: List[int], c: fbx.FbxAnimCurve) -> List[List[float]]: |
|
""" |
|
For the given marker node, gets the world transform for each frame in r, and stores the translation, rotation |
|
and scaling values as a list of lists. Stores the actor and marker classes at the start of this list of lists. |
|
    The keyed-frame flags (1 if the frame was keyed, 0 otherwise) are always stored as the last list.
|
Note: This function has to be passed the animation curve, because we need the animation layer to get the |
|
    animation curve. The animation layer is stored inside the FBXContainer class, to which we don't have access here.
|
:param actor_idx: `int` actor class. |
|
:param marker_idx: `int` marker class. |
|
:param m: `fbx.FbxNode` to evaluate the world transform of at each frame. |
|
:param r: `List[int]` list of frame numbers to evaluate the world transform at. |
|
:param c: `fbx.FbxAnimCurve` node to read the keyframes from. |
|
    :return: List of 16 lists (actor, marker, tx, ty, tz, 0, rx, ry, rz, 0, sx, sy, sz, 1, frame, keyed flag), each of length len(r).
|
""" |
|
|
|
zeros = [0.0 for _ in range(len(r))] |
|
|
|
ones = [1.0 for _ in range(len(r))] |
|
|
|
|
|
tx, ty, tz, rx, ry, rz, sx, sy, sz = [], [], [], [], [], [], [], [], [] |
|
|
|
actors = [actor_idx for _ in range(len(r))] |
|
|
|
markers = [marker_idx for _ in range(len(r))] |
|
|
|
t = fbx.FbxTime() |
|
|
|
|
|
|
|
for f in r: |
|
t.SetFrame(f) |
|
wt = m.EvaluateGlobalTransform(t) |
|
wtt, wtr, wts = wt.GetT(), wt.GetR(), wt.GetS() |
|
tx.append(wtt[0]) |
|
ty.append(wtt[1]) |
|
tz.append(wtt[2]) |
|
rx.append(wtr[0]) |
|
ry.append(wtr[1]) |
|
rz.append(wtr[2]) |
|
sx.append(wts[0]) |
|
sy.append(wts[1]) |
|
sz.append(wts[2]) |
|
|
|
|
|
keyed_frames = get_keyed_frames_from_curve(c) |
|
|
|
keyed_bools = [1 if f in keyed_frames else 0 for f in r] |
|
|
|
|
|
return [ |
|
actors, |
|
markers, |
|
tx, ty, tz, zeros, |
|
rx, ry, rz, zeros, |
|
sx, sy, sz, ones, |
|
r, keyed_bools |
|
] |
|
|
|
|
|
def get_children_of_parent(parent: fbx.FbxNode) -> List[fbx.FbxNode]: |
|
""" |
|
Returns a list of all the children of the given parent. |
|
:param parent: `fbx.FbxNode` to get children from. |
|
:return: List of `fbx.FbxNode` children. |
|
""" |
|
return [parent.GetChild(i) for i in range(parent.GetChildCount())] |
|
|
|
|
|
def flatten_labeled_transforms(arr: np.array) -> np.array: |
|
""" |
|
    Flattens the given array so that it has the shape (n_actors * n_frames, 16, 73).
    :param arr: `np.array` to process.
    :return: `np.array` of shape (n_actors * n_frames, 16, 73).
|
""" |
|
|
|
|
|
|
|
flattened = arr.transpose(1, 0, 2, 3) |
|
|
|
|
|
return np.concatenate(flattened, axis=0) |
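
# Shape sketch (illustrative): flatten_labeled_transforms(np.zeros((5, 2, 16, 73))).shape
# is (10, 16, 73) -- the 2 actors are stacked along the frame axis.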
|
|
|
|
|
def replace_zeros_with_inf(arr: np.array) -> np.array: |
|
""" |
|
    Replaces the transform values with np.inf for every marker on every frame that was not keyed.
|
:param arr: `np.array` to process. |
|
:return: `np.array` with updated values. |
|
""" |
|
|
|
|
|
mask = arr[:, -1] == 0 |
|
for i in range(arr.shape[0]): |
|
arr[i, 2:-2, mask[i]] = np.inf |
|
return arr |
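
# Behaviour sketch (illustrative): for arr of shape (1, 16, 2) where the keyed-flag row
# arr[0, -1] is [1., 0.], only the second marker's transform channels (indices 2..13)
# are overwritten with np.inf; the class, frame and keyed channels are left untouched.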
|
|
|
|
|
def scale_translations(arr: np.array, scale: float = 0.01, |
|
dims: Tuple[float, float, float] = (10., 10., 10.)) -> np.array: |
|
""" |
|
Applies a scaling to the translation values in the given array. |
|
:param arr: `np.array` that can either be a timeline dense cloud or translation vectors. |
|
:param scale: `float` scaling factor. |
|
:param dims: `tuple` of `float` values that determine the dimensions of the volume. |
|
:return: Modified `np.array`. |
|
""" |
|
|
|
|
|
start = 0 if arr.shape[0] == 3 else 2 |
|
|
|
|
|
|
|
arr[:, start + 0] *= scale / dims[0] |
|
arr[:, start + 1] *= scale / dims[1] |
|
arr[:, start + 2] *= scale / dims[2] |
|
|
|
return arr |
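
# Numeric sketch (illustrative): with the defaults scale=0.01 and dims=(10., 10., 10.),
# every translation channel is multiplied by 0.001, so a marker at x = 250.0 (cm)
# is scaled to 0.25 -- 2.5 m into a 10 m wide volume.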
|
|
|
|
|
def transform_translations(arr: np.array, move_to_center: bool = True, |
|
scale: float = 0.01, dims: Tuple[float, float, float] = (10., 10., 10.)) -> np.array: |
|
""" |
|
    First moves the x and z values to the center of their axes. Then scales all values to normalize them.
|
:param arr: `np.array` that can either be a timeline dense cloud or translation vectors. |
|
    :param move_to_center: Uses center_axis() to move the x and z translations to the center of their axes.
|
:param scale: `float` scaling factor. |
|
:param dims: `tuple` of `float` values that determine the dimensions of the volume. |
|
:return: Modified `np.array`. |
|
""" |
|
if move_to_center: |
|
for frame in range(arr.shape[0]): |
|
|
|
arr[frame, 2] = center_axis(arr[frame, 2]) |
|
|
|
arr[frame, 4] = center_axis(arr[frame, 4]) |
|
|
|
return scale_translations(arr, scale, dims) |
|
|
|
|
|
class FBXContainerBase: |
|
def __init__(self, fbx_file: Path, debug: int = -1) -> None: |
|
""" |
|
Class that stores references to important nodes in an FBX file. |
|
Offers utility functions to quickly load animation data. |
|
        :param fbx_file: `Path` to the file to load.
        :param debug: `int` verbosity level; messages with a level <= this value are printed (-1 disables output).
        """
|
self.input_fbx = fbx_file |
|
self.debug = debug |
|
|
|
|
|
self.parents = [] |
|
self.children = [] |
|
|
|
self.parent_names = [] |
|
|
|
def _init_scene(self) -> None: |
|
""" |
|
        Stores the manager, scene, root and time_mode properties.
|
Destroys the importer to remove the reference to the loaded file. |
|
""" |
|
|
|
self.manager = fbx.FbxManager.Create() |
|
importer = fbx.FbxImporter.Create(self.manager, 'MyScene') |
|
|
|
|
|
importer.Initialize(str(self.input_fbx)) |
|
self.scene = fbx.FbxScene.Create(self.manager, '') |
|
importer.Import(self.scene) |
|
self.root = self.scene.GetRootNode() |
|
if self.root is None: |
|
raise ValueError('No root node found.') |
|
self.time_mode = self.scene.GetGlobalSettings().GetTimeMode() |
|
fbx.FbxTime.SetGlobalTimeMode(self.time_mode) |
|
|
|
|
|
|
|
importer.Destroy() |
|
|
|
def _init_anim(self) -> None: |
|
""" |
|
        Stores the anim_layer, num_frames, start_frame and end_frame properties.
|
""" |
|
|
|
anim_stack = self.scene.GetCurrentAnimationStack() |
|
self.anim_layer = anim_stack.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId), 0) |
|
if self.anim_layer is None: |
|
raise ValueError('No animation layer found.') |
|
|
|
|
|
local_time_span = anim_stack.GetLocalTimeSpan() |
|
self.num_frames = int(local_time_span.GetDuration().GetFrameCount()) |
|
if self.num_frames == 0: |
|
raise ValueError('Number of animated frames is 0.') |
|
self.start_frame = local_time_span.GetStart().GetFrameCount() |
|
self.end_frame = local_time_span.GetStop().GetFrameCount() |
|
|
|
def _init_parents(self, names_to_look_for: List[str]) -> None: |
|
""" |
|
        Goes through all root children (generation 1).
        If a child has all the given names among its own children, it is considered a parent
        (e.g. a Shogun subject) and appended to the parents and parent_names list properties.
        :param names_to_look_for: List of `str` child names a node must contain to count as a parent.
|
""" |
|
ts = fbx.FbxTime() |
|
ts.SetFrame(self.start_frame) |
|
|
|
te = fbx.FbxTime() |
|
te.SetFrame(self.end_frame) |
|
|
|
|
|
gen1_nodes = [self.root.GetChild(i) for i in range(self.root.GetChildCount())] |
|
for gen1_node in gen1_nodes: |
|
gen2_nodes = [gen1_node.GetChild(i) for i in |
|
range(gen1_node.GetChildCount())] |
|
|
|
|
|
gen2_names = [node.GetName().split(':')[-1] for node in gen2_nodes] |
|
if all(name in gen2_names for name in names_to_look_for): |
|
self.parent_names.append(gen1_node.GetName()) |
|
self.parents.append(gen1_node) |
|
|
|
if len(self.parents) == 0: |
|
raise ValueError('No parents found. A node is considered a parent ' + |
|
'if it has the following child nodes: ' + |
|
', '.join(names_to_look_for) + '.') |
|
|
|
self.parent_count = len(self.parents) |
|
|
|
def _init_children(self, child_names: List[str]) -> None: |
|
""" |
|
Goes through all parent nodes and stores references to its child nodes. |
|
""" |
|
for parent_node in self.parents: |
|
family = {} |
|
for child_name in child_names: |
|
for parent_idx in range(parent_node.GetChildCount()): |
|
child = parent_node.GetChild(parent_idx) |
|
|
|
|
|
if match_name(child, child_name, ignore_namespace=True): |
|
family[child_name] = child |
|
|
|
if len(family) != len(child_names): |
|
raise ValueError(f'{parent_node.GetName()} does not have all children.') |
|
|
|
self.children.append(family) |
|
|
|
def _print(self, txt: str, lvl: int = 0) -> None: |
|
if lvl <= self.debug: |
|
print(txt) |
|
|
|
def get_frame_range(self) -> List[int]: |
|
""" |
|
Replacement and improvement for: |
|
`list(range(self.num_frames))` |
|
If the animation does not start at frame 0, this will return a list that has the correct frames. |
|
:return: List of `int` frame numbers that are between the start and end frame of the animation. |
|
""" |
|
return list(range(self.start_frame, self.end_frame)) |
|
|
|
def _check_parent(self, parent: int = 0): |
|
""" |
|
        Safety check to see if the parent `int` is a valid index (to avoid out of range errors).
        :param parent: `int` parent index, which should be between 0 and parent_count - 1.
        """
        if not 0 <= parent < self.parent_count:
            raise ValueError(f'Actor index must be between 0 and {self.parent_count - 1} ({parent}).')
|
|
|
def convert_r(self, r: Union[int, Tuple[int, int], Tuple[int, int, int]] = None) -> List[int]: |
|
""" |
|
Converts the value of r to a list of frame numbers, depending on what r is. |
|
:param r: Custom frame range to use. |
|
:return: List of `int` frame numbers (doesn't have to start at 0). |
|
""" |
|
|
|
if isinstance(r, int): |
|
r = list(range(self.num_frames)) if r > self.num_frames else list(range(r)) |
|
|
|
|
|
elif isinstance(r, tuple) and len(r) == 2: |
|
|
|
if r[1] - r[0] > self.num_frames: |
|
r = list(range(r[0], r[0] + self.num_frames)) |
|
else: |
|
r = list(range(r[0], r[1])) |
|
|
|
|
|
elif isinstance(r, tuple) and len(r) == 3: |
|
|
|
if r[1] - r[0] > self.num_frames: |
|
r = list(range(r[0], r[0] + self.num_frames, r[2])) |
|
else: |
|
r = list(range(r[0], r[1], r[2])) |
|
|
|
|
|
else: |
|
r = self.get_frame_range() |
|
|
|
return r |
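
    # Frame-range sketch (illustrative, assuming the take has at least 100 animated frames):
    #   convert_r(50)          -> [0, 1, ..., 49]
    #   convert_r((10, 20))    -> [10, 11, ..., 19]
    #   convert_r((0, 100, 5)) -> [0, 5, ..., 95]
    #   convert_r(None)        -> every frame between start_frame and end_frame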
|
|
|
def get_parent_node_by_name(self, parent_name: str, ignore_namespace: bool = True) -> Union[fbx.FbxNode, None]: |
|
""" |
|
Utility function to get a parent node reference by name. |
|
:param parent_name: `str` name that will be looked for in the node name. |
|
:param ignore_namespace: `bool` Whether to ignore namespaces in a node's name. |
|
:return: |
|
""" |
|
|
|
parent_nodes = [self.root.GetChild(i) for i in range(self.root.GetChildCount())] |
|
|
|
return next( |
|
( |
|
parent_node |
|
for parent_node in parent_nodes |
|
if match_name(parent_node, parent_name, ignore_namespace=ignore_namespace) |
|
), |
|
None, |
|
) |
|
|
|
def get_child_by_name(self, parent: int, name: str): |
|
""" |
|
Returns the reference to the parent's direct child. |
|
:param parent: `int` parent index. |
|
:param name: `str` child name. |
|
:return: `fbx.FbxNode` reference. |
|
""" |
|
self._check_parent(parent) |
|
return self.children[parent][name] |
|
|
|
def get_node_by_path(self, path: str) -> fbx.FbxNode: |
|
""" |
|
Utility function to retrieve a node reference from a path like /Actor1/Hips/UpperLeg_l. |
|
:param path: `str` path with forward slashes to follow. |
|
:return: `fbx.FbxNode` reference to that node. |
|
""" |
|
|
|
node_names = [x for x in path.split('/') if x] |
|
|
|
nodes = [self.get_parent_node_by_name(node_names[0], False)] |
|
|
|
        for node_name in node_names[1:]:
            nodes.append(get_child_node_by_name(nodes[-1], node_name))
|
|
|
return nodes[-1] |
|
|
|
def get_hierarchy(self, start: fbx.FbxNode, hierarchy: List = None) -> List[fbx.FbxNode]: |
|
""" |
|
        Returns the start node and all of its descendants as a flat list.
        :param start: `fbx.FbxNode` that will be used as the starting point for traversing the hierarchy.
        :param hierarchy: `List` of `fbx.FbxNode` references collected so far (used for recursion).
        :return: Flat `List` of `fbx.FbxNode` references, including the start node.
|
""" |
|
if hierarchy is None: |
|
hierarchy = [] |
|
hierarchy.append(start) |
|
children = get_children_of_parent(start) |
|
for child in children: |
|
self.get_hierarchy(child, hierarchy) |
|
|
|
return hierarchy |
|
|
|
def remove_node(self, node: fbx.FbxNode, recursive: bool = False) -> bool: |
|
""" |
|
Removes a node by reference from the scene. |
|
:param node: `fbx.FbxNode` to remove. |
|
:param recursive: `bool` Apply deletion recursively. |
|
:return: True if success. |
|
""" |
|
if recursive: |
|
children = [node.GetChild(c) for c in range(node.GetChildCount())] |
|
for child in children: |
|
self.remove_node(child, True) |
|
|
|
node.GetParent().RemoveChild(node) |
|
|
|
|
|
self.scene.RemoveNode(node) |
|
|
|
return True |
|
|
|
|
|
class FBXContainer(FBXContainerBase): |
|
def __init__(self, fbx_file: Path, |
|
                       volume_dims: Tuple[float, float, float] = (10., 10., 10.),
|
max_actors: int = 8, |
|
pc_size: int = 1024, |
|
scale: float = 0.01, |
|
debug: int = -1): |
|
""" |
|
Class that stores references to important nodes in an FBX file. |
|
Offers utility functions to quickly load animation data. |
|
:param fbx_file: `Path` to the file to load. |
|
:param volume_dims: `tuple` of `float` that represent the dimensions of the capture volume in meters. |
|
:param max_actors: `int` maximum amount of actors to expect in a point cloud. |
|
        :param pc_size: `int` amount of points in a point cloud.
        :param scale: `float` scaling factor applied to the translation values (default 0.01).
        :param debug: If higher than -1, will print out debugging statements.
|
""" |
|
super().__init__(fbx_file=fbx_file, debug=debug) |
|
|
|
if pc_size < max_actors * 73: |
|
raise ValueError('Point cloud size must be large enough to contain the maximum amount of actors * 73' |
|
f' markers: {pc_size}/{max_actors * 73}.') |
|
|
|
self.debug = debug |
|
|
|
self.time_modes = globals.get_time_modes() |
|
|
|
self.child_names = globals.get_marker_names() |
|
|
|
|
|
self.children = [] |
|
|
|
self.labeled_world_transforms = None |
|
self.unlabeled_world_transforms = None |
|
|
|
|
|
self.vol_x = volume_dims[0] |
|
self.vol_y = volume_dims[1] |
|
self.vol_z = volume_dims[2] |
|
self.hvol_x = volume_dims[0] / 2 |
|
self.hvol_y = volume_dims[1] / 2 |
|
self.hvol_z = volume_dims[2] / 2 |
|
|
|
self.scale = scale |
|
|
|
self.max_actors = max_actors |
|
|
|
self.pc_size = pc_size |
|
|
|
self.output_fbx = utils.append_suffix_to_file(fbx_file, '_INF') |
|
self.valid_frames = [] |
|
|
|
self.init() |
|
|
|
def __init_unlabeled_markers(self) -> None: |
|
""" |
|
Looks for the Unlabeled_Markers parent node under the root and stores references to all unlabeled marker nodes. |
|
""" |
|
|
|
        self.unlabeled_markers_parent = None
        self.unlabeled_markers = []

        for i in range(self.root.GetChildCount()):
|
gen1_node = self.root.GetChild(i) |
|
if match_name(gen1_node, 'Unlabeled_Markers'): |
|
self.unlabeled_markers_parent = gen1_node |
|
self.unlabeled_markers = [gen1_node.GetChild(um) for um in range(gen1_node.GetChildCount())] |
|
return |
|
|
|
def init_world_transforms(self, r: Union[int, Tuple[int, int], Tuple[int, int, int]] = None) -> None: |
|
""" |
|
Calls the init functions for the labeled and unlabeled world transforms. |
|
:param r: Custom frame range to extract. |
|
""" |
|
self.init_labeled_world_transforms(r=r) |
|
self.init_unlabeled_world_transforms(r=r) |
|
|
|
def init_labeled_world_transforms(self, r: Union[int, Tuple[int, int], Tuple[int, int, int]] = None) -> np.array: |
|
""" |
|
For each actor, for each marker, stores a list for each element in the world transform for each frame |
|
in r. This can later be used to recreate the world transform matrix. |
|
:param r: Custom frame range to use. |
|
        :return: `np.array` of shape (n_frames, n_actors, 16, n_markers).
|
""" |
|
r = self.convert_r(r) |
|
labeled_data = [] |
|
|
|
|
|
for actor_idx in range(self.parent_count): |
|
|
|
actor_data = [] |
|
|
|
for marker_idx, (n, m) in enumerate(self.children[actor_idx].items()): |
|
|
|
|
|
curve = m.LclTranslation.GetCurve(self.anim_layer, 'X', True) |
|
|
|
marker_data = get_world_transforms(actor_idx + 1, marker_idx + 1, m, r, curve) |
|
|
|
actor_data.append(marker_data) |
|
self._print(f'Actor {actor_idx} marker {marker_idx} done', 1) |
|
|
|
labeled_data.append(actor_data) |
|
|
|
|
|
|
|
wide_layout = np.array(labeled_data) |
|
|
|
self.labeled_world_transforms = np.transpose(wide_layout, axes=(3, 0, 2, 1)) |
|
return self.labeled_world_transforms |
|
|
|
def init_unlabeled_world_transforms(self, r: Union[int, Tuple[int, int], Tuple[int, int, int]] = None) -> np.array: |
|
""" |
|
For all unlabeled markers, stores a list for each element in the world transform for each frame |
|
in r. This can later be used to recreate the world transform matrix. |
|
:param r: Custom frame range to use. |
|
        :return: `np.array` of shape (n_frames, 16, n_unlabeled_markers).
|
""" |
|
r = self.convert_r(r) |
|
unlabeled_data = [] |
|
|
|
|
|
for ulm in self.unlabeled_markers: |
|
|
|
|
|
curve = ulm.LclTranslation.GetCurve(self.anim_layer, 'X', True) |
|
|
|
marker_data = get_world_transforms(0, 0, ulm, r, curve) |
|
|
|
unlabeled_data.append(marker_data) |
|
self._print(f'Unlabeled marker {ulm.GetName()} done', 1) |
|
|
|
|
|
|
|
wide_layout = np.array(unlabeled_data) |
|
|
|
self.unlabeled_world_transforms = np.transpose(wide_layout, axes=(2, 1, 0)) |
|
return self.unlabeled_world_transforms |
|
|
|
def init(self) -> None: |
|
""" |
|
Initializes the scene. |
|
""" |
|
self._init_scene() |
|
self._init_anim() |
|
self._init_parents(list(self.child_names[:4])) |
|
self._init_children(self.child_names) |
|
self.__init_unlabeled_markers() |
|
self._print('Init done', 0) |
|
|
|
def columns_from_joints(self) -> List[str]: |
|
""" |
|
Generates a list of column names based on the (order of the) marker names. |
|
        :return: List of column names, in the form of [node1x, node1y, node1z, node2x, node2y, node2z, ...].
|
""" |
|
columns = [] |
|
for name in self.child_names: |
|
columns += [f'{name}x', f'{name}y', f'{name}z'] |
|
|
|
return columns |
|
|
|
def remove_clipping_poses(self, arr: np.array) -> np.array: |
|
""" |
|
Checks for each axis if it does not cross the volume limits. Returns an array without clipping poses. |
|
This function uses the volume dimensions in cms, so use it before the data is scaled down. |
|
:param arr: `np.array` to filter. |
|
:return: Filtered `np.array` that only has non-clipping poses. |
|
""" |
|
mask_x1 = (arr[:, 2] < self.hvol_x / self.scale).all(axis=1) |
|
mask_x2 = (arr[:, 2] > -self.hvol_x / self.scale).all(axis=1) |
|
mask_z1 = (arr[:, 4] < self.hvol_z / self.scale).all(axis=1) |
|
mask_z2 = (arr[:, 4] > -self.hvol_z / self.scale).all(axis=1) |
|
mask = mask_x1 & mask_x2 & mask_z1 & mask_z2 |
|
return arr[mask] |
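
    # Threshold sketch (illustrative, default settings): with vol_x = 10 m and scale = 0.01,
    # the x limit is hvol_x / scale = 5 / 0.01 = 500 (cm), so any pose with a marker
    # beyond +/-500 cm on x or z is dropped.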
|
|
|
def extract_training_translations(self, r: Union[int, Tuple[int, int], Tuple[int, int, int]] = None, |
|
move_to_center: bool = True) -> np.array: |
|
""" |
|
Manipulates the existing labeled world transform array into one that is suitable for training. |
|
        It does this by flattening the array to shape (n_actors * n_frames, 16, 73), keeping only fully
        keyed poses, then removing all clipping poses and finally transforming the translations to the
        right location and scale.
        :param r: Custom frame range to use if the labeled transforms are not stored yet.
        :param move_to_center: If True, the x and z translations are moved to the center of the volume.
|
:return: Transformed labeled world transforms. |
|
""" |
|
if self.labeled_world_transforms is None: |
|
self.init_labeled_world_transforms(r=r) |
|
|
|
flattened = flatten_labeled_transforms(self.labeled_world_transforms) |
|
|
|
|
|
|
|
mask = flattened[:, -1] == 1 |
|
|
|
|
|
mask = mask.all(axis=1) |
|
|
|
flattened = flattened[mask][:, :-1] |
|
del mask |
|
|
|
|
|
flattened = self.remove_clipping_poses(flattened) |
|
|
|
return transform_translations(flattened, move_to_center, self.scale, (self.vol_x, self.vol_y, self.vol_z)) |
|
|
|
def extract_inf_translations(self, r: Union[int, Tuple[int, int], Tuple[int, int, int]] = None, |
|
merged: bool = True) -> Union[np.array, Tuple[np.array, np.array]]: |
|
""" |
|
Manipulates the existing (un)labeled world transform arrays into arrays that are suitable for inference. |
|
        It does this by flattening the labeled world transforms to shape (n_frames, 15, n_actors * 73).
|
If merged is True, merges the unlabeled data with the labeled data. |
|
:param r: Custom frame range to use if the transforms were not extracted yet. |
|
:param merged: `bool` whether to merge both arrays into one or return separate arrays. |
|
:return: If merged, returns one `np.array`, else flattened labeled `np.array` and unlabeled `np.array`. |
|
""" |
|
|
|
if self.labeled_world_transforms is None: |
|
|
|
self.init_labeled_world_transforms(r=r) |
|
if self.unlabeled_world_transforms is None: |
|
|
|
self.init_unlabeled_world_transforms(r=r) |
|
|
|
|
|
|
|
|
|
flat_labeled = self.labeled_world_transforms.transpose(0, 2, 1, 3) |
|
|
|
|
|
|
|
ls = flat_labeled.shape |
|
flat_labeled = flat_labeled.reshape(ls[0], ls[1], -1) |
|
del ls |
|
|
|
|
|
flat_labeled = replace_zeros_with_inf(flat_labeled)[:, :-1] |
|
self.unlabeled_world_transforms = replace_zeros_with_inf(self.unlabeled_world_transforms)[:, :-1] |
|
|
|
if merged: |
|
return utils.merge_labeled_and_unlabeled_data(labeled=flat_labeled, |
|
unlabeled=self.unlabeled_world_transforms, |
|
pc_size=self.pc_size) |
|
else: |
|
return flat_labeled, self.unlabeled_world_transforms |
|
|
|
def get_split_transforms(self, r: Union[int, Tuple[int, int], Tuple[int, int, int]] = None, |
|
mode: str = 'train') -> Tuple[np.array, np.array, np.array, np.array, np.array]: |
|
""" |
|
        Splits a timeline dense cloud of shape (self.num_frames, self.pc_size, 14) into 5 different
        arrays:
        1. A `np.array` with the actor classes as shape (self.num_frames, self.pc_size).
        2. A `np.array` with the marker classes as shape (self.num_frames, self.pc_size).
        3. A `np.array` with the translation values as shape (self.num_frames, self.pc_size, 3).
        4. A `np.array` with the rotation Euler angles as shape (self.num_frames, self.pc_size, 3).
        5. A `np.array` with the scale factors as shape (self.num_frames, self.pc_size, 3).
        :param r: Custom frame range to use.
        :param mode: `str` 'train' for training data, anything else for inference data.
        :return: Tuple of `np.array` as (actor classes, marker classes, translation vectors,
        rotation vectors, scale vectors).
|
""" |
|
cloud = self.extract_training_translations(r) if mode == 'train' else self.extract_inf_translations(r) |
|
|
|
return cloud[:, :, 0], cloud[:, :, 1], cloud[:, :, 2:5], cloud[:, :, 6:9], cloud[:, :, 10:13] |
|
|
|
def convert_class_to_parent(self, c: float = 0): |
|
""" |
|
Returns the actor name based on the class value. |
|
:param c: `float` actor class index. |
|
:return: `str` actor name. |
|
""" |
|
return 'UNLABELED' if int(c) == 0 else self.parent_names[int(c) - 1] |
|
|
|
def convert_class_to_child(self, c: float = 0): |
|
""" |
|
Returns the marker name based on the class value. |
|
:param c: `float` marker class index. |
|
:return: `str` marker name. |
|
""" |
|
return 'UNLABELED' if int(c) == 0 else self.child_names[int(c) - 1] |
|
|
|
def export_train_data(self, output_file: Path, r: Union[int, Tuple[int, int], Tuple[int, int, int]] = None) \ |
|
-> Union[bytes, pd.DataFrame, np.array]: |
|
""" |
|
Exports train data to an HDF5 file. |
|
:param output_file: `Path` to the file. |
|
:param r: Custom frame range to use. |
|
        :return: `np.array` of shape (n_poses, 15, 73) of train data.
|
""" |
|
if output_file.suffix == '.h5': |
|
array_4d = self.extract_training_translations(r) |
|
with h5py.File(output_file, 'w') as h5f: |
|
h5f.create_dataset('array_data', data=array_4d, compression='gzip', compression_opts=9) |
|
self._print(f'Exported train data to {output_file}', 0) |
|
return array_4d |
|
|
|
else: |
|
raise ValueError('Invalid file extension. Must be .h5') |
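
    # Usage sketch (hypothetical paths, for illustration only):
    #   container = FBXContainer(Path('take_001.fbx'))
    #   train = container.export_train_data(Path('take_001_train.h5'), r=(0, 500))
    #   train.shape  # -> (n_poses, 15, 73) for the default 73-marker setup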
|
|
|
def export_inf_data(self, output_file: Path, r: Union[int, Tuple[int, int], Tuple[int, int, int]] = None, |
|
merged: bool = True) -> Union[np.array, Tuple[np.array, np.array]]: |
|
""" |
|
Exports inference data to an HDF5 file. |
|
:param output_file: `Path` to the file. |
|
:param r: Custom frame range to use. |
|
:param merged: `bool` whether to merge the test data or output an unlabeled dataset and labeled dataset. |
|
:return: `np.array` of the test data. |
|
""" |
|
|
|
|
|
|
|
|
|
array_4d = self.extract_inf_translations(r, merged=merged) |
|
|
|
if output_file.suffix == '.h5': |
|
with h5py.File(output_file, 'w') as h5f: |
|
if merged: |
|
|
|
h5f.create_dataset('merged_data', data=array_4d, compression='gzip', compression_opts=9) |
|
|
|
else: |
|
|
|
h5f.create_dataset('labeled', data=array_4d[0], compression='gzip', compression_opts=9) |
|
h5f.create_dataset('unlabeled', data=array_4d[1], compression='gzip', compression_opts=9) |
|
|
|
self._print(f'Exported test data to {output_file}', 0) |
|
|
|
return array_4d |
|
|
|
def export_fbx(self, output_file: Path = None) -> bool: |
|
""" |
|
Exports the entire scene to the output file. If output_file is None, it uses the automatic output_fbx property. |
|
:param output_file: `Path` path to an FBX file to export the contents to. |
|
:return: `True` if successful, otherwise `False`. |
|
""" |
|
if output_file is None: |
|
output_file = self.output_fbx |
|
|
|
|
|
exporter = fbx.FbxExporter.Create(self.manager, "") |
|
|
|
|
|
result = exporter.Initialize(str(output_file)) |
|
if not result: |
|
self._print(f"Failed to initialize the exporter for file '{output_file}'.", 0) |
|
return False |
|
|
|
|
|
result = exporter.Export(self.scene) |
|
if not result: |
|
self._print(f"Failed to export the scene to file '{output_file}'.", 0) |
|
return False |
|
|
|
|
|
exporter.Destroy() |
|
|
|
self._print('Export finished', 0) |
|
|
|
return True |
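
    # Workflow sketch (hypothetical, for illustration): writing predicted keyframes back into
    # the scene and exporting a cleaned FBX. 'predicted_tsc' stands in for a sparse cloud
    # produced elsewhere (e.g. by an inference step) and is not defined in this module.
    #   container = FBXContainer(Path('take_001.fbx'))
    #   key_dict = array_to_dict(predicted_tsc, start_frame=container.start_frame)
    #   container.replace_keyframes_for_all_actors(key_dict)
    #   container.cleanup()
    #   container.export_fbx()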
|
|
|
def remove_unlabeled_markers(self) -> None: |
|
""" |
|
Uses self.remove_node() to delete all unlabeled markers from the scene. |
|
""" |
|
for m in self.unlabeled_markers: |
|
self.remove_node(m) |
|
|
|
self.remove_node(self.unlabeled_markers_parent) |
|
|
|
def remove_system(self) -> None: |
|
""" |
|
Removes all nodes under and including the System parent. |
|
""" |
|
system_node = self.get_parent_node_by_name('System') |
|
self.remove_node(system_node, recursive=True) |
|
|
|
def cleanup(self) -> None: |
|
""" |
|
Removes all unlabeled markers and System nodes. |
|
""" |
|
self.remove_unlabeled_markers() |
|
self.remove_system() |
|
|
|
def get_clean_translation_curves(self, marker: fbx.FbxNode) -> List[fbx.FbxAnimCurve]: |
|
""" |
|
Gets and cleans the local translation animation curves for the given marker. |
|
:param marker: `fbx.FbxNode` to get the anim curves off. |
|
:return: List of `fbx.FbxAnimCurve` without keyframes. |
|
""" |
|
curves = [] |
|
for axis in ['X', 'Y', 'Z']: |
|
|
|
|
|
curve = marker.LclTranslation.GetCurve(self.anim_layer, axis, True) |
|
|
|
curve.KeyClear() |
|
curves.append(curve) |
|
|
|
return curves |
|
|
|
def set_default_lcl_rotation(self, node: fbx.FbxNode, lcl_r: List[float]) -> None: |
|
""" |
|
First checks if the local rotation has an animation curve. This gets destroyed if it exists. |
|
Then it sets the default values. |
|
:param node: `fbx.FbxNode` to set the default local rotation for. |
|
:param lcl_r: List of `float` to set the default local rotation to. |
|
""" |
|
for axis in ['X', 'Y', 'Z']: |
|
curve = node.LclRotation.GetCurve(self.anim_layer, axis, False) |
|
if curve: |
|
curve.Destroy() |
|
node.LclRotation.Set(fbx.FbxDouble3(*lcl_r)) |
|
|
|
def set_default_lcl_scaling(self, node: fbx.FbxNode, lcl_s: List[float]) -> None: |
|
""" |
|
First checks if the local scaling has an animation curve. This gets destroyed if it exists. |
|
Then it sets the default values. |
|
:param node: `fbx.FbxNode` to set the default local scaling for. |
|
:param lcl_s: List of `float` to set the default local scaling to. |
|
""" |
|
for axis in ['X', 'Y', 'Z']: |
|
curve = node.LclScaling.GetCurve(self.anim_layer, axis, False) |
|
if curve: |
|
curve.Destroy() |
|
node.LclScaling.Set(fbx.FbxDouble3(*lcl_s)) |
|
|
|
def set_default_lcl_transforms(self, marker: fbx.FbxNode, marker_keys: dict) -> None: |
|
""" |
|
Finds the first frame in the dict, calculates that frame's local transform, |
|
and then sets the default values for the local rotation and scaling. |
|
:param marker: `fbx.FbxNode` marker to set the default values for. |
|
:param marker_keys: `dict` in the form of {'frame': [tx, ty, tz, rx, ry, rz...]}. |
|
""" |
|
|
|
frame = list(marker_keys.keys())[0] |
|
|
|
world_transform = marker_keys[frame] |
|
|
|
_, lcl_r, lcl_s = world_to_local_transform(marker, world_transform, frame) |
|
|
|
|
|
|
|
self.set_default_lcl_rotation(marker, lcl_r) |
|
self.set_default_lcl_scaling(marker, lcl_s) |
|
|
|
def replace_keyframes_per_marker(self, marker: fbx.FbxNode, marker_keys: dict) -> None: |
|
""" |
|
For the given marker, creates new keyframes on its local translation animation curves, |
|
        and sets the default local rotation and scaling values.
|
:param marker: `fbx.FbxNode` to set the default values on. |
|
:param marker_keys: `dict` of keys that contain all info needed to create world transform matrices. |
|
""" |
|
|
|
|
|
self.set_default_lcl_transforms(marker, marker_keys) |
|
|
|
for axis, curve in enumerate(self.get_clean_translation_curves(marker)): |
|
curve.KeyModifyBegin() |
|
|
|
|
|
|
|
for frame, world_transform in marker_keys.items(): |
|
|
|
lcl_t, _, _ = world_to_local_transform(marker, world_transform, frame) |
|
|
|
create_keyframe(curve, frame, lcl_t[axis]) |
|
|
|
curve.KeyModifyEnd() |
|
|
|
def replace_keyframes_per_actor(self, actor: int, actor_keys: dict) -> None: |
|
""" |
|
Uses self.replace_keyframes_per_marker() to keyframe all markers of given actor. |
|
:param actor: `int` actor index to apply to. Index starts at 0. |
|
:param actor_keys: `dict` with all marker keys for this actor. |
|
""" |
|
for marker_class, (marker_name, marker) in enumerate(self.children[actor].items(), start=1): |
|
marker_keys = actor_keys.get(marker_class) |
|
if marker_keys: |
|
self._print(f'Replacing keys for {marker_name}', 1) |
|
self.replace_keyframes_per_marker(marker, marker_keys) |
|
|
|
def replace_keyframes_for_all_actors(self, key_dict: dict) -> None: |
|
""" |
|
For all actors, uses self.replace_keyframes_per_actor() to set keyframes on each actor's marker nodes. |
|
:param key_dict: `dict` with all actor keyframes. |
|
""" |
|
for actor_idx in range(self.parent_count): |
|
actor_dict = key_dict.get(actor_idx + 1) |
|
if actor_dict: |
|
self._print(f'Replacing keys for actor {actor_idx}', 1) |
|
self.replace_keyframes_per_actor(actor_idx, actor_dict) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|