Natsha committed on
Commit a0a49ab · 1 Parent(s): a628625

Updated the testing workflow and fixed several bugs, including the new animation curves not connecting properly.

Files changed (3)
  1. README.md +1 -2
  2. fbx_handler.md +35 -0
  3. fbx_handler.py +221 -109
README.md CHANGED
@@ -11,9 +11,8 @@ Functionality to load FBX files, extract animation, process the animation and wr
11
  # Classifier
12
  * Globals: file with hardcoded values like the marker names.
13
  * Utilities:
14
- * Split dataset into train/valid/test sets.
15
  * Visualizations
16
- * Training file loader:
17
  * Load the `.fbx` file.
18
  * Go through each frame in the animation frame range and check if all skeleton nodes have a keyframe there.
19
  * If a keyframe is missing, remove that frame number from the valid frame numbers.
 
11
  # Classifier
12
  * Globals: file with hardcoded values like the marker names.
13
  * Utilities:
 
14
  * Visualizations
15
+ * FBX Handler:
16
  * Load the `.fbx` file.
17
  * Go through each frame in the animation frame range and check if all skeleton nodes have a keyframe there.
18
  * If a keyframe is missing, remove that frame number from the valid frame numbers.
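
The keyframe check described above could be sketched as follows, using the `is_kf_present()` helper and the per-actor `markers` mapping from `fbx_handler.py` in this commit. The loop and function name are illustrative, not the repository's exact implementation:

```python
import fbx  # Autodesk FBX Python SDK


def find_valid_frames(container, actor: int, frame_range) -> list:
    """Keep only the frames where every marker of the given actor has a translation keyframe."""
    valid = []
    for frame in frame_range:
        time = fbx.FbxTime()
        time.SetFrame(frame)
        # is_kf_present() checks the marker's X translation curve for a key at this time.
        if all(container.is_kf_present(m, time) for m in container.markers[actor].values()):
            valid.append(frame)
    return valid
```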
fbx_handler.md ADDED
@@ -0,0 +1,35 @@
1
+ # FBX Handler
2
+
3
+ ## Load file:
4
+ ```python
5
+ # Path to file to load.
6
+ input_file = Path('/path/to/file.fbx')
7
+ # Load file into class.
8
+ container = FBXContainer(input_file)
9
+ ```
10
+
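
For reference, a minimal sketch of the imports the snippet above assumes; the flat `fbx_handler` module path is an assumption based on this repository's layout:

```python
from pathlib import Path

# Assumes fbx_handler.py is on the import path (it sits at the repository root).
from fbx_handler import FBXContainer
```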
11
+ ## Training workflow:
12
+ ```python
13
+ # Get dataframe with all valid translation numbers.
14
+ df = container.extract_all_valid_translations()
15
+ # Convert to dataset...
16
+ ...
17
+ ```
18
+
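
One way the elided "convert to dataset" step could look, assuming scikit-learn is available; the split ratios and the use of `train_test_split` are illustrative and not part of this commit:

```python
from sklearn.model_selection import train_test_split

# Hypothetical split of the extracted translations into train/valid/test sets (70/15/15).
train_df, rest_df = train_test_split(df, test_size=0.3, random_state=42)
valid_df, test_df = train_test_split(rest_df, test_size=0.5, random_state=42)
```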
19
+ ## Testing workflow:
20
+ ```python
21
+ # Get timeline dense cloud.
22
+ tdc = container.get_tdc() # wrap in shuffle_tdc() to shuffle nodes.
23
+ # Split array into subarrays.
24
+ actors_test, markers_test, t_test, r_test, s_test = container.split_tdc(tdc)
25
+ # Predict the new actors and classes...
26
+ actors_pred, markers_pred = Labeler(container.transform_translations(t_test))
27
+ # Merge the new labels with their original translations.
28
+ merged = merge_tdc(actors_pred, markers_pred, t_test, r_test, s_test)
29
+ # Convert the full cloud into a dict structured for easy keyframing.
30
+ new_dict = tsc_to_dict(merged)
31
+ # Replace the old translation keyframes with the new values.
32
+ container.replace_keyframes_for_all_actors(new_dict)
33
+ # Export file.
34
+ container.export_fbx(Path('/path/to/outputfile.fbx'))
35
+ ```
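
If the marker rows should be shuffled, as the comment next to `get_tdc()` suggests, the module-level `shuffle_tdc()` helper added in this commit wraps the call; a minimal example:

```python
# Shuffle the marker rows of each frame; labels stay attached to their rows.
tdc = shuffle_tdc(container.get_tdc())
actors_test, markers_test, t_test, r_test, s_test = container.split_tdc(tdc)
```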
fbx_handler.py CHANGED
@@ -100,6 +100,23 @@ def merge_tdc(actor_classes: np.array,
100
  return tdc
101
 
102
 
103
  def sort_cloud(cloud: np.array) -> np.array:
104
  """
105
  Convenience function to sort a timeline dense cloud by actor and marker classes.
@@ -156,16 +173,22 @@ def match_name(node: fbx.FbxNode, name: str, ignore_namespace: bool = True) -> b
156
  return node_name == name
157
 
158
 
159
- def timeline_cloud_to_dict(data: np.array, start_frame: int = 0) -> dict:
160
  # Initialize an empty dictionary.
161
  result = {}
162
 
163
  # Iterate over the first dimension (frames) and second dimension (markers).
164
- for frame, node in itertools.product(range(data.shape[0]), range(data.shape[1])):
165
 
166
  # Extract the actor class, node class, and translation vector.
167
- actor_class = int(data[frame, node, 0])
168
- marker_class = int(data[frame, node, 1])
169
  # If actor or marker class is predicted to be 0 (unlabeled marker), then skip adding it to the dict,
170
  # because we only want to keyframe labeled markers.
171
  if actor_class == 0 or marker_class == 0:
@@ -173,10 +196,11 @@ def timeline_cloud_to_dict(data: np.array, start_frame: int = 0) -> dict:
173
 
174
  # Just to be sure, forcing the last numbers of each array to be the correct values.
175
  # Also check self.get_world_transform() for this.
176
- translations = data[frame, node, 2:5] + np.array([0.0])
177
- rotations = data[frame, node, 6:9] + np.array([0.0])
178
- scales = data[frame, node, 10:13] + np.array([1.0])
179
 
 
180
  world_matrix = fbx.FbxAMatrix()
181
  world_matrix.SetT(fbx.FbxVector4(*translations))
182
  world_matrix.SetR(fbx.FbxVector4(*rotations))
@@ -196,7 +220,16 @@ def timeline_cloud_to_dict(data: np.array, start_frame: int = 0) -> dict:
196
  return result
197
 
198
 
199
- def world_to_local_transform(node, world_transform, frame):
200
  t = fbx.FbxTime()
201
  t.SetFrame(frame)
202
  if node.GetParent():
@@ -215,6 +248,59 @@ def world_to_local_transform(node, world_transform, frame):
215
  return [lcl.GetT()[t] for t in range(3)], [lcl.GetR()[r] for r in range(3)], [lcl.GetS()[s] for s in range(3)]
216
 
217
 
218
  class FBXContainer:
219
  def __init__(self, fbx_file: Path,
220
  volume_dims: Tuple[float] = (10., 4., 10.),
@@ -418,7 +504,7 @@ class FBXContainer:
418
  if not len(self.valid_frames[actor]):
419
  self._set_valid_frames_for_actor(actor)
420
 
421
- def _modify_pose(self, actor: int = 0, frame: int = 0) -> List[float]:
422
  """
423
  Evaluates all marker nodes for the given actor and modifies the resulting point cloud,
424
  so it is centered and scaled properly for training.
@@ -580,7 +666,7 @@ class FBXContainer:
580
  # Note that these frames can be different per actor.
581
  for frame in self.valid_frames[actor]:
582
  # Get the centered point cloud as a 1D list.
583
- pose_at_frame = self._modify_pose(actor, frame)
584
  poses.append(pose_at_frame)
585
 
586
  return poses
@@ -601,32 +687,29 @@ class FBXContainer:
601
 
602
  return pd.DataFrame(all_poses, columns=columns)
603
 
604
- def get_world_transform(self, m: fbx.FbxNode, time: fbx.FbxTime, apply_transform: bool = True) -> List[float]:
605
  """
606
- Evaluates the world translation of the given marker at the given time,
607
- scales it down by scale and turns it into a vector list.
608
- :param m: `fbx.FbxNode` marker to evaluate the world translation of.
609
- :param time: `fbx.FbxTime` time to evaluate at.
610
- :param apply_transform: `bool` Whether to transform the translation or not.
611
- :return: Vector in the form: [tx, ty, tz].
612
  """
613
- world = m.EvaluateGlobalTransform(time)
614
- world = list(world.GetT()) + list(world.GetR()) + list(world.GetS())
615
- # Make sure that the last numbers of each row are the correct values.
616
- world[3] = 0.0
617
- world[7] = 0.0
618
- world[11] = 1.0
619
 
620
- if not apply_transform:
621
- return world
 
622
 
623
  # First multiply by self.scale, which turns meters to centimeters.
624
  # Then divide by volume dimensions, to normalize to the total area of the capture volume.
625
- world[2] = np.clip(world[2], -(self.vol_x * 0.5), self.vol_x * 0.5) * self.scale / self.vol_x
626
- world[3] = np.clip(world[3], -(self.vol_y * 0.5), self.vol_y * 0.5) * self.scale / self.vol_y
627
- world[4] = np.clip(world[4], -(self.vol_z * 0.5), self.vol_z * 0.5) * self.scale / self.vol_z
628
 
629
- return world
630
 
631
  def is_kf_present(self, marker: fbx.FbxNode, time: fbx.FbxTime) -> bool:
632
  """
@@ -639,11 +722,10 @@ class FBXContainer:
639
  curve = marker.LclTranslation.GetCurve(self.anim_layer, 'X')
640
  return False if curve is None else curve.KeyFind(time) != -1
641
 
642
- def get_sc(self, frame: int, apply_transform: bool = True) -> np.array:
643
  """
644
  For each actor at the given time, find all markers with keyframes and add their values to a point cloud.
645
  :param frame: `fbx.FbxTime` time at which to evaluate the marker.
646
- :param apply_transform: `bool` Whether to transform the translation or not.
647
  :return: sparse point cloud as `np.array`.
648
  """
649
  time = fbx.FbxTime()
@@ -653,7 +735,7 @@ class FBXContainer:
653
  # because by adding the labeled markers after (which use classes 1-74),
654
  # we eventually return an array that doesn't need to be sorted anymore.
655
  cloud = [
656
- [0, 0, *self.get_world_transform(m, time, apply_transform)]
657
  for m in self.unlabeled_markers
658
  if self.is_kf_present(m, time)
659
  ]
@@ -664,7 +746,7 @@ class FBXContainer:
664
  # This actor's point cloud is made up of all markers that have a keyframe at the given time.
665
  # For each marker, we create this row: [actor class (index+1), marker class (index+1), tx, ty, tz].
666
  # We use index+1 because the unlabeled markers will use index 0 for both classes.
667
- [actor_idx + 1, marker_class, *self.get_world_transform(m, time, apply_transform)]
668
  for marker_class, (marker_name, m) in enumerate(
669
  self.markers[actor_idx].items(), start=1
670
  )
@@ -679,18 +761,15 @@ class FBXContainer:
679
  # so return the cloud as a np array that cuts off any excessive markers.
680
  return np.array(cloud)[:self.pc_size]
681
 
682
- def get_tsc(self, apply_transform: bool = True) -> np.array:
683
  """
684
  Convenience method that calls self.get_sparse_cloud() for all frames in the frame range
685
  and returns the combined result.
686
- :param apply_transform: `bool` Whether to transform the translation or not.
687
  :return: `np.array` that contains a sparse cloud for each frame in the frame range.
688
  """
689
- return np.array([self.get_sc(f, apply_transform) for f in self.get_frame_range()])
690
 
691
- def get_tdc(self, r: Union[int, Tuple[int, int]] = None,
692
- shuffle: bool = False,
693
- apply_transform: bool = True) -> np.array:
694
  """
695
  For each frame in the frame range, collects the point cloud that is present in the file.
696
  Then it creates a ghost cloud of random markers that are treated as unlabeled markers,
@@ -698,8 +777,6 @@ class FBXContainer:
698
  Optionally shuffles this dense cloud before adding it to the final list.
699
  :param r: tuple of `int` that indicates the frame range to get. Default is None,
700
  resulting in the animation frame range.
701
- :param shuffle: If `True`, shuffles the dense point cloud of each frame.
702
- :param apply_transform: `bool` Whether to transform the translation or not.
703
  :return: `np.array` that contains a dense point cloud for each frame,
704
  with a shape of (self.num_frames, self.pc_size, 5).
705
  """
@@ -717,7 +794,7 @@ class FBXContainer:
717
  r = self.get_frame_range()
718
 
719
  for frame in r:
720
- cloud = self.get_sc(frame, apply_transform)
721
  missing = self.pc_size - cloud.shape[0]
722
 
723
  # Only bother creating ghost markers if there are any missing rows.
@@ -727,16 +804,11 @@ class FBXContainer:
727
  ghost_cloud = make_ghost_markers(missing)
728
  cloud = np.vstack([ghost_cloud, cloud])
729
 
730
- # Shuffle the rows if needed. Because each row contains all dependent and independent variables,
731
- # shuffling won't mess up the labels.
732
- if shuffle:
733
- np.random.shuffle(cloud)
734
-
735
  clouds.append(cloud)
736
 
737
  return np.array(clouds)
738
 
739
- def split_tdc(self, cloud: np.array = None, shuffle: bool = False, apply_transform: bool = True) \
740
  -> Tuple[np.array, np.array, np.array, np.array, np.array]:
741
  """
742
  Splits a timeline dense cloud with shape (self.num_frames, self.pc_size, 5) into 3 different
@@ -747,12 +819,10 @@ class FBXContainer:
747
  4. A `np.array` with the rotation Euler angles as shape (self.num_frames, self.pc_size, 3).
748
  :param cloud: `np.array` of shape (self.num_frames, self.pc_size, 5) that contains a dense point cloud
749
  (self.pc_size, 5) per frame in the frame range.
750
- :param shuffle: `bool` whether to shuffle the generated cloud if no cloud was given.
751
- :param apply_transform: `bool` Whether to transform the translation or not.
752
  :return: Return tuple of `np.array` as (actor classes, marker classes, translation vectors).
753
  """
754
  if cloud is None:
755
- cloud = self.get_tdc(shuffle=shuffle, apply_transform=apply_transform)
756
 
757
  if cloud.shape[1] != 1000:
758
  raise ValueError(f"Dense cloud doesn't have enough points. {cloud.shape[1]}/1000.")
@@ -852,91 +922,133 @@ class FBXContainer:
852
  self.remove_node(self.unlabeled_markers_parent)
853
 
854
  def remove_system(self) -> None:
855
  system_node = self.get_parent_node_by_name('System')
856
  self.remove_node(system_node, recursive=True)
857
 
858
  def cleanup(self) -> None:
859
  self.remove_unlabeled_markers()
860
  self.remove_system()
861
 
862
- def replace_animation_curves(self, node, curve_types):
863
- anim_curve_dict = {
864
- 't': [
865
- ("X", node.LclTranslation),
866
- ("Y", node.LclTranslation),
867
- ("Z", node.LclTranslation)
868
- ],
869
- 'r': [
870
- ("X", node.LclRotation),
871
- ("Y", node.LclRotation),
872
- ("Z", node.LclRotation)
873
- ],
874
- 's': [
875
- ("X", node.LclScaling),
876
- ("Y", node.LclScaling),
877
- ("Z", node.LclScaling)
878
- ]
879
- }
880
-
881
- anim_curves = []
882
- for curve_type in curve_types:
883
- if curve_type in anim_curve_dict:
884
- for anim_curve_name, p in anim_curve_dict[curve_type]:
885
- # Disconnect and remove existing animation curve, if any
886
- existing_anim_curve = p.GetCurve(self.anim_layer, anim_curve_name, False)
887
- # TODO: Make sure a new anim curve is properly connected.
888
- if existing_anim_curve:
889
- existing_anim_curve.KeyClear()
890
- anim_curves.append(existing_anim_curve)
891
- # p.DisconnectSrcObject(existing_anim_curve)
892
- # existing_anim_curve.Destroy()
893
- # del existing_anim_curve
894
- #
895
- # # Create a new animation curve and connect it to the node and animation layer
896
- # new_anim_curve = fbx.FbxAnimCurve.Create(self.manager, anim_curve_name)
897
- # p.ConnectSrcObject(new_anim_curve)
898
- # new_anim_curve.ConnectDstObject(self.anim_layer)
899
- #
900
- # anim_curves.append(new_anim_curve)
901
-
902
- return anim_curves
903
 
904
- def replace_keyframes_per_marker(self, marker: fbx.FbxNode, marker_keys: dict) -> None:
905
 
906
- # Collect lcl transform curves.
907
- curves = self.replace_animation_curves(marker, 't')
908
 
909
- # TODO: Only set translation keys. Set rotation and scale as property values instead of curves.
910
- for axis, curve in enumerate(curves):
 
 
911
 
912
  curve.KeyModifyBegin()
913
 
914
  # The dict has frames mapped to world matrices.
915
  # The world_transform here is that full matrix, so we only need to convert this to local space.
916
  for frame, world_transform in marker_keys.items():
917
  # Convert world to local transform at the given frame.
918
- lcl_t, lcl_r, lcl_s = world_to_local_transform(marker, world_transform, frame)
919
- # Only for translations, set keyframes.
920
  create_keyframe(curve, frame, lcl_t[axis])
921
 
922
  curve.KeyModifyEnd()
923
 
924
  def replace_keyframes_per_actor(self, actor: int, actor_keys: dict) -> None:
925
  for marker_class, (marker_name, marker) in enumerate(self.markers[actor].items(), start=1):
926
- self.replace_keyframes_per_marker(marker, actor_keys[marker_class])
 
 
927
 
928
  def replace_keyframes_for_all_actors(self, key_dict: dict) -> None:
929
  for actor_idx in range(self.actor_count):
930
- self.replace_keyframes_per_actor(actor_idx, key_dict[actor_idx + 1])
 
 
931
 
932
 
933
  # d = FBXContainer(Path('G:/Firestorm/mocap-ai/data/fbx/dowg/TAKE_01+1_ALL_001.fbx'))
934
- # # cloud = d.get_tdc(apply_transform=False)
935
- # actors_train, markers_train, t_train, r_train, s_train = d.split_tdc(apply_transform=True)
936
- # actors_test, markers_test, t_test, r_test, s_test = d.split_tdc(apply_transform=False)
937
- # # splits = d.split_tdc(apply_transform=False)
938
- # merged = merge_tdc(actors_train, markers_train, t_test, r_test, s_test)
939
- # pc_dict = timeline_cloud_to_dict(merged, d.start_frame)
940
- # d.replace_keyframes_for_all_actors(pc_dict)
941
- # # d.cleanup()
942
  # d.export_fbx(Path('G:/Firestorm/mocap-ai/data/fbx/export/TAKE_01+1_ALL_001.fbx'))
 
100
  return tdc
101
 
102
 
103
+ def shuffle_tdc(tdc: np.array) -> np.array:
104
+ """
105
+ Shuffles the given timeline dense cloud along its second dimension, the marker rows.
106
+ Because each row keeps its labels and transforms together, shuffling does not corrupt them.
107
+ :param tdc: `np.array` to shuffle.
108
+ :return: shuffled `np.array`.
109
+ """
110
+ # This function only works for arrays with 3 dimensions.
111
+ if tdc.ndim != 3:
112
+ raise ValueError(f'Array does not have 3 dimensions: {tdc.ndim}/3.')
113
+
114
+ # Shuffle the node rows.
115
+ for i in range(tdc.shape[0]):
116
+ np.random.shuffle(tdc[i])
117
+ return tdc
118
+
119
+
120
  def sort_cloud(cloud: np.array) -> np.array:
121
  """
122
  Convenience function to sort a timeline dense cloud by actor and marker classes.
 
173
  return node_name == name
174
 
175
 
176
+ def tsc_to_dict(tsc: np.array, start_frame: int = 0) -> dict:
177
+ """
178
+ Converts an `np.array` timeline sparse cloud to a dictionary structured for keyframed animation.
179
+ :param tsc: `np.array` timeline sparse cloud to process.
180
+ :param start_frame: Optional `int` frame at which the animation starts, useful for timecode.
181
+ :return: `dict` optimized for retrieving keyframe info.
182
+ """
183
  # Initialize an empty dictionary.
184
  result = {}
185
 
186
  # Iterate over the first dimension (frames) and second dimension (markers).
187
+ for frame, node in itertools.product(range(tsc.shape[0]), range(tsc.shape[1])):
188
 
189
  # Extract the actor class, node class, and translation vector.
190
+ actor_class = int(tsc[frame, node, 0])
191
+ marker_class = int(tsc[frame, node, 1])
192
  # If actor or marker class is predicted to be 0 (unlabeled marker), then skip adding it to the dict,
193
  # because we only want to keyframe labeled markers.
194
  if actor_class == 0 or marker_class == 0:
 
196
 
197
  # Just to be sure, forcing the last numbers of each array to be the correct values.
198
  # Also check self.get_world_transform() for this.
199
+ translations = np.append(tsc[frame, node, 2:5], 0.0)
200
+ rotations = np.append(tsc[frame, node, 6:9], 0.0)
201
+ scales = np.append(tsc[frame, node, 10:13], 1.0)
202
 
203
+ # Build a world transform matrix from the transform values.
204
  world_matrix = fbx.FbxAMatrix()
205
  world_matrix.SetT(fbx.FbxVector4(*translations))
206
  world_matrix.SetR(fbx.FbxVector4(*rotations))
 
220
  return result
221
 
222
 
223
+ def world_to_local_transform(node: fbx.FbxNode, world_transform: fbx.FbxAMatrix, frame: int) -> \
224
+ Tuple[List[float], List[float], List[float]]:
225
+ """
226
+ Takes a world transform and uses the node's parent world transform to calculate this node's
227
+ local transform at the given frame.
228
+ :param node: `fbx.FbxNode` that the given world transform belongs to.
229
+ :param world_transform: `fbx.FbxAMatrix` world transform to convert to local transform.
230
+ :param frame: `int` frame number at which to evaluate the parent's world transform.
231
+ :return: Tuple of local translation, rotation and scaling, each as a list of three floats.
232
+ """
233
  t = fbx.FbxTime()
234
  t.SetFrame(frame)
235
  if node.GetParent():
 
248
  return [lcl.GetT()[t] for t in range(3)], [lcl.GetR()[r] for r in range(3)], [lcl.GetS()[s] for s in range(3)]
249
 
250
 
251
+ def get_world_transform(m: fbx.FbxNode, time: fbx.FbxTime, axes: str = 'trs') -> np.array:
252
+ """
253
+ Evaluates the world transform of the given node at the given time
254
+ and returns the requested components as a flat vector.
255
+ :param m: `fbx.FbxNode` marker to evaluate the world translation of.
256
+ :param time: `fbx.FbxTime` time to evaluate at.
257
+ :param axes: `str` that contains types of info to include. Options are a combination of t, r, and s.
258
+ :return: `np.array` in the form [tx, ty, tz, ...], depending on the requested axes.
259
+ """
260
+ matrix = m.EvaluateGlobalTransform(time)
261
+
262
+ # If axes requests only the translation, return a (tx, ty, tz) vector (useful for training).
263
+ if axes == 't':
264
+ return np.array([matrix[i] for i in range(3)])
265
+
266
+ # Otherwise, we assemble the entire row depending on the axes.
267
+ world = []
268
+ if 't' in axes:
269
+ world += list(matrix.GetT())
270
+ world[3] = 0.0
271
+ if 'r' in axes:
272
+ world += list(matrix.GetR())
273
+ world[7] = 0.0
274
+ if 's' in axes:
275
+ world += list(matrix.GetS())
276
+ world[11] = 1.0
277
+
278
+ return np.array(world)
279
+
280
+
281
+ def isolate_actor_from_tdc(tdc: np.array, actor: int) -> np.array:
282
+ """
283
+ Returns all markers of the given actor in the timeline dense cloud.
284
+ :param tdc: `np.array` timeline dense cloud to filter.
285
+ :param actor: `int` actor class, starting at 1.
286
+ :return: `np.array` that contains only the markers of the given actor.
287
+ """
288
+ if actor == 0:
289
+ raise ValueError('Second argument (actor) cannot be 0; it must be 1 or higher.')
290
+ mask = tdc[:, :, 0] == float(actor)
291
+ return tdc[mask]
292
+
293
+
294
+ def split_tdc_into_actors(tdc: np.array) -> List[np.array]:
295
+ """
296
+ Uses isolate_actor_from_tdc() to isolate all unique actors in the timeline dense cloud.
297
+ :param tdc: Timeline dense cloud to filter.
298
+ :return: List of isolated actor `np.array`.
299
+ """
300
+ actor_count = len([x for x in np.unique(tdc[:, :, 0]) if x != 0.])
301
+ return [isolate_actor_from_tdc(tdc, i) for i in range(1, actor_count + 1)]
302
+
303
+
304
  class FBXContainer:
305
  def __init__(self, fbx_file: Path,
306
  volume_dims: Tuple[float] = (10., 4., 10.),
 
504
  if not len(self.valid_frames[actor]):
505
  self._set_valid_frames_for_actor(actor)
506
 
507
+ def get_transformed_pc(self, actor: int = 0, frame: int = 0) -> List[float]:
508
  """
509
  Evaluates all marker nodes for the given actor and modifies the resulting point cloud,
510
  so it is centered and scaled properly for training.
 
666
  # Note that these frames can be different per actor.
667
  for frame in self.valid_frames[actor]:
668
  # Get the centered point cloud as a 1D list.
669
+ pose_at_frame = self.get_transformed_pc(actor, frame)
670
  poses.append(pose_at_frame)
671
 
672
  return poses
 
687
 
688
  return pd.DataFrame(all_poses, columns=columns)
689
 
690
+ def transform_translations(self, w: np.array) -> np.array:
691
  """
692
+ Applies a scaling to the translation values in the given array.
693
+ :param w: `np.array` that can either be a timeline dense cloud or translation vectors.
694
+ :return: Modified `np.array`.
695
  """
696
+ if w.ndim != 3:
697
+ raise ValueError(f'Array does not have 3 dimensions: {w.ndim}/3.')
698
 
699
+ # If the last dimension has 3 elements, it is a translation vector of shape (tx, ty, tz).
700
+ # If it has 14 elements, it is a full marker row of shape (actor, marker, tx, ty, tz, rx, ry, rz, etc).
701
+ start = 0 if w.shape[-1] == 3 else 2
702
 
703
  # First multiply by self.scale, which turns meters to centimeters.
704
  # Then divide by volume dimensions, to normalize to the total area of the capture volume.
705
+ w[:, :, start + 0] = np.clip(w[:, :, start + 0], -(self.vol_x * 0.5),
706
+ self.vol_x * 0.5) * self.scale / self.vol_x
707
+ w[:, :, start + 1] = np.clip(w[:, :, start + 1], -(self.vol_y * 0.5),
708
+ self.vol_y * 0.5) * self.scale / self.vol_y
709
+ w[:, :, start + 2] = np.clip(w[:, :, start + 2], -(self.vol_z * 0.5),
710
+ self.vol_z * 0.5) * self.scale / self.vol_z
711
 
712
+ return w
713
 
714
  def is_kf_present(self, marker: fbx.FbxNode, time: fbx.FbxTime) -> bool:
715
  """
 
722
  curve = marker.LclTranslation.GetCurve(self.anim_layer, 'X')
723
  return False if curve is None else curve.KeyFind(time) != -1
724
 
725
+ def get_sc(self, frame: int) -> np.array:
726
  """
727
  For each actor at the given time, find all markers with keyframes and add their values to a point cloud.
728
  :param frame: `fbx.FbxTime` time at which to evaluate the marker.
 
729
  :return: sparse point cloud as `np.array`.
730
  """
731
  time = fbx.FbxTime()
 
735
  # because by adding the labeled markers after (which use classes 1-74),
736
  # we eventually return an array that doesn't need to be sorted anymore.
737
  cloud = [
738
+ [0, 0, *get_world_transform(m, time)]
739
  for m in self.unlabeled_markers
740
  if self.is_kf_present(m, time)
741
  ]
 
746
  # This actor's point cloud is made up of all markers that have a keyframe at the given time.
747
  # For each marker, we create this row: [actor class (index+1), marker class (index+1), tx, ty, tz].
748
  # We use index+1 because the unlabeled markers will use index 0 for both classes.
749
+ [actor_idx + 1, marker_class, *get_world_transform(m, time)]
750
  for marker_class, (marker_name, m) in enumerate(
751
  self.markers[actor_idx].items(), start=1
752
  )
 
761
  # so return the cloud as a np array that cuts off any excessive markers.
762
  return np.array(cloud)[:self.pc_size]
763
 
764
+ def get_tsc(self) -> np.array:
765
  """
766
  Convenience method that calls self.get_sparse_cloud() for all frames in the frame range
767
  and returns the combined result.
 
768
  :return: `np.array` that contains a sparse cloud for each frame in the frame range.
769
  """
770
+ return np.array([self.get_sc(f) for f in self.get_frame_range()])
771
 
772
+ def get_tdc(self, r: Union[int, Tuple[int, int]] = None) -> np.array:
 
 
773
  """
774
  For each frame in the frame range, collects the point cloud that is present in the file.
775
  Then it creates a ghost cloud of random markers that are treated as unlabeled markers,
 
777
  Optionally shuffles this dense cloud before adding it to the final list.
778
  :param r: tuple of `int` that indicates the frame range to get. Default is None,
779
  resulting in the animation frame range.
 
 
780
  :return: `np.array` that contains a dense point cloud for each frame,
781
  with a shape of (self.num_frames, self.pc_size, 5).
782
  """
 
794
  r = self.get_frame_range()
795
 
796
  for frame in r:
797
+ cloud = self.get_sc(frame)
798
  missing = self.pc_size - cloud.shape[0]
799
 
800
  # Only bother creating ghost markers if there are any missing rows.
 
804
  ghost_cloud = make_ghost_markers(missing)
805
  cloud = np.vstack([ghost_cloud, cloud])
806
 
 
807
  clouds.append(cloud)
808
 
809
  return np.array(clouds)
810
 
811
+ def split_tdc(self, cloud: np.array = None) \
812
  -> Tuple[np.array, np.array, np.array, np.array, np.array]:
813
  """
814
  Splits a timeline dense cloud with shape (self.num_frames, self.pc_size, 5) into 3 different
 
819
  4. A `np.array` with the rotation Euler angles as shape (self.num_frames, self.pc_size, 3).
820
  :param cloud: `np.array` of shape (self.num_frames, self.pc_size, 5) that contains a dense point cloud
821
  (self.pc_size, 5) per frame in the frame range.
 
 
822
  :return: Return tuple of `np.array` as (actor classes, marker classes, translation vectors).
823
  """
824
  if cloud is None:
825
+ cloud = self.get_tdc()
826
 
827
  if cloud.shape[1] != 1000:
828
  raise ValueError(f"Dense cloud doesn't have enough points. {cloud.shape[1]}/1000.")
 
922
  self.remove_node(self.unlabeled_markers_parent)
923
 
924
  def remove_system(self) -> None:
925
+ """
926
+ Removes all nodes under and including the System parent.
927
+ """
928
  system_node = self.get_parent_node_by_name('System')
929
  self.remove_node(system_node, recursive=True)
930
 
931
  def cleanup(self) -> None:
932
+ """
933
+ Removes all unlabeled markers and System nodes.
934
+ """
935
  self.remove_unlabeled_markers()
936
  self.remove_system()
937
 
938
+ def get_clean_translation_curves(self, marker: fbx.FbxNode) -> List[fbx.FbxAnimCurve]:
939
+ """
940
+ Gets and cleans the local translation animation curves for the given marker.
941
+ :param marker: `fbx.FbxNode` to get the anim curves from.
942
+ :return: List of `fbx.FbxAnimCurve` without keyframes.
943
+ """
944
+ curves = []
945
+ for axis in ['X', 'Y', 'Z']:
946
+ # Last argument is True, so if no anim curve was connected here,
947
+ # it'll automatically make a new one and connect it correctly.
948
+ curve = marker.LclTranslation.GetCurve(self.anim_layer, axis, True)
949
+ # Remove all existing keyframes.
950
+ curve.KeyClear()
951
+ curves.append(curve)
952
 
953
+ return curves
954
 
955
+ def set_default_lcl_rotation(self, node: fbx.FbxNode, lcl_r: List[float]) -> None:
956
+ """
957
+ First checks if the local rotation has an animation curve. This gets destroyed if it exists.
958
+ Then it sets the default values.
959
+ :param node: `fbx.FbxNode` to set the default local rotation for.
960
+ :param lcl_r: List of `float` to set the default local rotation to.
961
+ """
962
+ for axis in ['X', 'Y', 'Z']:
963
+ curve = node.LclRotation.GetCurve(self.anim_layer, axis, False)
964
+ if curve:
965
+ curve.Destroy()
966
+ node.LclRotation.Set(fbx.FbxDouble3(*lcl_r))
967
+
968
+ def set_default_lcl_scaling(self, node: fbx.FbxNode, lcl_s: List[float]) -> None:
969
+ """
970
+ First checks if the local scaling has an animation curve. This gets destroyed if it exists.
971
+ Then it sets the default values.
972
+ :param node: `fbx.FbxNode` to set the default local scaling for.
973
+ :param lcl_s: List of `float` to set the default local scaling to.
974
+ """
975
+ for axis in ['X', 'Y', 'Z']:
976
+ curve = node.LclScaling.GetCurve(self.anim_layer, axis, False)
977
+ if curve:
978
+ curve.Destroy()
979
+ node.LclScaling.Set(fbx.FbxDouble3(*lcl_s))
980
+
981
+ def set_default_lcl_transforms(self, marker: fbx.FbxNode, marker_keys: dict) -> None:
982
+ """
983
+ Finds the first frame in the dict, calculates that frame's local transform,
984
+ and then sets the default values for the local rotation and scaling.
985
+ :param marker: `fbx.FbxNode` marker to set the default values for.
986
+ :param marker_keys: `dict` mapping frame numbers to `fbx.FbxAMatrix` world transforms.
987
+ """
988
+ # Find the first frame in the dict.
989
+ frame = list(marker_keys.keys())[0]
990
+ # Find the FbxAMatrix stored under that frame.
991
+ world_transform = marker_keys[frame]
992
+ # Calculate the local rotation and scaling, ignore the translation.
993
+ _, lcl_r, lcl_s = world_to_local_transform(marker, world_transform, frame)
994
 
995
+ # Set the default values for the rotation and scaling, as they don't need keyframes.
996
+ # Note that these functions will destroy any associated anim curves.
997
+ self.set_default_lcl_rotation(marker, lcl_r)
998
+ self.set_default_lcl_scaling(marker, lcl_s)
999
 
1000
+ def replace_keyframes_per_marker(self, marker: fbx.FbxNode, marker_keys: dict) -> None:
1001
+ # Set the default local rotation and scaling from the first keyframe,
1002
+ # so only the translation needs animation curves.
1003
+ self.set_default_lcl_transforms(marker, marker_keys)
1004
+
1005
+ for axis, curve in enumerate(self.get_clean_translation_curves(marker)):
1006
  curve.KeyModifyBegin()
1007
 
1008
  # The dict has frames mapped to world matrices.
1009
  # The world_transform here is that full matrix, so we only need to convert this to local space.
1010
  for frame, world_transform in marker_keys.items():
1011
  # Convert world to local transform at the given frame.
1012
+ lcl_t, _, _ = world_to_local_transform(marker, world_transform, frame)
1013
+ # Only set keyframes for the translations.
1014
  create_keyframe(curve, frame, lcl_t[axis])
1015
 
1016
  curve.KeyModifyEnd()
1017
 
1018
  def replace_keyframes_per_actor(self, actor: int, actor_keys: dict) -> None:
1019
+ """
1020
+ Uses self.replace_keyframes_per_marker() to keyframe all markers of given actor.
1021
+ :param actor: `int` actor index to apply to. Index starts at 0.
1022
+ :param actor_keys: `dict` with all marker keys for this actor.
1023
+ """
1024
  for marker_class, (marker_name, marker) in enumerate(self.markers[actor].items(), start=1):
1025
+ marker_keys = actor_keys.get(marker_class)
1026
+ if marker_keys:
1027
+ self.replace_keyframes_per_marker(marker, marker_keys)
1028
 
1029
  def replace_keyframes_for_all_actors(self, key_dict: dict) -> None:
1030
+ """
1031
+ For all actors, uses self.replace_keyframes_per_actor() to set keyframes on each actor's marker nodes.
1032
+ :param key_dict: `dict` with all actor keyframes.
1033
+ """
1034
  for actor_idx in range(self.actor_count):
1035
+ actor_dict = key_dict.get(actor_idx + 1)
1036
+ if actor_dict:
1037
+ self.replace_keyframes_per_actor(actor_idx, actor_dict)
1038
 
1039
 
1040
  # d = FBXContainer(Path('G:/Firestorm/mocap-ai/data/fbx/dowg/TAKE_01+1_ALL_001.fbx'))
1041
+ # og_cloud = d.get_tdc()
1042
+ # # print(og_cloud[0, -10:, 2:5])
1043
+ # di = tsc_to_dict(og_cloud)
1044
+ # d.replace_keyframes_for_all_actors(di)
1045
+ # # new_cloud = d.get_tdc(r=100)
1046
+ # # print(new_cloud[0, -10:, 2:5])
1047
+ # # actors_train, markers_train, t_train, r_train, s_train = d.split_tdc(cloud)
1048
+ # # # t_train_transformed = d.transform_translations(t_train)
1049
+ # # # splits = d.split_tdc(apply_transform=False)
1050
+ # # merged = merge_tdc(actors_train, markers_train, t_train, r_train, s_train)
1051
+ # # pc_dict = tsc_to_dict(merged, d.start_frame)
1052
+ # # d.replace_keyframes_for_all_actors(pc_dict)
1053
+ # # # d.cleanup()
1054
  # d.export_fbx(Path('G:/Firestorm/mocap-ai/data/fbx/export/TAKE_01+1_ALL_001.fbx'))