Initial push to check FBX SDK installation.
- Dockerfile +41 -0
- README.md +48 -6
- app.py +8 -0
- config.toml +2 -0
- fbx202032_fbxpythonsdk_linux.tar.gz +3 -0
- requirements.txt +1 -0
Dockerfile
ADDED
@@ -0,0 +1,41 @@
+# Use an official Python runtime as a parent image
+FROM python:3.7-slim
+
+RUN apt-get update && \
+    apt-get install -y tar && \
+    apt-get install -y libxml2 && \
+    ln -s /bin/tar /usr/bin/tar
+
+
+# Set the working directory to /app
+RUN mkdir /app
+WORKDIR /app
+
+# Copy the current directory contents into the container at /app
+COPY . /app
+
+# Make install directory
+RUN mkdir -p /python-fbx/install
+RUN chmod -R ugo+w /python-fbx
+
+# Unzip FBX SDK
+RUN tar -vxzf fbx202032_fbxpythonsdk_linux.tar.gz -C /python-fbx
+RUN chmod ugo+x /python-fbx/fbx202032_fbxpythonsdk_linux
+RUN printf "yes\nn" | /python-fbx/fbx202032_fbxpythonsdk_linux /python-fbx/install
+
+# Install FBX SDK
+RUN cp /python-fbx/install/lib/Python37_x64/* /usr/local/lib/python3.7/site-packages/
+# Set fbx file permissions
+RUN chmod 755 /usr/local/lib/python3.7/site-packages/fbx.so
+
+# Install any needed packages specified in requirements.txt
+RUN pip install --trusted-host pypi.python.org -r requirements.txt
+
+# Make port 7860 available to the world outside this container
+EXPOSE 7860
+
+# Define environment variable
+ENV NAME World
+
+# Run app.py when the container launches
+CMD ["streamlit", "run", "app.py", "--server.port", "7860"]
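The Dockerfile above unpacks the FBX Python SDK archive, runs its installer non-interactively, and copies the resulting `Python37_x64` bindings into the interpreter's `site-packages`. As a rough way to smoke-test that step inside the built image, one could run a short script like the sketch below; the file name `check_fbx.py` and the specific checks are illustrative assumptions, not part of this commit.

```python
# check_fbx.py -- illustrative smoke test for the FBX Python SDK install
# (hypothetical; not part of this commit).
import fbx

# Creating the core SDK objects exercises the native fbx.so module that
# the Dockerfile copied into site-packages.
manager = fbx.FbxManager.Create()
io_settings = fbx.FbxIOSettings.Create(manager, fbx.IOSROOT)
manager.SetIOSettings(io_settings)
scene = fbx.FbxScene.Create(manager, "smoke_test")

print("fbx module location:", fbx.__file__)
print("created scene:", scene.GetName())

# Destroying the manager also releases every object it created.
manager.Destroy()
```

If the `cp` into `site-packages` or the `chmod` on `fbx.so` had failed, the `import fbx` line would fail here rather than at app start-up.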
README.md
CHANGED
@@ -1,10 +1,52 @@
 ---
-title:
-emoji: 🌖
-colorFrom: yellow
-colorTo: yellow
+title: Optical Motion Capture AI
 sdk: docker
-
+app_port: 7860
+app_file: app.py
 ---
 
-
+# mocap-ai
+Functionality to load FBX files, extract animation, process the animation and write it back to the file.
+
+# Classifier
+* Globals: file with hardcoded values like the marker names.
+* Utilities:
+  * Split dataset into train/valid/test sets.
+  * Visualizations
+* Training file loader (a rough sketch follows this section):
+  * Load the `.fbx` file.
+  * Go through each frame in the animation frame range and check if all skeleton nodes have a keyframe there.
+  * If a keyframe is missing, remove that frame number from the valid frame numbers.
+  * After finding all valid frames, go through all marker translation channels and store the global transform in a `pandas` DataFrame.
+  * Add the actor numbers as categorical variables.
+  * Save the DataFrame to a `.csv` file.
+* Inference file loader:
+  * Same as the training file loader, but this one should process all frames regardless of keyframe presence.
+* Data augmentation (a second sketch appears after the references):
+  * Isolate a marker set.
+  * Translate and rotate (optionally scale) with a boundary check.
+* Model builder:
+  * Instantiate a model with various hyperparameters.
+* Training loop:
+  * Train a given model with callbacks.
+* Test loop:
+  * Validate the model on validation/test data.
+* Development script:
+  * Create a new model, train it and test it.
+* Deployment script:
+  * Deploys the model in a Docker image on Hugging Face.
+
+
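As referenced in the training file loader item above, the following is a rough sketch (not part of this commit) of how per-frame marker transforms could be collected into a `pandas` DataFrame with the FBX Python SDK. `pandas` is assumed even though this commit's `requirements.txt` only lists `streamlit`; the function names, frame-range arguments, and the `marker_prefix` filter are hypothetical placeholders, and the keyframe-validity check described above is omitted for brevity.

```python
# Illustrative sketch: load an FBX file and dump marker translations per
# frame into a DataFrame. All names and arguments are placeholders.
import fbx
import pandas as pd


def load_scene(path):
    """Import an FBX file and return (manager, scene)."""
    manager = fbx.FbxManager.Create()
    ios = fbx.FbxIOSettings.Create(manager, fbx.IOSROOT)
    manager.SetIOSettings(ios)
    importer = fbx.FbxImporter.Create(manager, "")
    if not importer.Initialize(path, -1, manager.GetIOSettings()):
        raise RuntimeError("Failed to open " + path)
    scene = fbx.FbxScene.Create(manager, "scene")
    importer.Import(scene)
    importer.Destroy()
    return manager, scene


def collect_markers(scene, start_frame, end_frame, marker_prefix="Marker_"):
    """Evaluate the global translation of marker nodes for each frame."""
    root = scene.GetRootNode()
    # Placeholder heuristic: treat children whose name starts with
    # marker_prefix as optical markers.
    markers = [root.GetChild(i) for i in range(root.GetChildCount())
               if root.GetChild(i).GetName().startswith(marker_prefix)]
    rows = []
    for frame in range(start_frame, end_frame + 1):
        time = fbx.FbxTime()
        time.SetFrame(frame)
        for node in markers:
            t = node.EvaluateGlobalTransform(time).GetT()
            rows.append({"frame": frame, "marker": node.GetName(),
                         "x": t[0], "y": t[1], "z": t[2]})
    return pd.DataFrame(rows)
```

The training loader described above would additionally drop frames where any skeleton node lacks a keyframe, add the actor numbers as categorical columns, and write the DataFrame to a `.csv` file; the inference loader would keep every frame.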
+## References:
+1. PointNet:
+   - Research paper: Qi, Charles R., et al. "PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation." CVPR. 2017. [arXiv:1612.00593](https://arxiv.org/abs/1612.00593)
+   - Official code repository (TensorFlow): https://github.com/charlesq34/pointnet
+   - Official code repository (PyTorch): https://github.com/fxia22/pointnet.pytorch
+2. PointNet++:
+   - Research paper: Qi, Charles R., et al. "PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space." NeurIPS. 2017. [arXiv:1706.02413](https://arxiv.org/abs/1706.02413)
+   - Official code repository (TensorFlow): https://github.com/charlesq34/pointnet2
+   - Official code repository (PyTorch): https://github.com/erikwijmans/Pointnet2_PyTorch
+3. DGCNN:
+   - Research paper: Wang, Yue, et al. "Dynamic Graph CNN for Learning on Point Clouds." ACM Transactions on Graphics (TOG) 38.5 (2019): 1-12. [arXiv:1801.07829](https://arxiv.org/abs/1801.07829)
+   - Official code repository (TensorFlow): https://github.com/WangYueFt/dgcnn
+   - Official code repository (PyTorch): https://github.com/muhanzhang/DGCNN
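For the data augmentation step listed in the Classifier section (the second sketch referenced there), the snippet below is an illustrative sketch, not part of this commit: it applies a random rotation about the vertical axis and a random translation to an isolated marker set, then rejects the sample if it leaves a bounding volume. `numpy` is assumed, and the bounds, axis choice, and shift range are placeholder values.

```python
# Illustrative augmentation sketch: rotate + translate one marker set with
# a boundary check. Bounds and ranges are placeholder assumptions.
import numpy as np


def augment_markers(points, bounds=((-100.0, 100.0),) * 3, max_shift=10.0,
                    rng=None):
    """points: (N, 3) array of marker positions for one frame/actor."""
    if rng is None:
        rng = np.random.default_rng()

    # Random rotation about the vertical (Y) axis.
    angle = rng.uniform(0.0, 2.0 * np.pi)
    c, s = np.cos(angle), np.sin(angle)
    rotation = np.array([[c, 0.0, s],
                         [0.0, 1.0, 0.0],
                         [-s, 0.0, c]])

    # Random translation on all three axes.
    shift = rng.uniform(-max_shift, max_shift, size=3)
    augmented = points @ rotation.T + shift

    # Boundary check: reject the draw if any marker leaves the volume.
    lower = np.array([b[0] for b in bounds])
    upper = np.array([b[1] for b in bounds])
    if np.any(augmented < lower) or np.any(augmented > upper):
        return None  # caller retries with a new random draw
    return augmented
```

Optional scaling, as mentioned in the list, could be added the same way before the boundary check.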
app.py
ADDED
@@ -0,0 +1,8 @@
+import fbx
+import streamlit as st
+
+st.title('Optical Motion Capture AI')
+
+mng = fbx.FbxManager.Create()
+st.write('Successfully loaded the fbx module:')
+st.write(mng)
config.toml
ADDED
@@ -0,0 +1,2 @@
+[server]
+port = 7860
fbx202032_fbxpythonsdk_linux.tar.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2bf37043864b57e308850e99dd1d45f9c27fde64d768c3e3e95d2e1891c25c3
+size 5178208
requirements.txt
ADDED
@@ -0,0 +1 @@
+streamlit