# Streamlit demo app: SynthDa interpolation viewer.
import streamlit as st
import os
# Directory containing the pre-generated clips consumed below
# (videos_generated_<weight>.mp4, one file per interpolation weight).
VIDEO_FOLDER = "./src/synthda_falling_realreal/"
# Use the full browser width so the three video columns fit side by side.
# NOTE: set_page_config must be the first Streamlit call in the script.
st.set_page_config(layout="wide")
# Title and description (centered). unsafe_allow_html is required so the
# raw <h1>/<h3>/<p> markup renders instead of being escaped as text.
st.markdown("""
<h1 style="text-align: center;">Project SynthDa</h1>
<h3 style="text-align: center;">SynthDa Interpolation Demo Viewer</h3>
<p style="text-align: center;">
AutoSynthDa blends two input motion videos to <strong>generate kinematically coherent, synthetic action videos</strong>.<br>
Use the slider below to explore how the system interpolates motion from one video to another.<br>
Source: <a href="https://github.com/nvidia/synthda" target="_blank">github.com/nvidia/synthda</a>
</p>
""", unsafe_allow_html=True)
# Centered one-line instruction explaining what the slider controls.
st.markdown(
'<p style="text-align: center;"><strong>Use the slider to control the interpolation between Input Video 1 (left) and Input Video 2 (right).</strong></p>',
unsafe_allow_html=True
)
# Interpolation weight slider (defaults to an even 0.5 blend).
# The range runs the full 0.0-1.0 span so the endpoint branches below
# ("no interpolation") are reachable; the previous 0.1-0.9 bounds made
# them dead code even though the endpoint files exist on disk.
weight = st.slider("Interpolation Weight", 0.0, 1.0, 0.5, step=0.1)
# Normalize away float step accumulation (e.g. 0.30000000000000004) so the
# equality tests here and the "{weight:.1f}" filename below are stable.
weight = round(weight, 1)
# Human-readable description of the current blend (centered under slider).
if weight == 0.0:
    interp_text = "Showing Input Video 1 (no interpolation)"
elif weight == 1.0:
    interp_text = "Showing Input Video 2 (no interpolation)"
else:
    # Complementary weight contributed by Input Video 2.
    w2 = round(1.0 - weight, 1)
    interp_text = f"Generated motion: {weight:.1f} from Input Video 1 + {w2:.1f} from Input Video 2"
st.markdown(f'<p style="text-align: center; color: #444;"><strong>{interp_text}</strong></p>', unsafe_allow_html=True)
# Resolve the clip for the current weight plus the two fixed endpoint
# inputs (weights 0.0 and 1.0), then check which files are present.
filename_interp = f"videos_generated_{weight:.1f}.mp4"
filename_input1 = "videos_generated_0.0.mp4"
filename_input2 = "videos_generated_1.0.mp4"
# All clips live in VIDEO_FOLDER under their weight-encoded filenames.
video_interp, video_input1, video_input2 = (
    os.path.join(VIDEO_FOLDER, name)
    for name in (filename_interp, filename_input1, filename_input2)
)
# Missing files are tolerated: the UI shows an error box instead.
exists_interp, exists_1, exists_2 = (
    os.path.exists(path)
    for path in (video_interp, video_input1, video_input2)
)
# Layout: three side-by-side columns — Input 1, the interpolated result,
# and Input 2. Each column renders a centered caption, then either the
# video player or an error box when the file is missing.
col1, col2, col3 = st.columns(3)
_panels = [
    (
        col1,
        """
<div style='text-align: center; font-weight: bold;'>
Input Video 1 <span style="font-weight: normal;">(Generated with generative AI)</span>
</div>
""",
        exists_1,
        video_input1,
        "Video 1 not found",
    ),
    (
        col2,
        "<div style='text-align: center; font-weight: bold;'>Interpolated Video</div>",
        exists_interp,
        video_interp,
        "Interpolated video not found",
    ),
    (
        col3,
        """<div style='text-align: center; font-weight: bold;'>Input Video 2 <span style="font-weight: normal;">(Obtained from real-world dataset)</span>
</div>""",
        exists_2,
        video_input2,
        "Video 2 not found",
    ),
]
for _col, _caption, _ok, _path, _err in _panels:
    with _col:
        st.markdown(_caption, unsafe_allow_html=True)
        if _ok:
            st.video(_path)
        else:
            st.error(_err)