torch==2.5.1
torchvision>=0.19.0
opencv-python>=4.10.0.84
diffusers>=0.31.0
transformers>=4.49.0
tokenizers>=0.20.3
accelerate>=1.1.1
tqdm>=4.66.5
imageio>=2.36.0
easydict
ftfy>=6.3.0
dashscope
imageio-ffmpeg>=0.5.1
gradio>=5.3.0
numpy>=1.26.3,<2
spaces
albumentations>=1.4.20
av>=13.1.0
decord>=0.6.0
einops>=0.8.0
fastapi>=0.115.3
gdown>=5.2.0
h5py>=3.12.1
idna>=3.6
matplotlib>=3.9.2
omegaconf>=2.3.0
opencv-python-headless>=4.10.0.84
pandas>=2.2.3
pillow>=10.2.0
pydub>=0.25.1
pytorch-lightning>=2.4.0
pytorchvideo>=0.1.5
PyYAML>=6.0.1
regex>=2024.9.11
requests>=2.31.0
scikit-learn>=1.5.2
scipy>=1.14.1
six>=1.16.0
test-tube>=0.7.5
timm>=1.0.11
torchdiffeq>=0.2.4
torchmetrics>=1.5.1
urllib3>=2.2.0
uvicorn>=0.32.0
scikit-video>=1.1.11
sentencepiece>=0.2.0
beautifulsoup4>=4.12.3
moviepy>=1.0.3
wandb>=0.18.5
tensorboard>=2.18.0
pydantic>=2.9.2
huggingface_hub>=0.26.1
protobuf>=5.28.3
watch
gpustat
peft>=0.13.2
liger_kernel>=0.4.1
loguru
ninja
safetensors
packaging
https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.5cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
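# Note: judging by its filename tags, the flash-attn wheel above is prebuilt
# for CUDA 12.x, torch 2.5, CPython 3.10 (cp310), and Linux x86_64, which
# matches the torch==2.5.1 pin. On a different Python/CUDA combination, pick
# a matching wheel from the Dao-AILab/flash-attention releases page, or build
# from source with `pip install flash-attn --no-build-isolation` (the build
# uses ninja, which is already listed above).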