"""Colab setup script for a VibeVoice Gradio demo.

Checks for a CUDA GPU (Colab "T4 GPU" runtime expected), clones the
VibeVoice-7B repository into /tmp, installs it with uv, and launches a
small Gradio interface.
"""
import os
import subprocess

import gradio as gr
import torch
from huggingface_hub import InferenceClient  # noqa: F401 — kept from original; unused here

print(' # ************************Begin***********************************')
print('load libs')

# Where the model repository is cloned to.
REPO_DIR = "/tmp/VibeVoice"
REPO_URL = "https://huggingface.co/vibevoice/VibeVoice-7B"

# Model identifier; also used as a sentinel ("just to break" guard below).
LOADED_MODEL = "vibevoice/VibeVoice-7B"


def check_gpu():
    """Print a status line about GPU availability.

    Warns with Colab runtime-switching instructions when no CUDA
    device is visible.
    """
    if torch.cuda.is_available():  # original also suggested checking for "T4" in the device name
        print("✅ GPU detected")
    else:
        print("""
⚠️ WARNING: T4 GPU not detected

The recommended runtime for this Colab notebook is "T4 GPU".

To change the runtime type:
1. Click on "Runtime" in the top navigation menu
2. Click on "Change runtime type"
3. Select "T4 GPU"
4. Click "OK" if a "Disconnect and delete runtime" window appears
5. Click on "Save"
""")


def setup_repo():
    """Clone the VibeVoice repository (if absent) and install it.

    Uses subprocess.run with an argument list (shell=False) instead of
    os.system shell strings, and check=True so failures raise instead
    of being silently ignored.
    """
    if not os.path.isdir(REPO_DIR):
        subprocess.run(
            ["git", "clone", "--branch", "main", REPO_URL, REPO_DIR],
            check=True,
        )
    print("✅ Cloned VibeVoice repository")

    subprocess.run(
        ["uv", "pip", "--quiet", "install", "--system", "-e", REPO_DIR],
        check=True,
    )
    print("✅ Installed dependencies")


def be_happy(happy):
    """Echo *happy* back to the caller, printing a message when truthy.

    Also wired up as the Gradio interface callback below.

    Args:
        happy: Any value; returned unchanged.

    Returns:
        The *happy* argument, unmodified.
    """
    if happy:
        print("I'm happy")
    return happy


def build_app():
    """Build and return the Gradio Interface for the demo.

    NOTE(review): the callback echoes its input, so a numpy image is
    handed to a Markdown output component — presumably a placeholder
    callback; confirm intended behavior before shipping.
    """
    # Renamed 'interface' to 'app' (translated from the original comment).
    return gr.Interface(
        fn=be_happy,
        inputs=gr.Image(type="numpy", label="Unggah Gambar Model Rambut"),
        outputs=gr.Markdown(label="# Rekomendasi"),
        title="vibevoice/VibeVoice-7B",
        description="vibevoice/VibeVoice-7B",
        examples=[
            # Optional examples, e.g.:
            # "./example.jpg",
            # "./examplese.jpg"
        ],
    )


if __name__ == "__main__":
    # All side effects (clone, install, server launch) now live under the
    # main guard instead of running at import time.
    print("don't worry")
    check_gpu()
    setup_repo()
    be_happy(True)

    if LOADED_MODEL is not None:
        app = build_app()
        app.queue().launch()