import os, subprocess, sys

os.environ["HF_HUB_DISABLE_SYMLINKS_WARNING"] = "1"
os.environ["HF_HUB_DISABLE_HF_TRANSFER"] = "1"
os.environ["HF_HUB_ENABLE_XET"] = "0"
os.environ["NUMBA_CACHE_DIR"] = "/tmp/numba_cache"
os.makedirs("/tmp/numba_cache", exist_ok=True)
os.environ["NUMBA_DISABLE_JIT"] = "1"

from huggingface_hub import HfApi, HfFolder, upload_folder, snapshot_download

# 🔒 Remove hf_transfer if it is installed
subprocess.run([sys.executable, "-m", "pip", "uninstall", "-y", "hf_transfer"])

# === Configuration ===
HF_MODEL_ID = "tu_usuario/xtts-v2-finetuned"  # <--- change to your repo on HF
HF_TOKEN = os.environ.get("HF_TOKEN")         # must be set in your Space/environment
DATASET_PATH = "/home/user/app/dataset"       # path to your dataset
OUTPUT_PATH = "/tmp/output_model"
BASE_MODEL = "coqui/XTTS-v2"

# Pre-create writable working directories under /tmp
os.makedirs("/tmp/xtts_cache", exist_ok=True)
os.chmod("/tmp/xtts_cache", 0o777)
os.makedirs("/tmp/xtts_model", exist_ok=True)
os.chmod("/tmp/xtts_model", 0o777)
os.makedirs("/tmp/xtts_model/.huggingface", exist_ok=True)
os.chmod("/tmp/xtts_model/.huggingface", 0o777)

# Continue with the rest of the logic, using the new paths consistently.

# === 1. Download the base model ===
# 🔧 Force a direct download without symlinks or hf_transfer
model_dir = snapshot_download(
    repo_id=BASE_MODEL,
    local_dir="/tmp/xtts_model",    # download directly here
    cache_dir="/tmp/hf_cache",      # safe cache location under /tmp
    # local_dir_use_symlinks=False, # 🔑 avoids symlinks
    resume_download=True,
    token=HF_TOKEN,
)
print(f"✅ Model downloaded to: {model_dir}")

CONFIG_PATH = "/tmp/xtts_model/config.json"
RESTORE_PATH = "/tmp/xtts_model/model.pth"

# === 2. Edit the configuration for your VoxPopuli dataset ===
print("=== Editing configuration for fine-tuning with VoxPopuli ===")
import json

with open(CONFIG_PATH, "r") as f:
    config = json.load(f)

config["output_path"] = OUTPUT_PATH
config["datasets"] = [
    {
        "formatter": "voxpopuli",
        "path": DATASET_PATH,
        "meta_file_train": "metadata.json",
    }
]
config["run_name"] = "xtts-finetune-voxpopuli"
config["lr"] = 1e-5  # lower learning rate for fine-tuning

with open(CONFIG_PATH, "w") as f:
    json.dump(config, f, indent=2)

# === 3. Launch training ===
print("=== Starting XTTS-v2 fine-tuning ===")
import librosa
from librosa.core.spectrum import magphase

# Patch librosa dynamically (note: this only affects the current process,
# not the training subprocess launched below)
librosa.magphase = magphase

# subprocess.run([
#     "python", "/home/user/TTS/TTS/bin/train_tts.py",
#     "--config_path", CONFIG_PATH,
#     "--restore_path", RESTORE_PATH
# ], check=True)

subprocess.run([
    sys.executable, "-m", "TTS.bin.train_tts",
    "--config_path", CONFIG_PATH,
    "--restore_path", RESTORE_PATH,
], check=True)

# === 4. Upload the resulting model to HF ===
print("=== Uploading the fine-tuned model to the Hugging Face Hub ===")
api = HfApi()
HfFolder.save_token(HF_TOKEN)

# Make sure the target repo exists before uploading
api.create_repo(repo_id=HF_MODEL_ID, repo_type="model", exist_ok=True, token=HF_TOKEN)

upload_folder(
    repo_id=HF_MODEL_ID,
    repo_type="model",
    folder_path=OUTPUT_PATH,
    token=HF_TOKEN,
)
print("✅ Fine-tuning finished and model uploaded to Hugging Face.")
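
# --- Optional: quick inference sanity check (not part of the original flow) ---
# A minimal sketch, assuming the Trainer wrote a run folder under OUTPUT_PATH that
# contains a "config.json" and a "best_model.pth" (exact filenames depend on the
# Trainer version), and that REFERENCE_WAV points to a short clip of the target
# speaker. The paths, the test sentence, and the "es" language code are all
# assumptions to adjust for your setup; failures are caught so the script still exits cleanly.
import glob

try:
    from TTS.api import TTS

    run_configs = sorted(glob.glob(os.path.join(OUTPUT_PATH, "**", "config.json"), recursive=True))
    run_models = sorted(glob.glob(os.path.join(OUTPUT_PATH, "**", "best_model.pth"), recursive=True))
    REFERENCE_WAV = os.path.join(DATASET_PATH, "reference.wav")  # assumption: any clean clip of the target voice

    if run_configs and run_models and os.path.isfile(REFERENCE_WAV):
        tts = TTS(model_path=run_models[-1], config_path=run_configs[-1])
        tts.tts_to_file(
            text="Quick check of the fine-tuned XTTS model.",
            speaker_wav=REFERENCE_WAV,
            language="es",  # assumption: match the language of your dataset
            file_path="/tmp/finetune_check.wav",
        )
        print("✅ Inference check written to /tmp/finetune_check.wav")
    else:
        print("ℹ️ Skipping inference check: checkpoint or reference audio not found.")
except Exception as e:
    print(f"⚠️ Inference check skipped: {e}")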