import gradio as gr
from convert_diffusion_to_gguf import SUPPORTED_ARCHS, qconfig_map, convert
from huggingface_hub import create_repo, upload_file
from argparse import Namespace
from io import StringIO
from pathlib import Path
# Shared in-memory buffer whose contents are shown in the "Conversion Logs"
# textbox. NOTE(review): nothing in this file writes to it directly —
# presumably `convert` (or a logging handler configured in
# convert_diffusion_to_gguf) redirects output here; verify against that module.
log_stream = StringIO()
def upload(args):
    """Upload ``args.outfile`` to the Hugging Face Hub if credentials were given.

    Args:
        args: Namespace carrying ``host_repo_id``, ``hf_token`` and ``outfile``.

    Returns:
        The commit URL of the uploaded file, or ``""`` when no target repo or
        token was provided (upload skipped).
    """
    url = ""
    if args.host_repo_id and args.hf_token:
        # exist_ok=True lets repeated conversions reuse the same repo.
        repo_id = create_repo(
            args.host_repo_id, repo_type="model", exist_ok=True, token=args.hf_token
        ).repo_id
        info = upload_file(
            repo_id=repo_id,
            path_in_repo=str(args.outfile),
            path_or_fileobj=str(args.outfile),
            token=args.hf_token,
        )
        url = info.commit_url
        # Only announce success when an upload actually happened; the original
        # unconditionally printed "Uploaded to " even when nothing was uploaded.
        print(f"Uploaded to {url}")
    return url
def go_gguf(
    model_repo_id,
    subfolder,
    arch,
    outtype,
    outfile_name,
    bigendian,
    verbose,
    host_repo_id,
    hf_token,
    progress=gr.Progress(track_tqdm=True),
):
    """Drive one conversion (and optional Hub upload) for the UI button.

    Returns:
        A ``(log_text, result_markdown)`` tuple feeding the two output widgets.
        On failure the second element is the exception message.
    """
    # Reset the shared log buffer so each run shows only its own output.
    log_stream.truncate(0)
    log_stream.seek(0)

    # Package the UI inputs into the Namespace shape `convert` expects.
    conversion_args = Namespace(
        model=Path(model_repo_id),
        subfolder=subfolder,
        arch=arch,
        outtype=outtype,
        outfile=Path(outfile_name),
        bigendian=bigendian,
        verbose=verbose,
        host_repo_id=host_repo_id,
        hf_token=hf_token,
        cache_dir=None,
    )

    try:
        progress(0.1, desc="Starting conversion... (This may take a while depending on model size)")
        convert(conversion_args)
        progress(0.8, desc="✅ Conversion Complete. Starting upload...")
        url = upload(conversion_args)
        logs = log_stream.getvalue()
        if not url:
            # No repo/token supplied — conversion succeeded, upload skipped.
            return (
                logs,
                "### ✅ Conversion Complete!\n\n(File was not uploaded as no repo/token was provided)",
            )
        return logs, f"### ✅ Success!\n\nUploaded to: [{url}]({url})"
    except Exception as e:
        # Surface the failure message in the result pane alongside the logs.
        return log_stream.getvalue(), str(e)
# ---- UI definition -------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # NOTE(review): the original source contained an unterminated string
    # literal here (a bare `gr.Markdown("` followed by raw text and `")`),
    # which is a syntax error; reconstructed as a single markdown heading.
    gr.Markdown("# GGUF Converter for Diffusers format model checkpoints")
    gr.Markdown(
        "Convert `diffusers` format model checkpoints from the Hub to GGUF format and optionally upload them back. Based on [this repo](https://github.com/ngxson/diffusion-to-gguf)."
    )
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### 📥 Input Model")
            model_repo_id = gr.Textbox(label="Model Repo ID", placeholder="e.g., Qwen/Qwen-Image")
            subfolder = gr.Textbox(label="Subfolder (Optional)", placeholder="e.g., transformer")
            gr.Markdown("### ⚙️ Conversion Settings")
            arch = gr.Dropdown(choices=SUPPORTED_ARCHS, label="Architecture")
            outtype = gr.Dropdown(choices=list(qconfig_map.keys()), label="Quantization Type", value="F16")
            # {ftype} is substituted by the converter with the chosen quant type.
            outfile_name = gr.Textbox(label="Output Filename", value="{ftype}.gguf")
            with gr.Accordion("Advanced Settings", open=False):
                bigendian = gr.Checkbox(label="Use Big Endian")
                verbose = gr.Checkbox(label="Verbose Logging", value=True)
            gr.Markdown("### 📤 Upload to Hub (Optional)")
            host_repo_id = gr.Textbox(label="Your Hub Repo ID", placeholder="e.g., YourUsername/My-GGUFs")
            hf_token = gr.Textbox(label="Hugging Face Token", type="password", placeholder="hf_...")
            convert_btn = gr.Button("Convert & Upload", variant="primary")
        with gr.Column(scale=2):
            gr.Markdown("### 📝 Logs")
            logs_output = gr.Textbox(
                label="Conversion Logs", lines=25, max_lines=25, interactive=False, autoscroll=True
            )
            gr.Markdown("### 🚀 Result")
            url_output = gr.Markdown()
    gr.Examples(
        examples=[
            [
                "black-forest-labs/FLUX.1-schnell",
                "transformer",
                "flux",
                "Q4_0",
                "flux-schnell-q4.gguf",
                False,
                False,
                "YourUsername/MyGGUFs",
                "hf_...",
            ],
            [
                "Qwen/Qwen-Image",
                "transformer",
                "qwen",
                "Q8_0",
                # Fixed: filename previously said "qwen-q4.gguf" despite Q8_0.
                "qwen-q8.gguf",
                False,
                False,
                "YourUsername/MyGGUFs",
                "hf_...",
            ],
        ],
        inputs=[model_repo_id, subfolder, arch, outtype, outfile_name, bigendian, verbose, host_repo_id, hf_token],
    )
    convert_btn.click(
        fn=go_gguf,
        inputs=[model_repo_id, subfolder, arch, outtype, outfile_name, bigendian, verbose, host_repo_id, hf_token],
        outputs=[logs_output, url_output],
    )

if __name__ == "__main__":
    demo.launch()