"""Gradio app: auto-generate, deploy, and debug a HuggingFace Space with Gemini.

Flow: the user signs in with HF OAuth, enters a prompt, and the app asks
Gemini to write a Space (Gradio or Streamlit SDK), uploads the files to a
personal Space repo, then polls the Space's build/run logs and feeds any
errors back to Gemini for a fix (up to 5 rounds).
"""

import json
import time

import gradio as gr
from google import genai
from google.genai.types import GenerateContentConfig, GoogleSearch, Tool
from huggingface_hub import constants, create_repo, list_models, upload_file
from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status


# — USER INFO & MODEL LISTING —

def show_profile(profile: gr.OAuthProfile | None) -> str:
    """Render the login-status markdown line."""
    return f"✅ Logged in as **{profile.username}**" if profile else "*Not logged in.*"


def list_private_models(
    profile: gr.OAuthProfile | None,
    oauth_token: gr.OAuthToken | None,
) -> str:
    """List the logged-in user's models (public and private) as markdown."""
    if not (profile and oauth_token):
        return "Please log in to see your models."
    models = [
        f"{m.id} ({'private' if m.private else 'public'})"
        for m in list_models(author=profile.username, token=oauth_token.token)
    ]
    return "No models found." if not models else "Models:\n\n" + "\n - ".join(models)


# — LOG FETCHING —

def _get_space_jwt(repo_id: str) -> str:
    """Fetch a short-lived JWT authorizing access to a Space's log stream."""
    url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
    r = get_session().get(url, headers=build_hf_headers())
    hf_raise_for_status(r)
    return r.json()["token"]


def fetch_logs(repo_id: str, level: str) -> str:
    """Collect the Space's SSE log stream (``level`` is "build" or "run").

    Each SSE ``data:`` event is JSON with ``timestamp``/``data`` fields;
    malformed events are skipped.  Returns the log lines joined by newlines.
    """
    jwt = _get_space_jwt(repo_id)
    url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
    lines = []
    with get_session().get(url, headers=build_hf_headers(token=jwt), stream=True) as resp:
        hf_raise_for_status(resp)
        for raw in resp.iter_lines():
            if raw.startswith(b"data: "):
                # Narrowed from a bare `except:` — only decode/parse errors
                # can occur here, and a bare except also swallows Ctrl-C.
                try:
                    ev = json.loads(raw[len(b"data: "):].decode())
                except (json.JSONDecodeError, UnicodeDecodeError):
                    continue
                lines.append(f"[{ev.get('timestamp','')}] {ev.get('data','')}")
    return "\n".join(lines)


# — HANDLERS —

def handle_user_message(
    history,                        # list of {"role","content"} chat messages
    sdk_choice: str,                # "gradio" or "streamlit"
    gemini_api_key: str,
    grounding_enabled: bool,
    profile: gr.OAuthProfile | None,
    oauth_token: gr.OAuthToken | None,
    user_msg: str = "",             # new prompt from the textbox (may be empty)
):
    """Generate Space code with Gemini, deploy it, and iterate on errors.

    Returns a 5-tuple for the UI outputs:
    (chat messages, build logs, run logs, preview iframe HTML, repo_id).
    """
    if not (profile and oauth_token):
        return (
            history + [{"role": "assistant", "content": "⚠️ Please log in first."}],
            "",
            "",
            "<p>No Space yet.</p>",
            "",
        )

    # BUG FIX: the prompt textbox was never fed into the conversation, so
    # Gemini only ever saw the system message.  Append it to the history.
    if user_msg:
        history = history + [{"role": "user", "content": user_msg}]

    client = genai.Client(api_key=gemini_api_key)
    chat = [{
        "role": "system",
        "content": (
            f"You are an AI assistant writing a HuggingFace Space using the "
            f"{sdk_choice} SDK. After producing code, wait for logs; if errors appear, fix them."
        ),
    }] + history

    # Filenames depend on the chosen SDK.
    code_fn = "app.py" if sdk_choice == "gradio" else "streamlit_app.py"
    readme_fn = "README.md"
    reqs_fn = "requirements.txt"
    repo_id = f"{profile.username}/{profile.username}-auto-space"

    # Loop-invariant: resolve the SDK version once, not on every retry.
    if sdk_choice == "gradio":
        sdk_version = gr.__version__
    else:
        import streamlit as _st
        sdk_version = _st.__version__

    build_logs = run_logs = ""
    for _ in range(5):  # up to 5 generate → deploy → debug rounds
        # Optionally give Gemini Google-Search grounding.
        tools = [Tool(google_search=GoogleSearch())] if grounding_enabled else []
        cfg = GenerateContentConfig(tools=tools, response_modalities=["TEXT"])
        resp = client.models.generate_content(
            model="gemini-2.5-flash-preview-04-17",
            contents=[m["content"] for m in chat],
            config=cfg,
        )
        code = resp.text
        chat.append({"role": "assistant", "content": code})

        # Write the generated app code.
        with open(code_fn, "w") as f:
            f.write(code)

        # Write the Space's README front matter (YAML requires real newlines).
        with open(readme_fn, "w") as f:
            f.write(f"""---
title: Wuhp Auto Space
emoji: 🐢
colorFrom: red
colorTo: pink
sdk: {sdk_choice}
sdk_version: {sdk_version}
app_file: {code_fn}
pinned: false
---

See config reference → https://huggingface.co/docs/hub/spaces-config-reference
""")

        # Write requirements for the chosen SDK.
        base = "pandas\n"
        extra = "streamlit\n" if sdk_choice == "streamlit" else "gradio\n"
        with open(reqs_fn, "w") as f:
            f.write(base + extra)

        # Deploy: create (or reuse) the Space repo and upload all three files.
        create_repo(repo_id=repo_id, token=oauth_token.token, exist_ok=True,
                    repo_type="space", space_sdk=sdk_choice)
        for fn in (code_fn, readme_fn, reqs_fn):
            upload_file(path_or_fileobj=fn, path_in_repo=fn, repo_id=repo_id,
                        token=oauth_token.token, repo_type="space")

        # Inspect the logs; stop when neither stream mentions an error.
        build_logs = fetch_logs(repo_id, "build")
        run_logs = fetch_logs(repo_id, "run")
        if "ERROR" not in build_logs.upper() and "ERROR" not in run_logs.upper():
            break

        # Feed the failing logs back to Gemini and retry.
        chat.append({
            "role": "user",
            "content": (
                f"Build logs:\n{build_logs}\n\n"
                f"Run logs:\n{run_logs}\n\n"
                "Please fix the code."
            ),
        })
        time.sleep(2)  # brief pause before the next round

    # Strip the system message before showing the conversation in the UI.
    messages = [{"role": m["role"], "content": m["content"]}
                for m in chat if m["role"] != "system"]
    # NOTE(review): the original iframe markup was lost from this file
    # (`iframe = f''`).  Spaces embed at https://<owner>-<name>.hf.space —
    # confirm the subdomain mangling rules (dots/underscores) for edge cases.
    iframe = (
        f'<iframe src="https://{repo_id.replace("/", "-").lower()}.hf.space" '
        f'width="100%" height="500px"></iframe>'
    )
    return messages, build_logs, run_logs, iframe, repo_id


def refresh_build_logs(
    repo_id: str,
    profile: gr.OAuthProfile | None,
    oauth_token: gr.OAuthToken | None,
) -> str:
    """Re-fetch the Space's build logs on demand.

    BUG FIX: `profile`/`oauth_token` must be type-annotated — Gradio only
    auto-injects OAuth values for annotated parameters, and the click handler
    passes just `state_repo` as input.
    """
    if not (profile and oauth_token and repo_id):
        return "⚠️ Please deploy first."
    return fetch_logs(repo_id, "build")


def refresh_run_logs(
    repo_id: str,
    profile: gr.OAuthProfile | None,
    oauth_token: gr.OAuthToken | None,
) -> str:
    """Re-fetch the Space's run logs on demand (see refresh_build_logs)."""
    if not (profile and oauth_token and repo_id):
        return "⚠️ Please deploy first."
    return fetch_logs(repo_id, "run")


# — UI —

with gr.Blocks() as demo:
    gr.Markdown("## HF Space Auto‑Builder\n1. Sign in 2. Prompt 3. Deploy & Debug ►")

    # Login + model listing (refreshed on page load and after login).
    login_btn = gr.LoginButton("huggingface", size="lg")
    status_md = gr.Markdown("*Not logged in.*")
    models_md = gr.Markdown()
    demo.load(show_profile, None, status_md)
    demo.load(list_private_models, None, models_md)
    login_btn.click(show_profile, None, status_md)
    login_btn.click(list_private_models, None, models_md)

    # Controls
    with gr.Row():
        # BUG FIX: `value` is keyword-only on Gradio 4.x components;
        # passing "gradio" positionally raises TypeError.
        sdk_choice = gr.Radio(["gradio", "streamlit"], value="gradio", label="SDK")
        api_key = gr.Textbox(label="Gemini API Key", type="password")
        grounding = gr.Checkbox(label="Enable grounding")

    # Chat + outputs
    chatbot = gr.Chatbot(type="messages")
    user_in = gr.Textbox(label="Prompt", placeholder="e.g. CSV inspector…")
    send_btn = gr.Button("Send")
    build_box = gr.Textbox(label="Build logs", lines=5)
    run_box = gr.Textbox(label="Run logs", lines=5)
    preview = gr.HTML("<p>No Space yet.</p>")
    state_repo = gr.Textbox(visible=False)

    send_btn.click(
        fn=handle_user_message,
        # BUG FIX: `user_in` was missing, so the prompt never reached the
        # handler.  OAuth profile/token are injected by Gradio, not listed.
        inputs=[chatbot, sdk_choice, api_key, grounding, user_in],
        outputs=[chatbot, build_box, run_box, preview, state_repo],
    )

    # Manual log refresh
    with gr.Row():
        refresh_build = gr.Button("Refresh Build Logs")
        refresh_run = gr.Button("Refresh Run Logs")
    refresh_build.click(fn=refresh_build_logs, inputs=[state_repo], outputs=build_box)
    refresh_run.click(fn=refresh_run_logs, inputs=[state_repo], outputs=run_box)

demo.launch()