polu committed
Commit 846b367 · 1 Parent(s): b87b13c
Files changed (1)
  1. app.py +21 -10
app.py CHANGED
@@ -31,17 +31,28 @@ if not path.exists(cache_path):
     os.makedirs(cache_path, exist_ok=True)

 pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
+# pipe.load_lora_weights(hf_hub_download("RED-AIGC/TDD", "TDD-FLUX.1-dev-lora-beta.safetensors"))
 pipe.load_lora_weights(hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"))
 pipe.fuse_lora(lora_scale=0.125)
 pipe.to(device="cuda", dtype=torch.bfloat16)

-with gr.Blocks(theme=gr.themes.Soft()) as demo:
+css = """
+h1 {
+    text-align: center;
+    display: block;
+}
+.gradio-container {
+    max-width: 70.5rem !important;
+}
+"""
+
+with gr.Blocks(css=css) as demo:
     gr.Markdown(
         """
-        <div style="text-align: center; max-width: 650px; margin: 0 auto;">
-        <h1 style="font-size: 2.5rem; font-weight: 700; margin-bottom: 1rem; display: contents;">Hyper-FLUX-8steps-LoRA</h1>
-        <p style="font-size: 1rem; margin-bottom: 1.5rem;">AutoML team from ByteDance</p>
-        </div>
+# FLUX.1-dev (beta) distilled by ✨Target-Driven Distillation✨
+Compared to Hyper-FLUX, the beta version of TDD has half as many parameters (600M), resulting in more realistic details. Because of hardware constraints, this beta version still has many imperfections; the official version is still being optimized and is expected to be released after the Mid-Autumn Festival. TDD can also be used to distill video generation models. This space presents TDD-distilled [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev).
+[**Project Page**](https://redaigc.github.io/TDD/) **|** [**Paper**](https://arxiv.org/abs/2409.01347) **|** [**Code**](https://github.com/RedAIGC/Target-Driven-Distillation) **|** [**Model**](https://huggingface.co/RED-AIGC/TDD) **|** [🤗 **TDD-SDXL Demo**](https://huggingface.co/spaces/RED-AIGC/TDD) **|** [🤗 **TDD-SVD Demo**](https://huggingface.co/spaces/RED-AIGC/SVD-TDD)
+The code in this space is built on [Hyper-SD](https://huggingface.co/spaces/ByteDance/Hyper-FLUX-8Steps-LoRA), and we acknowledge their contribution.
         """
     )
@@ -49,8 +60,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     with gr.Column(scale=3):
         with gr.Group():
             prompt = gr.Textbox(
-                label="Your Image Description",
-                placeholder="E.g., A serene landscape with mountains and a lake at sunset",
+                label="Prompt",
+                placeholder="E.g., portrait photo of a girl, photograph, highly detailed face, depth of field, moody light, golden hour, style by Dan Winters, Russell James, Steve McCurry, centered, extremely detailed, Nikon D850, award winning photography",
                 lines=3
             )
@@ -61,10 +72,10 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
                 width = gr.Slider(label="Width", minimum=256, maximum=1152, step=64, value=1024)

             with gr.Row():
-                steps = gr.Slider(label="Inference Steps", minimum=6, maximum=25, step=1, value=8)
-                scales = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=5.0, step=0.1, value=3.5)
+                steps = gr.Slider(label="Inference Steps", minimum=4, maximum=10, step=1, value=8)
+                scales = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=3.5, step=0.1, value=2.0)

-            seed = gr.Number(label="Seed (for reproducibility)", value=3413, precision=0)
+            seed = gr.Number(label="Seed", value=3413, precision=0)

             generate_btn = gr.Button("Generate Image", variant="primary", scale=1)
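
For reference, below is a minimal standalone sketch of the pipeline setup this commit ships, run outside Gradio with the new slider defaults (8 steps, guidance 2.0, 1024x1024, seed 3413). The model IDs, LoRA filename, and fuse scale are taken verbatim from the diff; the prompt string and output path are illustrative placeholders, not part of the committed code.

```python
import torch
from diffusers import FluxPipeline
from huggingface_hub import hf_hub_download

# Same setup as the committed app.py: FLUX.1-dev with the Hyper-SD 8-step LoRA fused in.
# The commit keeps the TDD LoRA commented out; to try it instead, swap the download for
# hf_hub_download("RED-AIGC/TDD", "TDD-FLUX.1-dev-lora-beta.safetensors").
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.load_lora_weights(hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"))
pipe.fuse_lora(lora_scale=0.125)
pipe.to(device="cuda", dtype=torch.bfloat16)

# Generation call mirroring the Space's new defaults.
image = pipe(
    prompt="portrait photo of a girl, highly detailed face, golden hour",  # illustrative prompt
    height=1024,
    width=1024,
    num_inference_steps=8,
    guidance_scale=2.0,
    generator=torch.Generator("cuda").manual_seed(3413),
).images[0]
image.save("output.png")  # hypothetical output path
```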