lora exp 2 with config
app.py CHANGED
@@ -9,6 +9,7 @@ from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler
 from diffusers.schedulers.scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler
 from huggingface_hub import hf_hub_download
 from lycoris import create_lycoris_from_weights
+from transformers import AutoConfig
 
 # Define model options
 MODEL_OPTIONS = {
@@ -69,9 +70,12 @@ def generate_video(
     seed = int(seed)
 
     torch.manual_seed(seed)
+
+    # Load config from the model
+    config = AutoConfig.from_pretrained(model_id)
 
-    vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
-    pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.float16)
+    vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", config=config, torch_dtype=torch.float32)
+    pipe = WanPipeline.from_pretrained(model_id, vae=vae, config=config, torch_dtype=torch.float16)
 
     if scheduler_type == "UniPCMultistepScheduler":
         pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=flow_shift)
@@ -222,30 +226,26 @@ with gr.Blocks() as demo:
 
     generate_btn.click(
         fn=generate_video,
-        inputs=[
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        inputs=[
+            model_choice,
+            prompt,
+            negative_prompt,
+            lycoris_id,
+            lycoris_weight_name,
+            lycoris_scale,
+            scheduler_type,
+            flow_shift,
+            height,
+            width,
+            num_frames,
+            guidance_scale,
+            num_inference_steps,
+            output_fps,
+            seed
+        ],
         outputs=[output_video, used_seed]
     )
 
-    gr.Markdown("""
-    ## Tips for best results:
-    - Smaller videos: Flow shift 2.0–5.0
-    - Larger videos: Flow shift 7.0–12.0
-    - Use frame count in 4k+1 form (e.g., 33, 65)
-    - Limit frame count and resolution to avoid timeout
-    """)
+    gr.Markdown(""" ## Tips for best results: - Smaller videos: Flow shift 2.0–5.0 - Larger videos: Flow shift 7.0–12.0 - Use frame count in 4k+1 form (e.g., 33, 65) - Limit frame count and resolution to avoid timeout """)
 
 demo.launch()
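For reference, the loading path touched by the second hunk can be exercised outside the Gradio app. The sketch below is a minimal reconstruction built from the calls visible in that hunk plus diffusers' documented Wan text-to-video flow; the model id, prompt, and generation settings are assumptions (the app's MODEL_OPTIONS dict is not part of this diff), and it keeps the pre-change from_pretrained calls because the diff does not show whether the new config= kwarg is actually consumed downstream.

import torch
from diffusers import AutoencoderKLWan, WanPipeline, UniPCMultistepScheduler
from diffusers.utils import export_to_video

# Assumed model id; in app.py this comes from MODEL_OPTIONS / model_choice.
model_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"

# float32 VAE with a float16 pipeline, as in generate_video above
vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.float16)

# Scheduler swap from the same hunk: UniPC with an explicit flow shift
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=3.0)
pipe.to("cuda")

frames = pipe(
    prompt="a cat walking through tall grass, cinematic lighting",
    negative_prompt="blurry, low quality",
    height=480,
    width=832,
    num_frames=33,          # 4k + 1, per the tips in the UI
    guidance_scale=5.0,
    num_inference_steps=30,
).frames[0]
export_to_video(frames, "output.mp4", fps=16)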
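The diff shows only the imports for LyCORIS support, not how the checkpoint selected by lycoris_id / lycoris_weight_name gets attached to the pipeline. A hypothetical helper along the lines below would match the common lycoris + hf_hub_download pattern; the function name, the use of pipe.transformer as the wrapped module, and the exact create_lycoris_from_weights call pattern are assumptions, not code from this commit.

from huggingface_hub import hf_hub_download
from lycoris import create_lycoris_from_weights


def apply_lycoris(pipe, lycoris_id: str, lycoris_weight_name: str, lycoris_scale: float):
    """Hypothetical helper: fetch a LyCORIS checkpoint from the Hub and merge it
    into the pipeline's transformer at the requested multiplier."""
    weight_path = hf_hub_download(repo_id=lycoris_id, filename=lycoris_weight_name)
    # Assumed call pattern: the wrapper network and the loaded state dict are
    # returned; merging bakes the weights into the base module in place.
    lycoris_net, _ = create_lycoris_from_weights(lycoris_scale, weight_path, pipe.transformer)
    lycoris_net.merge_to()
    return pipe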
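The third hunk is Gradio event wiring: every UI control is listed in inputs so its current value is passed positionally to generate_video, and the two outputs receive the rendered video and the seed that was used. A stripped-down sketch of the same pattern, with the component set reduced to two inputs and a stub standing in for generate_video:

import gradio as gr


def fake_generate(prompt, seed):
    # Stand-in for generate_video: returns no video and echoes the seed.
    return None, int(seed)


with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    seed = gr.Number(label="Seed", value=42)
    generate_btn = gr.Button("Generate")
    output_video = gr.Video(label="Generated video")
    used_seed = gr.Number(label="Seed used")

    generate_btn.click(
        fn=fake_generate,
        inputs=[prompt, seed],
        outputs=[output_video, used_seed],
    )

demo.launch()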
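The collapsed Markdown block keeps the old tips, including the "4k+1" rule: the frame count should be one more than a multiple of four (e.g., 33 or 65). A small hypothetical helper, not part of app.py, that snaps an arbitrary request to that form:

def snap_to_4k_plus_1(num_frames: int) -> int:
    """Round a requested frame count to the nearest value of the form 4*k + 1,
    as the UI tip asks for."""
    k = max(round((num_frames - 1) / 4), 1)
    return 4 * k + 1


assert snap_to_4k_plus_1(33) == 33
assert snap_to_4k_plus_1(64) == 65
assert snap_to_4k_plus_1(16) == 17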