Nymbo committed on
Commit
e7d32c8
·
verified ·
1 Parent(s): d10312c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -205
app.py CHANGED
@@ -70,189 +70,76 @@ def generate_video(
70
  # Create a unique ID for this generation
71
  generation_id = uuid.uuid4().hex[:8]
72
  print(f"Generation ID: {generation_id}")
73
-
74
- # Initial parameters dictionary - we'll customize it for each provider
75
- base_parameters = {
76
- "negative_prompt": negative_prompt,
77
- "num_inference_steps": num_inference_steps,
78
- "guidance_scale": guidance_scale,
79
- }
80
-
81
- # Add seed if specified
82
- if seed is not None and seed != -1:
83
- base_parameters["seed"] = seed
84
 
85
- # Provider-specific parameter handling
86
- if provider == "hf-inference":
87
- print("Using Hugging Face Inference API, adapting parameters...")
88
- # HF Inference API supports these parameters
89
- parameters = {
90
- "negative_prompt": negative_prompt,
91
- "num_inference_steps": num_inference_steps,
92
- "guidance_scale": guidance_scale,
93
- "num_frames": num_frames,
94
- # HF Inference does NOT support fps
95
- # "fps": fps,
96
- "width": width,
97
- "height": height,
 
 
 
 
98
  }
99
-
100
- # Add motion_bucket_id for SVD models if applicable
101
- if "stable-video-diffusion" in model_to_use:
102
- parameters["motion_bucket_id"] = motion_bucket_id
103
-
104
- # Add seed if specified
105
- if seed is not None and seed != -1:
106
- parameters["seed"] = seed
107
 
108
- elif provider == "fal-ai":
109
- print("Using FalAI provider, adapting parameters...")
110
- # According to Fal-AI API specification, only these parameters are supported
111
- parameters = {
112
- "negative_prompt": negative_prompt,
113
- "num_frames": num_frames,
114
- "num_inference_steps": num_inference_steps,
115
- "guidance_scale": guidance_scale,
116
- }
117
-
118
- # Add seed if specified
119
- if seed is not None and seed != -1:
120
- parameters["seed"] = seed
121
-
122
- # Note: width and height are not supported by Fal-AI's text_to_video API
123
- print("Note: width and height parameters are not supported by Fal-AI and will be ignored")
124
 
125
- elif provider == "novita":
126
- print("Using Novita provider, adapting parameters...")
127
- # Based on documentation, Novita uses specific parameters
128
- parameters = {
129
- "negative_prompt": negative_prompt,
130
- "num_frames": num_frames,
131
- "num_inference_steps": num_inference_steps,
132
- "guidance_scale": guidance_scale,
133
- "width": width,
134
- "height": height
135
- }
136
-
137
- # Add seed if specified
138
- if seed is not None and seed != -1:
139
- parameters["seed"] = seed
140
 
141
- elif provider == "replicate":
142
- print("Using Replicate provider, adapting parameters...")
143
- parameters = {
144
- "negative_prompt": negative_prompt,
145
- "num_frames": num_frames,
146
- "num_inference_steps": num_inference_steps,
147
- "guidance_scale": guidance_scale,
148
- "width": width,
149
- "height": height
150
- }
151
-
152
- # Add seed if specified
153
- if seed is not None and seed != -1:
154
- parameters["seed"] = seed
155
-
156
- # Replicate supports fps in some models
157
- if "stable-video-diffusion" in model_to_use:
158
- parameters["fps"] = fps
159
 
160
- else:
161
- # Default parameters for any other provider
162
- print(f"Using generic parameters for provider: {provider}")
163
- parameters = base_parameters
164
  parameters["num_frames"] = num_frames
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
  parameters["width"] = width
 
 
166
  parameters["height"] = height
167
- "height": height,
168
- "num_inference_steps": num_inference_steps,
169
- "guidance_scale": guidance_scale,
170
- }
171
 
172
- # Add motion_bucket_id if applicable (depends on the model)
173
- if motion_bucket_id is not None:
174
  parameters["motion_bucket_id"] = motion_bucket_id
175
-
176
- # Add seed if specified
177
- if seed is not None:
178
- parameters["seed"] = seed
179
 
180
- # For FalAI provider - may need specific formatting
181
- if provider == "fal-ai":
182
- print("Using FalAI provider, adapting parameters...")
183
- # According to Fal-AI API specification, only these parameters are supported
184
- parameters = {
185
- "negative_prompt": negative_prompt,
186
- "num_frames": num_frames,
187
- "num_inference_steps": num_inference_steps,
188
- "guidance_scale": guidance_scale,
189
- }
190
-
191
- # Add seed if specified
192
- if seed is not None and seed != -1:
193
- parameters["seed"] = seed
194
-
195
- # Note: width and height are not supported by Fal-AI's text_to_video API
196
- print("Note: width and height parameters are not supported by Fal-AI and will be ignored")
197
 
198
- # For Novita provider - may need specific formatting
199
- if provider == "novita":
200
- print("Using Novita provider, adapting parameters...")
201
- # Based on documentation, Novita uses text_to_video method
202
- try:
203
- # For Novita, we create a dedicated parameters object
204
- novita_params = {
205
- "negative_prompt": negative_prompt,
206
- "num_frames": num_frames,
207
- "fps": fps,
208
- "width": width,
209
- "height": height,
210
- "num_inference_steps": num_inference_steps,
211
- "guidance_scale": guidance_scale
212
- }
213
-
214
- # Add seed if specified
215
- if seed is not None:
216
- novita_params["seed"] = seed
217
-
218
- # For Novita, we use a different method from the InferenceClient
219
- video_data = client.text_to_video(
220
- prompt=prompt,
221
- model=model_to_use,
222
- **novita_params
223
- )
224
-
225
- # Save the video to a temporary file
226
- temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
227
- temp_file.write(video_data)
228
- video_path = temp_file.name
229
- temp_file.close()
230
-
231
- print(f"Video saved to temporary file: {video_path}")
232
- return video_path
233
-
234
- except Exception as e:
235
- print(f"Error during Novita video generation: {e}")
236
- return f"Error: {str(e)}"
237
-
238
- # For Replicate provider - may need specific formatting
239
  if provider == "replicate":
240
- print("Using Replicate provider, adapting parameters...")
241
- # Replicate might use different parameter formats
242
  try:
243
- # For Replicate, we use their specific method structure
244
  response = client.post(
245
  model=model_to_use,
246
  input={
247
  "prompt": prompt,
248
- "negative_prompt": negative_prompt,
249
- "num_frames": num_frames,
250
- "fps": fps,
251
- "width": width,
252
- "height": height,
253
- "num_inference_steps": num_inference_steps,
254
- "guidance_scale": guidance_scale,
255
- "seed": seed if seed is not None else 0,
256
  },
257
  )
258
 
@@ -268,16 +155,11 @@ def generate_video(
268
  print(f"Error during Replicate video generation: {e}")
269
  return f"Error: {str(e)}"
270
 
271
- # General approach for other providers
272
  try:
273
  print(f"Sending request to {provider} provider with model {model_to_use}.")
274
- print(f"Parameters: {parameters}")
275
 
276
- # Make sure prompt is not in parameters to avoid duplicate parameters
277
- if "prompt" in parameters:
278
- del parameters["prompt"]
279
-
280
- # Use the text_to_video method of the InferenceClient
281
  video_data = client.text_to_video(
282
  prompt=prompt,
283
  model=model_to_use,
@@ -369,40 +251,6 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
369
  lines=2
370
  )
371
 
372
- with gr.Row():
373
- num_inference_steps = gr.Slider(
374
- minimum=1,
375
- maximum=100,
376
- value=25,
377
- step=1,
378
- label="Inference Steps"
379
- )
380
-
381
- guidance_scale = gr.Slider(
382
- minimum=1.0,
383
- maximum=20.0,
384
- value=7.5,
385
- step=0.5,
386
- label="Guidance Scale"
387
- )
388
-
389
- with gr.Row():
390
- motion_bucket_id = gr.Slider(
391
- minimum=1,
392
- maximum=255,
393
- value=127,
394
- step=1,
395
- label="Motion Bucket ID (for SVD models)"
396
- )
397
-
398
- seed = gr.Slider(
399
- minimum=-1,
400
- maximum=2147483647,
401
- value=-1,
402
- step=1,
403
- label="Seed (-1 for random)"
404
- )
405
-
406
  with gr.Row():
407
  width = gr.Slider(
408
  minimum=256,
 
70
  # Create a unique ID for this generation
71
  generation_id = uuid.uuid4().hex[:8]
72
  print(f"Generation ID: {generation_id}")
 
 
 
 
 
 
 
 
 
 
 
73
 
74
+ # Define supported parameters for each provider
75
+ provider_param_support = {
76
+ "hf-inference": {
77
+ "supported": ["prompt", "model", "negative_prompt", "num_frames", "num_inference_steps", "guidance_scale", "seed"],
78
+ "extra_info": "HF Inference doesn't support 'fps', 'width', 'height', or 'motion_bucket_id' parameters"
79
+ },
80
+ "fal-ai": {
81
+ "supported": ["prompt", "model", "negative_prompt", "num_frames", "num_inference_steps", "guidance_scale", "seed"],
82
+ "extra_info": "Fal-AI doesn't support 'fps', 'width', 'height', or 'motion_bucket_id' parameters"
83
+ },
84
+ "novita": {
85
+ "supported": ["prompt", "model", "negative_prompt", "num_frames", "num_inference_steps", "guidance_scale", "seed", "fps", "width", "height"],
86
+ "extra_info": "Novita may not support 'motion_bucket_id' parameter"
87
+ },
88
+ "replicate": {
89
+ "supported": ["prompt", "model", "negative_prompt", "num_frames", "num_inference_steps", "guidance_scale", "seed", "fps", "width", "height"],
90
+ "extra_info": "Replicate parameters vary by specific model"
91
  }
92
+ }
 
 
 
 
 
 
 
93
 
94
+ # Get supported parameters for the current provider
95
+ supported_params = provider_param_support.get(provider, {}).get("supported", [])
96
+ provider_info = provider_param_support.get(provider, {}).get("extra_info", "No specific information available")
 
 
 
 
 
 
 
 
 
 
 
 
 
97
 
98
+ print(f"Provider info: {provider_info}")
99
+ print(f"Supported parameters: {supported_params}")
 
 
 
 
 
 
 
 
 
 
 
 
 
100
 
101
+ # Create a parameters dictionary with only supported parameters
102
+ parameters = {}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
 
104
+ if "negative_prompt" in supported_params:
105
+ parameters["negative_prompt"] = negative_prompt
106
+
107
+ if "num_frames" in supported_params:
108
  parameters["num_frames"] = num_frames
109
+
110
+ if "num_inference_steps" in supported_params:
111
+ parameters["num_inference_steps"] = num_inference_steps
112
+
113
+ if "guidance_scale" in supported_params:
114
+ parameters["guidance_scale"] = guidance_scale
115
+
116
+ if "seed" in supported_params and seed is not None:
117
+ parameters["seed"] = seed
118
+
119
+ if "fps" in supported_params:
120
+ parameters["fps"] = fps
121
+
122
+ if "width" in supported_params:
123
  parameters["width"] = width
124
+
125
+ if "height" in supported_params:
126
  parameters["height"] = height
 
 
 
 
127
 
128
+ if "motion_bucket_id" in supported_params:
 
129
  parameters["motion_bucket_id"] = motion_bucket_id
 
 
 
 
130
 
131
+ # Now that we have a clean parameter set, handle provider-specific logic
132
+ print(f"Final parameters for {provider}: {parameters}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
133
 
134
+ # For Replicate provider - uses post method
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
135
  if provider == "replicate":
136
+ print("Using Replicate provider, using post method...")
 
137
  try:
 
138
  response = client.post(
139
  model=model_to_use,
140
  input={
141
  "prompt": prompt,
142
+ **parameters
 
 
 
 
 
 
 
143
  },
144
  )
145
 
 
155
  print(f"Error during Replicate video generation: {e}")
156
  return f"Error: {str(e)}"
157
 
158
+ # For all other providers, use the standard text_to_video method
159
  try:
160
  print(f"Sending request to {provider} provider with model {model_to_use}.")
 
161
 
162
+ # Use the text_to_video method of the InferenceClient with only supported parameters
 
 
 
 
163
  video_data = client.text_to_video(
164
  prompt=prompt,
165
  model=model_to_use,
 
251
  lines=2
252
  )
253
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
254
  with gr.Row():
255
  width = gr.Slider(
256
  minimum=256,