<fix> fix a gradio version bug
- app.py +2 -1
- requirements.txt +1 -1
app.py
CHANGED
@@ -103,6 +103,7 @@ def init_basemodel():
 
 
 @spaces.GPU
+@gr.queue(concurrency_count=1)
 def process_image_and_text(condition_image, target_prompt, condition_image_prompt, task, random_seed, num_steps, inpainting, fill_x1, fill_x2, fill_y1, fill_y2):
     # set up the model
     with pipe_lock:
@@ -528,4 +529,4 @@ def create_app():
 
 if __name__ == "__main__":
     init_basemodel()
-    create_app().launch(debug=True, ssr_mode=False, max_threads=1
+    create_app().launch(debug=True, ssr_mode=False, max_threads=1)
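For context on the change: both the new `@gr.queue(concurrency_count=1)` decorator and the existing `pipe_lock` aim to keep only one request on the GPU at a time. Below is a minimal, self-contained sketch of that serialization pattern, assuming a module-level lock as in the diff; `run_pipeline` is a hypothetical stand-in for the Space's actual diffusion pipeline call, not its real code.

import threading

pipe_lock = threading.Lock()   # one lock shared by every request handler

def run_pipeline(prompt):
    # stand-in for the real GPU-bound pipeline call; illustrative only
    return f"image generated for: {prompt}"

def process_image_and_text(target_prompt):
    # hold the lock for the whole GPU-bound section, as the diff does with `with pipe_lock:`
    with pipe_lock:
        return run_pipeline(target_prompt)

print(process_image_and_text("a watercolor fox"))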
requirements.txt
CHANGED
@@ -3,7 +3,7 @@ torchvision==0.20.1
 diffusers==0.33.1
 transformers==4.45.0
 flash-attn @ https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.3/flash_attn-2.7.3+cu12torch2.5cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
-gradio
+gradio==5.33.2
 omegaconf
 peft
 opencv-python
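The requirements change pins gradio to an exact release instead of floating to whatever is latest. An optional startup guard like the one below (not part of the original app, purely an illustrative sketch) can surface an environment that has drifted from the pin:

import gradio as gr

EXPECTED_GRADIO = "5.33.2"   # the version pinned in requirements.txt

# fail fast if the installed gradio does not match the pinned version
if gr.__version__ != EXPECTED_GRADIO:
    raise RuntimeError(f"expected gradio {EXPECTED_GRADIO}, found {gr.__version__}")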