Spaces:
Paused
Paused
Upload 5 files
Browse files
- app.py +3 -3
- fl2basepromptgen.py +3 -3
- fl2flux.py +1 -1
- fl2sd3longcap.py +1 -1
app.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
-
import gradio as gr
|
| 2 |
import spaces
|
|
|
|
| 3 |
|
| 4 |
from utils import (
|
| 5 |
gradio_copy_text,
|
|
@@ -39,7 +39,7 @@ def description_ui2():
|
|
| 39 |
[Lamini-Prompt-Enchance](https://huggingface.co/gokaygokay/Lamini-Prompt-Enchance),\
|
| 40 |
[Lamini-Prompt-Enchance-Long](https://huggingface.co/gokaygokay/Lamini-Prompt-Enchance-Long),\
|
| 41 |
[Flux-Prompt-Enhance](https://huggingface.co/gokaygokay/Flux-Prompt-Enhance),\
|
| 42 |
-
MiaoshouAI's [Florence-2-base-PromptGen](https://huggingface.co/MiaoshouAI/Florence-2-base-PromptGen).
|
| 43 |
"""
|
| 44 |
)
|
| 45 |
|
|
@@ -55,7 +55,7 @@ def main():
|
|
| 55 |
input_tag_type = gr.Radio(label="Convert tags to", info="danbooru for Animagine, e621 for Pony.", choices=["danbooru", "e621"], value="danbooru")
|
| 56 |
recom_prompt = gr.Radio(label="Insert reccomended prompt", choices=["None", "Animagine", "Pony"], value="None", interactive=True)
|
| 57 |
keep_tags = gr.Radio(label="Remove tags leaving only the following", choices=["body", "dress", "all"], value="all")
|
| 58 |
-
image_algorithms = gr.CheckboxGroup(["Use WD Tagger", "Use Florence-2-SD3-Long-Captioner", "Use Florence-2-base-PromptGen", "Use Florence-2-Flux","Use Florence-2-Flux-Large"], label="Algorithms", value=["Use WD Tagger", "Use Florence-2-Flux"])
|
| 59 |
generate_from_image_btn = gr.Button(value="GENERATE TAGS FROM IMAGE", size="lg", variant="primary")
|
| 60 |
with gr.Group():
|
| 61 |
with gr.Row():
|
|
|
|
|
|
|
| 1 |
import spaces
|
| 2 |
+
import gradio as gr
|
| 3 |
|
| 4 |
from utils import (
|
| 5 |
gradio_copy_text,
|
|
|
|
| 39 |
[Lamini-Prompt-Enchance](https://huggingface.co/gokaygokay/Lamini-Prompt-Enchance),\
|
| 40 |
[Lamini-Prompt-Enchance-Long](https://huggingface.co/gokaygokay/Lamini-Prompt-Enchance-Long),\
|
| 41 |
[Flux-Prompt-Enhance](https://huggingface.co/gokaygokay/Flux-Prompt-Enhance),\
|
| 42 |
+
MiaoshouAI's [Florence-2-base-PromptGen-v1.5](https://huggingface.co/MiaoshouAI/Florence-2-base-PromptGen-v1.5).
|
| 43 |
"""
|
| 44 |
)
|
| 45 |
|
|
|
|
| 55 |
input_tag_type = gr.Radio(label="Convert tags to", info="danbooru for Animagine, e621 for Pony.", choices=["danbooru", "e621"], value="danbooru")
|
| 56 |
recom_prompt = gr.Radio(label="Insert reccomended prompt", choices=["None", "Animagine", "Pony"], value="None", interactive=True)
|
| 57 |
keep_tags = gr.Radio(label="Remove tags leaving only the following", choices=["body", "dress", "all"], value="all")
|
| 58 |
+
image_algorithms = gr.CheckboxGroup(["Use WD Tagger", "Use Florence-2-SD3-Long-Captioner", "Use Florence-2-base-PromptGen", "Use Florence-2-Flux", "Use Florence-2-Flux-Large"], label="Algorithms", value=["Use WD Tagger", "Use Florence-2-Flux"])
|
| 59 |
generate_from_image_btn = gr.Button(value="GENERATE TAGS FROM IMAGE", size="lg", variant="primary")
|
| 60 |
with gr.Group():
|
| 61 |
with gr.Row():
|
fl2basepromptgen.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
-
from transformers import AutoProcessor, AutoModelForCausalLM
|
| 2 |
import spaces
|
|
|
|
| 3 |
from PIL import Image
|
| 4 |
import torch
|
| 5 |
|
|
@@ -9,8 +9,8 @@ subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENT
|
|
| 9 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 10 |
|
| 11 |
try:
|
| 12 |
-
fl_model = AutoModelForCausalLM.from_pretrained('MiaoshouAI/Florence-2-base-PromptGen', trust_remote_code=True).to("cpu").eval()
|
| 13 |
-
fl_processor = AutoProcessor.from_pretrained('MiaoshouAI/Florence-2-base-PromptGen', trust_remote_code=True)
|
| 14 |
except Exception as e:
|
| 15 |
print(e)
|
| 16 |
fl_model = fl_processor = None
|
|
|
|
|
|
|
| 1 |
import spaces
|
| 2 |
+
from transformers import AutoProcessor, AutoModelForCausalLM
|
| 3 |
from PIL import Image
|
| 4 |
import torch
|
| 5 |
|
|
|
|
| 9 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 10 |
|
| 11 |
try:
|
| 12 |
+
fl_model = AutoModelForCausalLM.from_pretrained('MiaoshouAI/Florence-2-base-PromptGen-v1.5', trust_remote_code=True).to("cpu").eval()
|
| 13 |
+
fl_processor = AutoProcessor.from_pretrained('MiaoshouAI/Florence-2-base-PromptGen-v1.5', trust_remote_code=True)
|
| 14 |
except Exception as e:
|
| 15 |
print(e)
|
| 16 |
fl_model = fl_processor = None
|
fl2flux.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
-
from transformers import AutoProcessor, AutoModelForCausalLM
|
| 2 |
import spaces
|
|
|
|
| 3 |
import re
|
| 4 |
from PIL import Image
|
| 5 |
import torch
|
|
|
|
|
|
|
| 1 |
import spaces
|
| 2 |
+
from transformers import AutoProcessor, AutoModelForCausalLM
|
| 3 |
import re
|
| 4 |
from PIL import Image
|
| 5 |
import torch
|
fl2sd3longcap.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
-
from transformers import AutoProcessor, AutoModelForCausalLM
|
| 2 |
import spaces
|
|
|
|
| 3 |
import re
|
| 4 |
from PIL import Image
|
| 5 |
import torch
|
|
|
|
|
|
|
| 1 |
import spaces
|
| 2 |
+
from transformers import AutoProcessor, AutoModelForCausalLM
|
| 3 |
import re
|
| 4 |
from PIL import Image
|
| 5 |
import torch
|