from __future__ import annotations

import os

from huggingface_hub import whoami
import gradio as gr
from diffusers import DiffusionPipeline

def generate_image_and_prompt(prompt: str):
    # # Create an "improved" prompt (simple example)
    # improved_prompt = f"Improved: {prompt}"

    # # Load the DiffusionPipeline
    # pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev")
    # pipe.load_lora_weights("EvanZhouDev/open-genmoji")

    # # Generate an image from the prompt
    # image = pipe(prompt).images[0]
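    # To re-enable real generation, uncomment the pipeline code above. Note that
    # black-forest-labs/FLUX.1-dev is a gated model on the Hugging Face Hub, so the
    # runtime needs an authenticated token with access granted to that repository.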
    # Placeholder values while the pipeline code above is disabled
    image = None
    improved_prompt = None

    # Return the improved prompt, the image, and the OAUTH_CLIENT_ID environment variable
    return improved_prompt, image, os.getenv("OAUTH_CLIENT_ID")

# Create the Gradio interface
with gr.Blocks() as demo:
    gr.LoginButton()
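    # gr.LoginButton adds "Sign in with Hugging Face" OAuth; on Spaces this assumes
    # hf_oauth is enabled in the README metadata, which also sets the
    # OAUTH_CLIENT_ID environment variable read in generate_image_and_prompt.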
    # Input Textbox for the user prompt
    input_prompt = gr.Textbox(label="Input Prompt", placeholder="Enter your prompt here")
    
    # Output Textbox for the generated improved prompt
    improved_prompt_output = gr.Textbox(label="Improved Prompt")
    
    # Output Image for the generated image
    generated_image_output = gr.Image(label="Generated Image")
    access_token = gr.Textbox(label="Access token")
    
    # Button to trigger the generation
    generate_btn = gr.Button("Generate")
    
    # Set up the event listener for the button click
    generate_btn.click(
        fn=generate_image_and_prompt,
        inputs=input_prompt,
        outputs=[improved_prompt_output, generated_image_output, access_token]
    )

# Launch the Gradio app
if __name__ == "__main__":
    demo.launch(show_error=True)