import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from diffusers import StableDiffusionPipeline
import torch

# meta-llama checkpoints are gated: accept the license on the Hugging Face Hub
# and authenticate (e.g. `huggingface-cli login`) before loading.
model_name = "meta-llama/Llama-2-7b-chat-hf"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # fp16 halves memory; the 7B model still needs ~14 GB
    device_map="auto",          # requires `accelerate`; places weights on available GPUs
)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Load Stable Diffusion and move it to the GPU when one is available,
# rather than assuming CUDA unconditionally.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

def generate_text(prompt):
    # do_sample=True is required for temperature/top_p to take effect;
    # max_new_tokens bounds new tokens only (max_length also counts the prompt).
    response = generator(
        prompt,
        max_new_tokens=60,
        num_return_sequences=1,
        do_sample=True,
        temperature=0.5,
        top_p=0.85,
    )
    return response[0]["generated_text"]

def generate_image(prompt):
    # The pipeline returns a batch; take the first (and only) image.
    image = pipe(prompt).images[0]
    return image

iface_text = gr.Interface(fn=generate_text, inputs="text", outputs="text", description="Text generator with Llama/Nous")
iface_image = gr.Interface(fn=generate_image, inputs="text", outputs="image", description="Image generator with Stable Diffusion")

app = gr.TabbedInterface([iface_text, iface_image], ["Text", "Image"])
app.launch()
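
# A possible refinement (my suggestion, not part of the original snippet):
# both handlers can take several seconds per request on a GPU, so enabling
# Gradio's request queue before launching helps avoid client timeouts:
#
#   app.queue()
#   app.launch()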