import gradio as gr
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForObjectDetection

# Load the object-detection model and its image processor
ckpt = 'yainage90/fashion-object-detection'
processor = AutoImageProcessor.from_pretrained(ckpt)
model = AutoModelForObjectDetection.from_pretrained(ckpt)

# Manually defined average prices per garment type
PRECIOS = {
    'top': 5000,
    'bottom': 15000,
    'shoes': 30000,
    'bag': 20000,
    'outer': 25000
}

def procesar_imagen(img):
    # Run the detector on the uploaded image; no gradients are needed at inference time
    inputs = processor(images=[img], return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    results = processor.post_process_object_detection(
        outputs, threshold=0.4, target_sizes=[(img.height, img.width)]
    )[0]

    # Add up the estimated price of every detected garment
    total = 0
    detalle = []
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        prenda = model.config.id2label[label.item()]
        precio = PRECIOS.get(prenda, 0)
        total += precio
        detalle.append(f"{prenda}: ${precio}")
    texto = "\n".join(detalle + [f"\nTotal aprox.: ${total}"])
    return texto

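# Minimal local sanity check (a sketch, assuming a local image file exists;
# 'outfit.jpg' is a hypothetical filename). Uncomment to exercise
# procesar_imagen without launching the Gradio UI:
# if __name__ == "__main__":
#     demo_img = Image.open("outfit.jpg").convert("RGB")
#     print(procesar_imagen(demo_img))
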
iface = gr.Interface(fn=procesar_imagen,
                     inputs=gr.Image(type="pil"),
                     outputs="textbox",
                     title="Cuánto vale tu outfit?")
iface.launch()