from huggingface_hub import from_pretrained_fastai
import gradio as gr
from fastai.vision.all import *
from fastai.metrics import JaccardCoeffMulti
from albumentations import (
    Compose, OneOf, ElasticTransform, GridDistortion, OpticalDistortion,
    HorizontalFlip, VerticalFlip, Rotate, Transpose, CLAHE,
    ShiftScaleRotate, RandomScale
)
import numpy as np


def get_y_fn(x):
    # Map an image path to its label-mask path (Images/color_*.jpg -> Labels/gt_*.png).
    return Path(str(x).replace("Images", "Labels").replace("color", "gt").replace(".jpg", ".png"))


# The custom transforms below must be defined here so that from_pretrained_fastai
# can unpickle the exported learner, which references them.
class SegmentationAlbumentationsTransform(ItemTransform):
    split_idx = 0  # apply the albumentations pipeline to the training split only

    def __init__(self, aug):
        self.aug = aug

    def encodes(self, x):
        img, mask = x
        aug = self.aug(image=np.array(img), mask=np.array(mask))
        return PILImage.create(aug["image"]), PILMask.create(aug["mask"])


class TargetMaskConvertTransform(ItemTransform):
    # Remap the raw grayscale label values to contiguous class indices 0-4.
    def __init__(self):
        pass

    def encodes(self, x):
        img, mask = x
        mask = np.array(mask)
        new_mask = np.zeros_like(mask, dtype=np.uint8)
        new_mask[mask == 150] = 1                  # Class: Leaves
        new_mask[(mask == 29) | (mask == 25)] = 2  # Class: Wood
        new_mask[(mask == 74) | (mask == 76)] = 3  # Class: Pole
        new_mask[mask == 255] = 4                  # Class: Wood
        mask = PILMask.create(new_mask)
        return img, mask


repo_id = "joortif/unet-resnet34-segmentation"
learn = from_pretrained_fastai(repo_id)


def segment_image(img):
    pred, _, _ = learn.predict(img)
    # pred is a TensorMask of class indices (0-4); convert it to a uint8 numpy
    # array and stretch the values so Gradio can render it as a visible image.
    mask = np.array(pred, dtype=np.uint8)
    return mask * 51


interface = gr.Interface(
    fn=segment_image,
    inputs=gr.Image(),
    outputs=gr.Image(),
    examples=["color_184.jpg", "color_154.jpg", "color_180.jpg"],
    title="Semantic Segmentation with FastAI",
).launch(share=False)
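

# ---------------------------------------------------------------------------
# Optional sketch (an addition, not part of the original app): render the
# prediction as an RGB overlay instead of a scaled grayscale mask. The colour
# palette and the name segment_image_color are assumptions chosen for
# illustration; to use it, move these definitions above the gr.Interface call
# and pass fn=segment_image_color instead of fn=segment_image.
# ---------------------------------------------------------------------------
PALETTE = np.array([
    [0, 0, 0],       # 0: background
    [0, 180, 0],     # 1: Leaves
    [139, 69, 19],   # 2: Wood
    [255, 215, 0],   # 3: Pole
    [200, 0, 0],     # 4: class mapped from raw label value 255
], dtype=np.uint8)


def segment_image_color(img):
    pred, _, _ = learn.predict(img)
    mask = np.array(pred, dtype=np.uint8)  # (H, W) class indices
    return PALETTE[mask]                   # (H, W, 3) uint8 RGB image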