from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import torch
import random
import numpy as np
import pythreejs as p3js
from skimage.measure import find_contours
from enum import Enum
import json
from flask import Flask
from flask import request
import base64
import io
import os

# Cache locations must be set before transformers is imported, otherwise the
# default cache directory has already been resolved.
os.environ['SENTENCE_TRANSFORMERS_HOME'] = '/code/.cache'
os.environ['TRANSFORMERS_CACHE'] = '/code/.cache'

from transformers import MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation

# Load the ADE20K-trained MaskFormer checkpoint once at startup and keep it on CPU.
device = torch.device("cpu")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-large-ade").to(device)
model.eval()
preprocessor = MaskFormerFeatureExtractor.from_pretrained("facebook/maskformer-swin-large-ade")
# ADE20K semantic label ids used by the MaskFormer ADE checkpoint.
class LABEL_TYPE(Enum):
    WINDOW = 8
    WALL = 0
    FLOOR = 3
def query_image(img):
    """Run semantic segmentation on an RGB image array and return an (H, W) label map."""
    target_size = (img.shape[0], img.shape[1])
    inputs = preprocessor(images=img, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    outputs.class_queries_logits = outputs.class_queries_logits.cpu()
    outputs.masks_queries_logits = outputs.masks_queries_logits.cpu()
    results = preprocessor.post_process_segmentation(outputs=outputs, target_size=target_size)[0].cpu().detach()
    results = torch.argmax(results, dim=0).numpy()
    return results
def find_boundary(label_value, mask):
    # Contours of the binary mask for a single label, at the 0.5 iso-level.
    contours = find_contours(mask == label_value, 0.5, fully_connected="high")
    return contours
def send_response(mask):
    # Flatten each contour into a list of string coordinates so it can be JSON-serialized.
    vertices = []
    for contour in mask:
        ar = contour.astype(str)
        vertices.extend(ar.ravel())
    return vertices
def extract_window_edges(window_contours):
    # Axis-aligned bounding-box corners for each window contour, as strings.
    windows = []
    for contour in window_contours:
        min_x = str(np.min(contour[:, 0]))
        max_x = str(np.max(contour[:, 0]))
        min_y = str(np.min(contour[:, 1]))
        max_y = str(np.max(contour[:, 1]))
        windows.append(
            [
                [min_x, min_y],
                [min_x, max_y],
                [max_x, min_y],
                [max_x, max_y],
            ]
        )
    return windows
app = Flask(__name__)
def process(image, items):
    # Never called in this app; `detector` is not defined anywhere in this file.
    preds = detector("https://visualization.graberblinds.com/assets/sample_sessions/02e1d080-c4bf-4cdc-b1bc-f39f9b2a2230_thumb.jpg", candidate_labels=items)
    return preds
# Assumed route and method (the registration is not shown in the source):
# answer CORS preflight requests with permissive headers.
@app.route("/", methods=["OPTIONS"])
def cors():
    response = app.make_response("ok")
    response.headers.add("Access-Control-Allow-Origin", "*")
    response.headers.add("Access-Control-Allow-Headers", "*")
    response.headers.add("Access-Control-Allow-Methods", "*")
    return response
# Assumed route and method (the registration is not shown in the source).
@app.route("/", methods=["POST"])
def detect():
    body = request.get_json()
    base64_str = body['img']
    # Assuming base64_str is the string value without 'data:image/jpeg;base64,'.
    # Decode the payload and force 3-channel RGB before preprocessing.
    img = Image.open(io.BytesIO(base64.decodebytes(bytes(base64_str, "utf-8")))).convert("RGB")
    numpydata = np.asarray(img)
    mask = query_image(numpydata)
    window_mask = find_boundary(LABEL_TYPE.WINDOW.value, mask)
    floor_mask = find_boundary(LABEL_TYPE.FLOOR.value, mask)
    # find_contours returns (row, col) coordinates; flip to (x, y) pixel order.
    window_contours = [np.fliplr(ctr).astype(np.int32) for ctr in window_mask]
    floor_contours = [np.fliplr(ctr).astype(np.int32) for ctr in floor_mask]
    windows_vertices = extract_window_edges(window_contours)
    w = send_response(window_contours)
    f = send_response(floor_contours)
    response = app.make_response(json.dumps({
        "window_contours": w,
        "floor_contours": f,
        "windows_vertices": windows_vertices
    }))
    response.headers.add("Access-Control-Allow-Origin", "*")
    response.headers.add("Access-Control-Allow-Headers", "*")
    response.headers.add("Access-Control-Allow-Methods", "*")
    response.content_type = "application/json"
    return response
if __name__ == "__main__":
    # Hugging Face Spaces expects the app to listen on port 7860.
    app.run(debug=True, host="0.0.0.0", port=7860)
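
For reference, a minimal client sketch for exercising the service. The URL, route path, and file name are assumptions (the original route registration is not shown); the payload shape matches what detect() reads from the request body.

# Hypothetical client: POST a base64-encoded JPEG to the segmentation service
# and inspect the returned contours. URL, route, and file name are assumptions,
# not part of the original app.
import base64
import requests

with open("sample.jpg", "rb") as fh:
    # The server expects the raw base64 string, without a 'data:image/jpeg;base64,' prefix.
    payload = {"img": base64.b64encode(fh.read()).decode("utf-8")}

resp = requests.post("http://localhost:7860/", json=payload, timeout=300)
resp.raise_for_status()

result = resp.json()
print("window corner points:", result["windows_vertices"])
print("floor contour values returned:", len(result["floor_contours"]))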