import cv2
import numpy as np
import os
import pickle
from deepface import DeepFace
import gradio as gr
from datetime import datetime
import fast_colorthief
import webcolors
from PIL import Image
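
# Runtime assets expected alongside this script: 'coco.names' (class labels)
# and the SSD MobileNet v3 graph/config files loaded below. DeepFace typically
# downloads its own emotion-model weights on first use.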

thres = 0.45  # confidence threshold for object detection

# Load the COCO class labels, one per line
classNames = []
classFile = 'coco.names'
with open(classFile, 'rt') as f:
    # classNames = f.read().rstrip('\n').split('\n')
    classNames = f.readlines()
# strip trailing newline characters
classNames = [x.strip() for x in classNames]
print(classNames)
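
# SSD MobileNet v3 (COCO) object detector, served through OpenCV's DNN
# DetectionModel API: inputs are resized to 320x320 and normalised to roughly
# [-1, 1] (scale 1/127.5, mean 127.5), with the R/B channels swapped.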
configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'

net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)


def main(image):
    # Grayscale / RGB copies of the input (not used by the steps below)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    names = []

    # --- Object detection ---
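    # net.detect returns (classIds, confidences, boxes); the COCO ids from this
    # model start at 1, hence the classNames[classId - 1] lookup.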
    try:
        classIds, confs, bbox = net.detect(image, confThreshold=thres)
        print(classIds, bbox)
        if len(classIds) != 0:
            for classId, confidence, box in zip(classIds.flatten(), confs.flatten(), bbox):
                if classNames[classId - 1] not in names:
                    names.append(classNames[classId - 1])
    except Exception as err:
        print(err)
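
    # --- Emotion ---
    # enforce_detection=False makes DeepFace return its best guess instead of
    # raising when no face is found; the except branch is a further safety net.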
    try:
        face_analysis = DeepFace.analyze(image, actions=['emotion'], enforce_detection=False)
        # Newer DeepFace releases return a list of per-face dicts, older ones a single dict
        if isinstance(face_analysis, list):
            face_analysis = face_analysis[0]
        names.append(face_analysis["dominant_emotion"])
    except Exception:
        print("No face")
        names.append("No Face")
    colourimage = Image.fromarray(image).convert('RGBA')
    colourimage = np.array(colourimage).astype(np.uint8)
    palette = fast_colorthief.get_palette(colourimage)
    for (r_p, g_p, b_p) in palette:
        diff = {}
        for color_hex, color_name in webcolors.CSS3_HEX_TO_NAMES.items():
            r, g, b = webcolors.hex_to_rgb(color_hex)
            diff[(r - r_p) ** 2 + (g - g_p) ** 2 + (b - b_p) ** 2] = color_name
        closest_name = diff[min(diff.keys())]
        if closest_name not in names:
            names.append(closest_name)
    return ' '.join(names)
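

# Note: Gradio hands the uploaded image to main() as an RGB numpy array, while
# the cv2-based code above was written for BGR frames, so channel ordering is
# not strictly consistent across the pipeline.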
interface = gr.Interface(
    fn=main,
    inputs=["image"],
    outputs=[gr.Textbox(label='Detected objects, emotion and colours')],
    title='Color Object Emotion',
    description='This Space:\n1) Object Detection\n2) Detect Emotion\n3) Detect Colors\n')
interface.launch(inline=False)
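
# Running this script starts a local Gradio server; the interface is served at
# the URL printed to the console (http://127.0.0.1:7860 by default).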