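# Gradio app: detects objects (SSD MobileNet v3 trained on COCO), the dominant
# facial emotion (DeepFace) and the dominant colours (fast_colorthief mapped to
# CSS3 colour names via webcolors) in an uploaded image, and returns the
# combined labels as a single space-separated string.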
import cv2
import numpy as np
import os
import pickle
from deepface import DeepFace
import gradio as gr
from datetime import datetime
import fast_colorthief
import webcolors
from PIL import Image



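# Files expected alongside this script: coco.names (COCO class labels),
# ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt and frozen_inference_graph.pb
# (the SSD MobileNet v3 model definition and weights).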
# Confidence threshold for object detection
thres = 0.45

# Load the COCO class labels, one name per line
classFile = 'coco.names'
with open(classFile, 'rt') as f:
  classNames = [line.strip() for line in f.readlines()]
print(classNames)

# SSD MobileNet v3 detector and its input pre-processing settings
configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'
net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)



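# Main pipeline: given one uploaded image, run object detection, emotion
# analysis and colour extraction, and return all detected labels as one string.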
def main(image):
  # Gradio supplies the uploaded image as an RGB numpy array
  names = []

  # Object detection
  classIds, confs, bbox = [], [], []
  try:
    classIds, confs, bbox = net.detect(image, confThreshold=thres)
  except Exception as err:
    print(err)
  print(classIds, bbox)
  try:
    if len(classIds) != 0:
      for classId, confidence, box in zip(classIds.flatten(), confs.flatten(), bbox):
        # COCO class IDs are 1-based, so offset into the 0-based label list
        if classNames[classId - 1] not in names:
          names.append(classNames[classId - 1])
  except Exception as err:
    print(err)
  # Emotion detection with DeepFace
  try:
    face_analysis_2 = DeepFace.analyze(image, actions=['emotion'], enforce_detection=False)
    # Newer DeepFace releases return a list of result dicts; older ones return a single dict
    if isinstance(face_analysis_2, list):
      face_analysis_2 = face_analysis_2[0]
    names.append(face_analysis_2["dominant_emotion"])
  except Exception:
    print("No face")
    names.append("No Face")

  # Dominant colours: extract a palette, then map each palette entry to the
  # nearest CSS3 colour name by squared RGB distance
  colourimage = Image.fromarray(image)
  colourimage = colourimage.convert('RGBA')
  colourimage = np.array(colourimage).astype(np.uint8)
  palette = fast_colorthief.get_palette(colourimage)

  for palette_colour in palette:
    diff = {}
    for color_hex, color_name in webcolors.CSS3_HEX_TO_NAMES.items():
      r, g, b = webcolors.hex_to_rgb(color_hex)
      diff[(r - palette_colour[0]) ** 2 +
           (g - palette_colour[1]) ** 2 +
           (b - palette_colour[2]) ** 2] = color_name
    closest_name = diff[min(diff.keys())]
    if closest_name not in names:
      names.append(closest_name)

  return ' '.join(names)


interface = gr.Interface(fn=main,
                         inputs=["image"],
                         outputs=[gr.outputs.Textbox(label='Detected objects, emotion and colors')],
                         title='Face Recognition',
                         description='This Space:\n \n1) Recognizes Faces \n2) Detects Emotions \n3) Detects Colors \n4) Detects Objects \n<br>This Space is for personal use. For the time being, it only recognizes a few people. To use it, simply duplicate the Space and replace the images in the image folder.')

interface.launch(inline=False)