BilalSardar committed
Commit 9f8ec13
1 Parent(s): dc0136b

Create app.py

Files changed (1)
  1. app.py +163 -0
app.py ADDED
@@ -0,0 +1,163 @@
+ import cv2
+ import numpy as np
+ import face_recognition
+ import os
+ import pickle
+ from deepface import DeepFace
+ import gradio as gr
+ from datetime import datetime
+ import fast_colorthief
+ import webcolors
+ from PIL import Image
+
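+ # Known faces are persisted in "encodings.pickle": a list of dicts, each with
+ # "name", "loc" (face location) and "encoding" (a 128-d face descriptor), so
+ # recognition results accumulate across runs.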
+ data1 = []
+ try:
+     data = pickle.loads(open("encodings.pickle", "rb").read())
+     data1 = pickle.loads(open("encodings.pickle", "rb").read())
+     encodings = [d["encoding"] for d in data]
+     personNames = [d["name"] for d in data]
+ except Exception:
+     # first run, or unreadable pickle: start with no known faces
+     data = []
+     data1 = []
+     encodings = []
+     personNames = []
+
+ print("[INFO] loaded %d known encodings" % len(encodings))
+
+ print('All Encodings Complete!!!')
+
+
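+ # Object-detection setup: SSD MobileNet v3 trained on COCO, run through
+ # OpenCV's dnn module.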
+ thres = 0.45  # detection confidence threshold
+ path = 'images'
+ images = []
+ classNames = []
+ classFile = 'coco.names'
+ with open(classFile, 'rt') as f:
+     classNames = f.readlines()
+
+ # remove newline characters
+ classNames = [x.strip() for x in classNames]
+ print(classNames)
+ configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
+ weightsPath = 'frozen_inference_graph.pb'
+ net = cv2.dnn_DetectionModel(weightsPath, configPath)
+ net.setInputSize(320, 320)
+ net.setInputScale(1.0 / 127.5)
+ net.setInputMean((127.5, 127.5, 127.5))
+ net.setInputSwapRB(True)
+
+
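+ # Helper to batch-encode known faces; currently unused but kept for seeding
+ # the encodings list from a folder of images.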
+ def faceEncodings(images):
+     encodeList = []
+     for img in images:
+         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+         found = face_recognition.face_encodings(img)
+         if found:  # skip images with no detectable face
+             encodeList.append(found[0])
+     return encodeList
+
+
+ #encodeListKnown = faceEncodings(images)
+
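+ # Main pipeline: object detection -> emotion -> dominant colours -> face
+ # recognition, all appended to one list of labels.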
+ def main(image):
+     personsinimage = []
+     index = len(encodings)
+     names = []
+
+     # Object detection: record each detected COCO class name once
+     classIds, confs, bbox = [], [], []
+     try:
+         classIds, confs, bbox = net.detect(image, confThreshold=thres)
+     except Exception as err:
+         print(err)
+     print(classIds, bbox)
+     try:
+         if len(classIds) != 0:
+             for classId, confidence, box in zip(classIds.flatten(), confs.flatten(), bbox):
+                 # COCO class IDs are 1-based; offset into the 0-based name list
+                 if names.count(classNames[classId - 1]) == 0:
+                     names.append(classNames[classId - 1])
+     except Exception as err:
+         print(err)
+     # Emotion detection
+     try:
+         face_analysis_2 = DeepFace.analyze(image, actions=['emotion'], enforce_detection=False)
+         names.append(face_analysis_2["dominant_emotion"])
+     except Exception:
+         print("No face")
+         names.append("No Face")
+
+     # Colour: extract a palette, then map each palette colour to its nearest
+     # CSS3 colour name by squared distance in RGB space
+     colourimage = Image.fromarray(image)
+     colourimage = colourimage.convert('RGBA')
+     colourimage = np.array(colourimage).astype(np.uint8)
+     palette = fast_colorthief.get_palette(colourimage)
+
+     for i in range(len(palette)):
+         diff = {}
+         for color_hex, color_name in webcolors.CSS3_HEX_TO_NAMES.items():
+             r, g, b = webcolors.hex_to_rgb(color_hex)
+             diff[sum([(r - palette[i][0]) ** 2,
+                       (g - palette[i][1]) ** 2,
+                       (b - palette[i][2]) ** 2])] = color_name
+         if names.count(diff[min(diff.keys())]) == 0:
+             names.append(diff[min(diff.keys())])
+
+
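+     # Face recognition: compare every face in the frame against the known
+     # encodings; unmatched faces are registered under a fresh "unknownN" label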
+     faces = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+     facesCurrentFrame = face_recognition.face_locations(faces)
+     encodesCurrentFrame = face_recognition.face_encodings(faces, facesCurrentFrame)
+
+     for encodeFace, faceLoc in zip(encodesCurrentFrame, facesCurrentFrame):
+         if len(data1) > 0:
+             matches = face_recognition.compare_faces(encodings, encodeFace)
+             faceDis = face_recognition.face_distance(encodings, encodeFace)
+             # print(faceDis)
+             matchIndex = np.argmin(faceDis)
+
+             if matches[matchIndex]:
+                 name = personNames[matchIndex].upper()
+                 personsinimage.append(name)
+                 names.append(name)
+             else:
+                 name = "unknown" + str(index)
+                 data1.append({"name": name, "loc": faceLoc, "encoding": encodeFace})
+                 encodings.append(encodeFace)
+                 names.append(name)
+                 personNames.append(name)
+                 personsinimage.append(name)
+                 index += 1
+         else:
+             # no known faces yet: register this one the same way
+             name = "unknown" + str(index)
+             data1.append({"name": name, "loc": faceLoc, "encoding": encodeFace})
+             encodings.append(encodeFace)
+             names.append(name)
+             personNames.append(name)
+             personsinimage.append(name)
+             index += 1
+
+     # dump the facial encodings data to disk
+     print("[INFO] serializing encodings...")
+     with open("encodings.pickle", "wb") as f:
+         f.write(pickle.dumps(data1))
+
+     return ' '.join(names)
+
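+ # Gradio UI: one image in, one text box out with all detected labels joined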
+ interface = gr.Interface(fn=main,
+                          inputs=["image"],
+                          outputs=[gr.outputs.Textbox(label='Names of person in image')],
+                          title='Face Recognition',
+                          description='This Space:\n1) Recognizes faces\n2) Detects emotions\n3) Detects colours\n4) Detects objects\n<br>This Space is for personal use. For the time being, it only recognizes a few people. To use it, simply duplicate the Space and replace the images in the images folder.')
+
+
+ interface.launch(inline=False)
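
The description tells users to replace the images in the images folder, but app.py only ever builds encodings.pickle at runtime from faces it sees. A minimal seeding sketch, not part of this commit, that writes the same pickle format main() reads; it assumes each file in images/ is named after the person it shows (e.g. images/Alice.jpg):

import os
import pickle

import cv2
import face_recognition

# Hypothetical one-off script: seed encodings.pickle from labeled images.
data = []
for fname in os.listdir('images'):
    img = cv2.imread(os.path.join('images', fname))
    if img is None:
        continue  # not an image file
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    locs = face_recognition.face_locations(rgb)
    encs = face_recognition.face_encodings(rgb, locs)
    if encs:
        data.append({"name": os.path.splitext(fname)[0],
                     "loc": locs[0], "encoding": encs[0]})

with open("encodings.pickle", "wb") as f:
    f.write(pickle.dumps(data))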