BilalSardar committed on
Commit 41700a0 · 1 Parent(s): 53e2ca9

Update app.py

Files changed (1)
  1. app.py +2 -73
app.py CHANGED
@@ -1,6 +1,5 @@
  import cv2
  import numpy as np
- #import face_recognition
  import os
  import pickle
  from deepface import DeepFace
@@ -10,28 +9,9 @@ import fast_colorthief
  import webcolors
  from PIL import Image
 
- # data1 = []
- # try:
- # data = pickle.loads(open("encodings.pickle", "rb").read())
- # data1 = pickle.loads(open("encodings.pickle", "rb").read())
- # data = np.array(data)
- # encodings = [d["encoding"] for d in data]
- # personNames=[d["name"] for d in data]
- # except:
- # data=[]
- # data1=[]
- # encodings=[]
- # personNames=[]
-
- # cluster the embeddings
- print("[INFO] clustering...")
-
- print('All Encodings Complete!!!')
 
 
  thres = 0.45
- path = 'images'
- images = []
  classNames= []
  classFile = 'coco.names'
  with open(classFile,'rt') as f:
@@ -52,21 +32,7 @@ net.setInputSwapRB(True)
 
 
 
- def faceEncodings(images):
-
-     encodeList = []
-     for img in images:
-         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-         encode = face_recognition.face_encodings(img)[0]
-         encodeList.append(encode)
-     return encodeList
-
-
- #encodeListKnown = faceEncodings(images)
-
  def main(image):
-     personsinimage=[]
-     index=len(encodings)
      gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
      rgb=cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
      names=[]
@@ -113,51 +79,14 @@ def main(image):
      names.append(diff[min(diff.keys())])
 
 
-     # faces = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-
-     # facesCurrentFrame = face_recognition.face_locations(faces)
-     # encodesCurrentFrame = face_recognition.face_encodings(faces, facesCurrentFrame)
-
-     # for encodeFace, faceLoc in zip(encodesCurrentFrame, facesCurrentFrame):
-
-     # if len(data1)>0:
-     # matches = face_recognition.compare_faces(encodings, encodeFace)
-     # faceDis = face_recognition.face_distance(encodings, encodeFace)
-     # # print(faceDis)
-     # matchIndex = np.argmin(faceDis)
-
-     # if matches[matchIndex]:
-     # name = personNames[matchIndex].upper()
-     # personsinimage.append(name)
-     # names.append(name)
-     # else:
-     # d = [{"name": "UNKNOWN"+str(index), "loc": faceLoc, "encoding": encodeFace}]
-     # encodings.append(encodeFace)
-     # names.append("UNKNOWN"+str(index))
-     # personNames.append("UNKNOWN"+str(index))
-     # index+=1
-     # data1.extend(d)
-     # personsinimage.append("UNKNOWN"+str(index))
-     # else:
-     # d = [{"name": "UNKNOWN"+str(index), "loc": faceLoc, "encoding": encodeFace}]
-     # encodings.append(encodeFace)
-     # names.append("UNKNOWN"+str(index))
-     # personNames.append("UNKNOWN"+str(index))
-     # index+=1
-     # data1.extend(d)
-
-     # # dump the facial encodings data to disk
-     # print("[INFO] serializing encodings...")
-     # f = open("encodings.pickle", "wb")
-     # f.write(pickle.dumps(data1))
-     # f.close()
+
 
      return ' '.join(names)
  interface = gr.Interface(fn=main,
                           inputs=["image"],
                           outputs=[gr.inputs.Textbox(label='Names of person in image')],
                           title='Face Recognition ',
-                          description='This Space:\n1) Recognize face \n2) Detect Emotion \n3) Detect Colors.\n4) Object Detection \n<br>This space is for personal use. For the time being, it only recognizes few persons. To use it, simply duplicate the space and replace the images in the image folder.')
+                          description='This Space:\n \n2) Detect Emotion \n3) Detect Colors.\n4) Object Detection \n<br>This space is for personal use. For the time being, it only recognizes few persons. To use it, simply duplicate the space and replace the images in the image folder.')
 
 
  interface.launch(inline=False)
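
After this commit the dead face_recognition/encodings code is gone and app.py keeps only the DeepFace emotion check, the dominant-colour lookup via fast_colorthief/webcolors, and the OpenCV object-detection setup (thres, classNames, net) that the hunk headers show unchanged. Below is a minimal sketch of what that remaining flow roughly looks like, assembled only from the imports and calls visible in this diff; enforce_detection=False, the exact-name colour fallback, and the use of gr.Textbox instead of the legacy gr.inputs.Textbox are assumptions for the sketch, not the Space's actual code.

import cv2
import webcolors
import fast_colorthief
import gradio as gr
from deepface import DeepFace

def main(image):
    names = []

    # Emotion via DeepFace. Newer DeepFace versions return a list of dicts,
    # older ones a single dict; enforce_detection=False (an assumption, not
    # necessarily what app.py passes) avoids an exception when no face is found.
    analysis = DeepFace.analyze(image, actions=["emotion"], enforce_detection=False)
    result = analysis[0] if isinstance(analysis, list) else analysis
    names.append(result["dominant_emotion"])

    # Dominant colour: fast_colorthief accepts an RGBA numpy array. app.py
    # treats the incoming frame as BGR (see the cv2.COLOR_BGR2* calls in the diff).
    rgba = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)
    r, g, b = fast_colorthief.get_dominant_color(rgba)
    try:
        # Exact CSS3 name if one exists; the Space instead picks the nearest
        # named colour (the diff's `diff[min(diff.keys())]` pattern).
        names.append(webcolors.rgb_to_name((r, g, b)))
    except ValueError:
        names.append(f"rgb({r}, {g}, {b})")

    # Object detection (thres, classNames and the cv2.dnn_DetectionModel net)
    # lives in the part of app.py this commit leaves untouched.

    return " ".join(names)

interface = gr.Interface(fn=main,
                         inputs=["image"],
                         # gr.Textbox stands in for the legacy gr.inputs.Textbox used by the Space
                         outputs=[gr.Textbox(label="Names of person in image")],
                         title="Face Recognition")
interface.launch(inline=False)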