import os

import cv2
import gradio as gr
import mediapipe as mp

#from cvzone.HandTrackingModule import HandDetector
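# handDetector is a slimmed-down, self-contained stand-in for cvzone's
# HandDetector (see the commented import above), built directly on
# mediapipe's Hands solution so the Space has one fewer dependency.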
class handDetector():
    def __init__(self, mode=True, modelComplexity=1, maxHands=2, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.modelComplex = modelComplexity
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(static_image_mode=self.mode,
                                        max_num_hands=self.maxHands,
                                        model_complexity=self.modelComplex,
                                        min_detection_confidence=self.detectionCon,
                                        min_tracking_confidence=self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils
    def findHands(self, img, draw=True, flipType=True):
        """
        Finds hands in a BGR image.
        :param img: BGR image to find the hands in.
        :param draw: Flag to draw the landmarks and bounding boxes on the image.
        :param flipType: Flag to swap the handedness labels for mirrored input.
        :return: List of detected hands and, if draw is True, the annotated image.
        """
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)
        allHands = []
        h, w, c = img.shape
        if self.results.multi_hand_landmarks:
            for handType, handLms in zip(self.results.multi_handedness, self.results.multi_hand_landmarks):
                myHand = {}
                # lmList: pixel coordinates for all 21 landmarks
                mylmList = []
                xList = []
                yList = []
                for id, lm in enumerate(handLms.landmark):
                    px, py, pz = int(lm.x * w), int(lm.y * h), int(lm.z * w)
                    mylmList.append([px, py, pz])
                    xList.append(px)
                    yList.append(py)
                # bbox: tight bounding box around the landmarks
                xmin, xmax = min(xList), max(xList)
                ymin, ymax = min(yList), max(yList)
                boxW, boxH = xmax - xmin, ymax - ymin
                bbox = xmin, ymin, boxW, boxH
                cx, cy = bbox[0] + (bbox[2] // 2), \
                         bbox[1] + (bbox[3] // 2)
                myHand["lmList"] = mylmList
                myHand["bbox"] = bbox
                myHand["center"] = (cx, cy)
                # A mirrored frame swaps apparent handedness, so flip the label back
                if flipType:
                    if handType.classification[0].label == "Right":
                        myHand["type"] = "Left"
                    else:
                        myHand["type"] = "Right"
                else:
                    myHand["type"] = handType.classification[0].label
                allHands.append(myHand)
                # draw the landmarks and an expanded bounding box
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
                    cv2.rectangle(img, (bbox[0] - 20, bbox[1] - 20),
                                  (bbox[0] + bbox[2] + 20, bbox[1] + bbox[3] + 20),
                                  (255, 0, 255), 2)
        if draw:
            return allHands, img
        else:
            return allHands
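    # findPosition returns a flat [id, x, y] list for one hand; count() uses
    # it to compare fingertip and joint positions in pixel space.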
    def findPosition(self, img, handNo=0, draw=True):
        lmList = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            for id, lm in enumerate(myHand.landmark):
                h, w, c = img.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
        return lmList
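# Gradio passes the clicked Dataset row as a list; its first element is the
# example image path, which is loaded into the input component.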
def set_example_image(example: list) -> dict:
    return gr.Image.update(value=example[0])
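# count() runs the full pipeline on one frame: detect the hand, read the
# landmark positions, count the raised fingers, then overlay the matching
# image from the Count/ folder plus the numeric result.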
def count(im):
    # Load the overlay images (one per finger count) from the Count folder
    folderPath = "Count"
    myList = os.listdir(folderPath)
    overlayList = []
    for imPath in sorted(myList):
        image = cv2.imread(f'{folderPath}/{imPath}')
        overlayList.append(image)
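    # Fingertip landmark ids in MediaPipe's 21-point hand model:
    # 4 = thumb, 8 = index, 12 = middle, 16 = ring, 20 = pinky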
    tipIds = [4, 8, 12, 16, 20]
    detector = handDetector(detectionCon=0.75)
    # Gradio supplies RGB; convert to BGR for OpenCV and mirror the frame
    allhands, img = detector.findHands(cv2.flip(im[:, :, ::-1], 1))
    lmList = detector.findPosition(img, draw=False)
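    # Heuristic: the thumb is "up" if its tip is to one side of the joint
    # below it (an x comparison on the mirrored frame); each other finger is
    # "up" if its tip sits above its middle joint (a y comparison, since
    # image y grows downward).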
    if len(lmList) != 0:
        fingers = []
        # Thumb
        if lmList[tipIds[0]][1] > lmList[tipIds[0] - 1][1]:
            fingers.append(1)
        else:
            fingers.append(0)
        # 4 fingers
        for id in range(1, 5):
            if lmList[tipIds[id]][2] < lmList[tipIds[id] - 2][2]:
                fingers.append(1)
            else:
                fingers.append(0)
        totalFingers = fingers.count(1)
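        # Note: overlayList[totalFingers - 1] relies on Python's negative
        # indexing when the count is 0, wrapping to the last image in the
        # sorted Count/ folder.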
        # Paste the matching overlay image and print the count on the frame
        h, w, c = overlayList[totalFingers - 1].shape
        img = cv2.flip(img, 1)
        img[0:h, 0:w] = overlayList[totalFingers - 1]
        cv2.rectangle(img, (20, 225), (170, 425), (0, 255, 0), cv2.FILLED)
        cv2.putText(img, str(totalFingers), (45, 375), cv2.FONT_HERSHEY_PLAIN,
                    10, (255, 0, 0), 25)
        # Convert back to RGB for Gradio
        return img[:, :, ::-1]
    else:
        return cv2.flip(img[:, :, ::-1], 1)
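# Custom CSS to restyle the submit buttons and hide Gradio's default footer
# and markdown output.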
css = """
.gr-button-lg {
    z-index: 14;
    width: 113px;
    height: 30px;
    left: 0px;
    top: 0px;
    padding: 0px;
    cursor: pointer !important;
    background: none rgb(17, 20, 45) !important;
    border: none !important;
    text-align: center !important;
    font-size: 14px !important;
    font-weight: 500 !important;
    color: rgb(255, 255, 255) !important;
    line-height: 1 !important;
    border-radius: 6px !important;
    transition: box-shadow 200ms ease 0s, background 200ms ease 0s !important;
    box-shadow: none !important;
}
.gr-button-lg:hover {
    z-index: 14;
    width: 113px;
    height: 30px;
    left: 0px;
    top: 0px;
    padding: 0px;
    cursor: pointer !important;
    background: none rgb(66, 133, 244) !important;
    border: none !important;
    text-align: center !important;
    font-size: 14px !important;
    font-weight: 500 !important;
    color: rgb(255, 255, 255) !important;
    line-height: 1 !important;
    border-radius: 6px !important;
    transition: box-shadow 200ms ease 0s, background 200ms ease 0s !important;
    box-shadow: rgb(0 0 0 / 23%) 0px 1px 7px 0px !important;
}
footer {display: none !important}
.output-markdown {display: none !important}
#out_image {height: 22rem !important;}
"""
with gr.Blocks(title="Right Hand Finger Counting | Data Science Dojo", css=css) as demo:
    with gr.Tabs():
        with gr.TabItem('Upload'):
            with gr.Row():
                with gr.Column():
                    img_input = gr.Image(shape=(640, 480))
                    image_button = gr.Button("Submit")
                with gr.Column():
                    output = gr.Image(shape=(640, 480), elem_id="out_image")
            with gr.Row():
                example_images = gr.Dataset(components=[img_input], samples=[["ex2.jpg"]])
        with gr.TabItem('Webcam'):
            with gr.Row():
                with gr.Column():
                    img_input2 = gr.Webcam()
                    image_button2 = gr.Button("Submit")
                with gr.Column():
                    output2 = gr.Image()
    # Wire both submit buttons to count() and the example gallery to the input
    image_button.click(fn=count,
                       inputs=img_input,
                       outputs=output)
    image_button2.click(fn=count,
                        inputs=img_input2,
                        outputs=output2)
    example_images.click(fn=set_example_image, inputs=[example_images], outputs=[img_input])

demo.launch(debug=True)