Commit d828c23 by Abhishek Gola
Parent(s): 2a89d7c

Adding face detection to opencv space
README.md CHANGED
@@ -7,6 +7,11 @@ sdk: gradio
 sdk_version: 5.34.1
 app_file: app.py
 pinned: false
+short_description: Face detection using OpenCV YuNet ONNX model with Gradio UI
+tags:
+- opencv
+- face-detection
+- yunet
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,62 @@
+import cv2 as cv
+import numpy as np
+import gradio as gr
+from yunet import YuNet
+from huggingface_hub import hf_hub_download
+
+# Download ONNX model from Hugging Face
+model_path = hf_hub_download(repo_id="opencv/face_detection_yunet", filename="face_detection_yunet_2023mar.onnx")
+
+# Initialize YuNet model
+model = YuNet(
+    modelPath=model_path,
+    inputSize=[320, 320],
+    confThreshold=0.9,
+    nmsThreshold=0.3,
+    topK=5000,
+    backendId=cv.dnn.DNN_BACKEND_OPENCV,
+    targetId=cv.dnn.DNN_TARGET_CPU
+)
+
+def visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255)):
+    output = image.copy()
+    landmark_color = [
+        (255, 0, 0),   # right eye
+        (0, 0, 255),   # left eye
+        (0, 255, 0),   # nose tip
+        (255, 0, 255), # right mouth corner
+        (0, 255, 255)  # left mouth corner
+    ]
+
+    for det in results:
+        bbox = det[0:4].astype(np.int32)
+        cv.rectangle(output, (bbox[0], bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), box_color, 2)
+        conf = det[-1]
+        cv.putText(output, '{:.2f}'.format(conf), (bbox[0], bbox[1] + 12), cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)
+
+        landmarks = det[4:14].astype(np.int32).reshape((5, 2))
+        for idx, landmark in enumerate(landmarks):
+            cv.circle(output, tuple(landmark), 2, landmark_color[idx], 2)
+
+    return output
+
+def detect_faces(input_image):
+    h, w, _ = input_image.shape
+    model.setInputSize([w, h])
+    results = model.infer(input_image)
+    if results is None or len(results) == 0:
+        return input_image
+    return visualize(input_image, results)
+
+# Gradio Interface
+demo = gr.Interface(
+    fn=detect_faces,
+    inputs=gr.Image(type="numpy", label="Upload Image"),
+    outputs=gr.Image(type="numpy", label="Detected Faces"),
+    title="Face Detection YuNet (OpenCV DNN)",
+    allow_flagging="never",
+    description="Upload an image to detect faces using OpenCV's ONNX-based YuNet face detector."
+)
+
+if __name__ == "__main__":
+    demo.launch()
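Note, for reading visualize above: each row returned by YuNet is a 15-value vector, with the face box (x, y, w, h) in the first four entries, five landmark (x, y) pairs next (right eye, left eye, nose tip, right mouth corner, left mouth corner), and the detection score last. A minimal sketch of unpacking one row, assuming at least one face was found (variable names here are illustrative, not part of the commit):

# Sketch: unpacking a single YuNet detection row
det = results[0]                     # first detected face, shape (15,)
x, y, w, h = det[0:4]                # face bounding box
landmarks = det[4:14].reshape(5, 2)  # five (x, y) landmark pairs
score = det[-1]                      # detection confidence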
requirements.txt ADDED
@@ -0,0 +1,4 @@
+opencv-python
+gradio
+numpy
+huggingface_hub
yunet.py ADDED
@@ -0,0 +1,55 @@
+# This file is part of OpenCV Zoo project.
+# It is subject to the license terms in the LICENSE file found in the same directory.
+#
+# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
+# Third party copyrights are property of their respective owners.
+
+from itertools import product
+
+import numpy as np
+import cv2 as cv
+
+class YuNet:
+    def __init__(self, modelPath, inputSize=[320, 320], confThreshold=0.6, nmsThreshold=0.3, topK=5000, backendId=0, targetId=0):
+        self._modelPath = modelPath
+        self._inputSize = tuple(inputSize)  # [w, h]
+        self._confThreshold = confThreshold
+        self._nmsThreshold = nmsThreshold
+        self._topK = topK
+        self._backendId = backendId
+        self._targetId = targetId
+
+        self._model = cv.FaceDetectorYN.create(
+            model=self._modelPath,
+            config="",
+            input_size=self._inputSize,
+            score_threshold=self._confThreshold,
+            nms_threshold=self._nmsThreshold,
+            top_k=self._topK,
+            backend_id=self._backendId,
+            target_id=self._targetId)
+
+    @property
+    def name(self):
+        return self.__class__.__name__
+
+    def setBackendAndTarget(self, backendId, targetId):
+        self._backendId = backendId
+        self._targetId = targetId
+        self._model = cv.FaceDetectorYN.create(
+            model=self._modelPath,
+            config="",
+            input_size=self._inputSize,
+            score_threshold=self._confThreshold,
+            nms_threshold=self._nmsThreshold,
+            top_k=self._topK,
+            backend_id=self._backendId,
+            target_id=self._targetId)
+
+    def setInputSize(self, input_size):
+        self._model.setInputSize(tuple(input_size))
+
+    def infer(self, image):
+        # Forward
+        faces = self._model.detect(image)
+        return np.empty(shape=(0, 5)) if faces[1] is None else faces[1]
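Because yunet.py is a thin wrapper around cv.FaceDetectorYN, it can also be exercised outside the Gradio app. A minimal standalone sketch, assuming the dependencies from requirements.txt are installed and a local image sample.jpg exists (the image path is illustrative, not part of the commit):

# Standalone sketch (not part of the commit): run the YuNet wrapper on a local image
import cv2 as cv
from huggingface_hub import hf_hub_download
from yunet import YuNet

model_path = hf_hub_download(repo_id="opencv/face_detection_yunet",
                             filename="face_detection_yunet_2023mar.onnx")
detector = YuNet(modelPath=model_path, confThreshold=0.9)

img = cv.imread("sample.jpg")    # illustrative local image, loaded as BGR
h, w, _ = img.shape
detector.setInputSize([w, h])    # the underlying FaceDetectorYN expects (width, height)
faces = detector.infer(img)      # ndarray with one 15-value row per detected face
print(f"{len(faces)} face(s) detected")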