Commit 940f183 ("Format code")
Parent: 3200b18


68 files changed, +4894 −2700 lines

.flake8 (+5)

@@ -0,0 +1,5 @@
+[flake8]
+ignore = F401
+max-line-length = 100
+max-complexity = 18
+select = B,C,E,F,W,T4,B9
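With this file at the repository root, flake8 picks the settings up automatically when run from the project directory. A typical invocation (assuming flake8 plus the flake8-bugbear plugin are installed, since the B/B9 codes in `select` come from bugbear):

```
pip install flake8 flake8-bugbear
flake8 .
```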

.pre-commit-config.yaml (+38)

@@ -0,0 +1,38 @@
+exclude: "^\
+(third-party/.*)\
+"
+
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.1.0
+    hooks:
+      - id: check-merge-conflict # checks for merge-conflict markers such as "<<<<<<<", "=======", and ">>>>>>>"
+      - id: detect-private-key # detects the presence of private keys
+      - id: end-of-file-fixer # ensures that a file is either empty or ends with one newline
+      - id: requirements-txt-fixer # sorts entries in requirements.txt
+      - id: trailing-whitespace # trims trailing whitespace at the end of lines
+
+  # Format YAML and other files
+  - repo: https://github.com/pre-commit/mirrors-prettier
+    rev: v2.5.1
+    hooks:
+      - id: prettier
+        files: \.(js|ts|jsx|tsx|css|less|html|json|markdown|md|yaml|yml)$
+
+  # Sort the order of imports
+  - repo: https://github.com/PyCQA/isort
+    rev: 5.12.0
+    hooks:
+      - id: isort
+        args: [--profile=black]
+
+  # Format Python files
+  - repo: https://github.com/psf/black
+    rev: 23.7.0
+    hooks:
+      - id: black
+
+# - repo: https://github.com/PyCQA/flake8
+#   rev: 6.1.0
+#   hooks:
+#     - id: flake8
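To run these hooks locally, the standard pre-commit workflow applies (assuming pre-commit is not yet installed):

```
pip install pre-commit
pre-commit install          # register the hooks to run on every git commit
pre-commit run --all-files  # run every hook once across the whole repository
```

Note that the flake8 hook at the bottom is left commented out; linting is configured separately through the new .flake8 file above.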

README.md (+28 −8)

@@ -1,12 +1,17 @@
 # Real-Time Face Recognition
+
 <p align="center">
   <img src="./static/results/face-recognition.gif" alt="animated" />
 </p>

 # How to add a person to recognize
+
 ### Step 1: Create a folder with the folder name being the name of the person
+
 ### Step 2: Add the person's photos to the folder
+
 ### Step 3: Move the folder to the additional-training-datasets folder
+
 #### Example:

 - |database
@@ -17,43 +22,58 @@
 - ----|full-training-datasets

 ### Step 4: Set up with Python >= 3.7
-````
+
+```
 pip install -r requirements.txt
-````
+```
+
 ### Step 5: Run to add person
-````
+
+```
 python train.py --is-add-user=True
-````
+```
+
 ### Step 6: Run recognize
-````
+
+```
 python recognize.py
-````
-# Face Recognition using Yolov5-face, InsightFace, and a Similarity Measure
+```
+
+# Face Recognition using Yolov5-face, InsightFace, and a Similarity Measure
+
 <p align="center">
   <img src="./static/results/workflow.png" alt="animated" />
 </p>

 # Yolov5-face to detect faces
+
 <p align="center">
   <img src="./static/results/face-detection.gif" alt="animated" />
 </p>

 # InsightFace to recognize faces
+
 ![image](https://user-images.githubusercontent.com/80930272/160270088-a3760d88-ebc8-4535-907e-6b684276755a.png)

 # Multi-threading
+
 <p align="center">
   <img src="https://user-images.githubusercontent.com/80930272/165548024-6d25fbe4-057f-4123-a3f9-3912cce2b73b.png" alt="animated" />
 </p>

 # Cosine Similarity Algorithm
+
 ![image](https://user-images.githubusercontent.com/80930272/160270156-37fe3269-ca65-4692-a3b2-e9568b3876f8.png)

 # Reference
+
 - https://github.com/deepcam-cn/yolov5-face
 - https://github.com/deepinsight/insightface/tree/master/recognition/arcface_torch
 - https://github.com/SthPhoenix/InsightFace-REST

 pip install torch==1.9.1+cpu torchvision==0.10.1+cpu torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html

-python tracking3.py video -f face_tracking/exps/example/mot/yolox_s_mix_det.py -c face_tracking/pretrained/bytetrack_s_mot17.pth.tar --fuse --save_result &> log.txt
+python tracking3.py video -f face_tracking/exps/example/mot/yolox_s_mix_det.py -c face_tracking/pretrained/bytetrack_s_mot17.pth.tar --fuse --save_result &> log.txt
+
+vector database
+Kafka, Redis
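The cosine-similarity step in the README above reduces to a dot product of L2-normalized embedding vectors. A minimal NumPy sketch of the idea (illustrative only; the variable names, the 512-dimensional embeddings, and the gallery setup are assumptions, not taken from this repo):

```python
import numpy as np

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    """Cosine similarity between two embedding vectors, in [-1, 1]."""
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

# Toy example: compare a query embedding against a small gallery.
rng = np.random.default_rng(0)
gallery = {name: rng.normal(size=512) for name in ["alice", "bob"]}  # hypothetical ArcFace-style embeddings
query = gallery["alice"] + 0.1 * rng.normal(size=512)  # noisy copy of alice's embedding

scores = {name: cosine_similarity(query, emb) for name, emb in gallery.items()}
best = max(scores, key=scores.get)
print(best, scores[best])  # picks "alice" with a score close to 1.0
```

The higher the score, the more likely the two embeddings belong to the same identity; recognition then amounts to thresholding the best match against the gallery.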

detect.py (+37 −117)

@@ -1,138 +1,55 @@
-#pytorch
-import torch
-from torchvision import transforms
+import time

-#other lib
-import sys
-import numpy as np
 import cv2
-import time

-sys.path.insert(0, "face_detection/yolov5_face")
-
-from face_detection.yolov5_face.models.experimental import attempt_load
-from face_detection.yolov5_face.utils.datasets import letterbox
-from face_detection.yolov5_face.utils.general import check_img_size, non_max_suppression_face, scale_coords
-
-# Check device
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# Get model detect
-## Case 1:
-# model = attempt_load("face_detection/yolov5_face/yolov5s-face.pt", map_location=device)
-
-## Case 2:
-model = attempt_load("face_detection/yolov5_face/yolov5n-0.5.pt", map_location=device)
-
-# Resize image
-def resize_image(img0, img_size):
-    h0, w0 = img0.shape[:2]  # orig hw
-    r = img_size / max(h0, w0)  # resize image to img_size
-
-    if r != 1:  # always resize down, only resize up if training with augmentation
-        interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR
-        img0 = cv2.resize(img0, (int(w0 * r), int(h0 * r)), interpolation=interp)
-
-    imgsz = check_img_size(img_size, s=model.stride.max())  # check img_size
-    img = letterbox(img0, new_shape=imgsz)[0]
-
-    # Convert
-    img = img[:, :, ::-1].transpose(2, 0, 1).copy()  # BGR to RGB, to 3x416x416
-
-    img = torch.from_numpy(img).to(device)
-    img = img.float()  # uint8 to fp16/32
-    img /= 255.0  # 0 - 255 to 0.0 - 1.0
-
-    return img
-
-def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):
-    # Rescale coords (xyxy) from img1_shape to img0_shape
-    if ratio_pad is None:  # calculate from img0_shape
-        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
-        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
-    else:
-        gain = ratio_pad[0][0]
-        pad = ratio_pad[1]
-
-    coords[:, [0, 2, 4, 6, 8]] -= pad[0]  # x padding
-    coords[:, [1, 3, 5, 7, 9]] -= pad[1]  # y padding
-    coords[:, :10] /= gain
-    #clip_coords(coords, img0_shape)
-    coords[:, 0].clamp_(0, img0_shape[1])  # x1
-    coords[:, 1].clamp_(0, img0_shape[0])  # y1
-    coords[:, 2].clamp_(0, img0_shape[1])  # x2
-    coords[:, 3].clamp_(0, img0_shape[0])  # y2
-    coords[:, 4].clamp_(0, img0_shape[1])  # x3
-    coords[:, 5].clamp_(0, img0_shape[0])  # y3
-    coords[:, 6].clamp_(0, img0_shape[1])  # x4
-    coords[:, 7].clamp_(0, img0_shape[0])  # y4
-    coords[:, 8].clamp_(0, img0_shape[1])  # x5
-    coords[:, 9].clamp_(0, img0_shape[0])  # y5
-    return coords
-
-def get_face(input_image):
-    # Parameters
-    size_convert = 128
-    conf_thres = 0.4
-    iou_thres = 0.5
-
-    # Resize image
-    img = resize_image(input_image.copy(), size_convert)
-
-    # Via yolov5-face
-    with torch.no_grad():
-        pred = model(img[None, :])[0]
-
-    # Apply NMS
-    det = non_max_suppression_face(pred, conf_thres, iou_thres)[0]
-    bboxs = np.int32(scale_coords(img.shape[1:], det[:, :4], input_image.shape).round().cpu().numpy())
-
-    landmarks = np.int32(scale_coords_landmarks(img.shape[1:], det[:, 5:15], input_image.shape).round().cpu().numpy())
-
-    return bboxs, landmarks
+from face_detection.yolov5_face.detect import Detector
+

 def main():
-    # Open camera
+    detector = Detector()
+
+    # Open camera
     cap = cv2.VideoCapture(0)
     start = time.time_ns()
     frame_count = 0
     fps = -1
-
+
     # Save video
     frame_width = int(cap.get(3))
     frame_height = int(cap.get(4))
-
+
     size = (frame_width, frame_height)
-    video = cv2.VideoWriter('results/face-detection.mp4',cv2.VideoWriter_fourcc(*'mp4v'), 30, size)
-
+    video = cv2.VideoWriter(
+        "results/face-detection.mp4", cv2.VideoWriter_fourcc(*"mp4v"), 30, size
+    )
+
     # Read until video is completed
-    while(True):
+    while True:
         # Capture frame-by-frame
         _, frame = cap.read()
-
+
         # Get faces
-        bboxs, landmarks = get_face(frame)
-        h,w,c = frame.shape
-
+        bboxs, landmarks = detector.inference_detect(input_image=frame)
+        h, w, c = frame.shape
+
         tl = 1 or round(0.002 * (h + w) / 2) + 1  # line/font thickness
-        clors = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255)]
-
+        clors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (0, 255, 255)]

         # Get boxs
         for i in range(len(bboxs)):
             # Get location face
             x1, y1, x2, y2 = bboxs[i]
             cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 146, 230), 2)
-
+
             # Landmarks
             for x in range(5):
                 point_x = int(landmarks[i][2 * x])
                 point_y = int(landmarks[i][2 * x + 1])
-                cv2.circle(frame, (point_x, point_y), tl+1, clors[x], -1)
-
-        # Count fps
+                cv2.circle(frame, (point_x, point_y), tl + 1, clors[x], -1)
+
+        # Count fps
         frame_count += 1
-
+
         if frame_count >= 30:
             end = time.time_ns()
             fps = 1e9 * frame_count / (end - start)

@@ -141,22 +58,25 @@ def main():

         if fps > 0:
             fps_label = "FPS: %.2f" % fps
-            cv2.putText(frame, fps_label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
-
-        #Save video
+            cv2.putText(
+                frame, fps_label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2
+            )
+
+        # Save video
         video.write(frame)
-
-        #Show result
+
+        # Show result
         cv2.imshow("Face Detection", frame)
-
+
         # Press Q on keyboard to exit
-        if cv2.waitKey(25) & 0xFF == ord('q'):
-            break
-
+        if cv2.waitKey(25) & 0xFF == ord("q"):
+            break
+
     video.release()
     cap.release()
     cv2.destroyAllWindows()
     cv2.waitKey(0)

-if __name__=="__main__":
-    main()
+
+if __name__ == "__main__":
+    main()
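Beyond the formatting fixes, this diff replaces the inline YOLOv5-face pipeline (model loading, letterboxing, NMS, coordinate rescaling) with a single `Detector` class. Based only on how the new `main()` uses it, the interface looks like the sketch below; the return shapes are inferred from the removed `get_face()` (boxes from `det[:, :4]`, five landmark points from `det[:, 5:15]`) and should be treated as assumptions:

```python
import cv2

from face_detection.yolov5_face.detect import Detector

detector = Detector()  # wraps model loading, preprocessing, NMS, and rescaling

frame = cv2.imread("someone.jpg")  # hypothetical test image
bboxs, landmarks = detector.inference_detect(input_image=frame)

# Inferred from the removed get_face(): one (x1, y1, x2, y2) box per face,
# and one flat (x1, y1, ..., x5, y5) array of five keypoints per face.
for (x1, y1, x2, y2), pts in zip(bboxs, landmarks):
    cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 146, 230), 2)
    for k in range(5):
        cv2.circle(frame, (int(pts[2 * k]), int(pts[2 * k + 1])), 2, (0, 255, 0), -1)
```

Moving the model behind a class also removes the module-level `attempt_load(...)` call, so importing detect.py no longer loads weights as a side effect.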

face_detection/retinaface/LICENSE.MIT (+1 −1)

@@ -1,4 +1,4 @@
-MIT License
+MIT License

 Copyright (c) 2019

face_detection/retinaface/README.md (+2 −1)

@@ -19,13 +19,14 @@ python camera_test.py --trained_model weights/mobilenet0.25_Final.pth --network
 <h3> Using Image </h3>
 change image in ./curve, change file path in detect.py (line 87)

-
 backbone: resnet50
+
 ```
 python detect.py --trained_model weights/Resnet50_Final.pth --network resnet50 --cpu
 ```

 backbone: mobilenet0.25
+
 ```
 python detect.py --trained_model weights/mobilenet0.25_Final.pth --network mobile0.25 --cpu
 ```
