# wyzely-detect/set-detect-notify/utils/utils.py

import cv2
import numpy as np
from pathlib import Path
from deepface import DeepFace


def plot_label(
    # list of dicts with each dict containing a label, x1, y1, x2, y2
    boxes: list = None,
    # opencv image
    full_frame: np.ndarray = None,
    # run_scale is the scale of the image that was used to run the model
    # So the coordinates will be scaled up to the view frame size
    run_scale: float = None,
    # view_scale is the scale of the image, in relation to the full frame
    # So the coordinates will be scaled appropriately when coming from run_frame
    view_scale: float = None,
    font: int = cv2.FONT_HERSHEY_SIMPLEX,
):
    view_frame = cv2.resize(full_frame, (0, 0), fx=view_scale, fy=view_scale)
    for thing in boxes:
        cv2.rectangle(
            # Image
            view_frame,
            # Start point
            (
                int(thing["x1"] * (run_scale / view_scale)),
                int(thing["y1"] * (run_scale / view_scale)),
            ),
            # End point
            (
                int(thing["x2"] * (run_scale / view_scale)),
                int(thing["y2"] * (run_scale / view_scale)),
            ),
            # Color
            (0, 255, 0),
            # Thickness
            2,
        )
        cv2.putText(
            # Image
            view_frame,
            # Text
            thing["label"],
            # Origin
            (
                int(thing["x1"] * (run_scale / view_scale)),
                int(thing["y1"] * (run_scale / view_scale)),
            ),
            # Font
            font,
            # Font Scale
            1,
            # Color
            (0, 255, 0),
            # Thickness
            1,
        )
    return view_frame
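
# Illustrative usage sketch (not part of the original module): the frame path,
# box values, and scale factors below are hypothetical and only show the
# expected shapes of plot_label's arguments.
#
#   frame = cv2.imread("example.jpg")
#   boxes = [{"label": "person", "x1": 40, "y1": 30, "x2": 120, "y2": 200}]
#   preview = plot_label(
#       boxes=boxes, full_frame=frame, run_scale=0.25, view_scale=0.75
#   )
#   cv2.imshow("Preview", preview)
#   cv2.waitKey(1)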


def recognize_face(
    path_to_directory: Path = Path("faces"),
    # opencv image
    run_frame: np.ndarray = None,
) -> list:
    '''
    Accepts a path to a directory of images of faces to be used as a reference.
    In addition, accepts an OpenCV image to be used as the frame to be searched.
    Returns a list of dictionaries, containing a single dictionary since currently only 1 face can be detected in each frame.
    Each dict contains the following keys: label, x1, y1, x2, y2
    The directory should be structured as follows:
    faces/
        name/
            image1.jpg
            image2.jpg
            image3.jpg
        name2/
            image1.jpg
            image2.jpg
            image3.jpg
    (not necessarily JPGs, but you get the idea)
    The point is, `name` is the name of the person in the images in the directory `name`.
    That name will be used as the label for the face in the frame.
    '''
    # face_dataframes is a vanilla list of dataframes
    face_dataframes = DeepFace.find(run_frame, db_path=str(path_to_directory))
    # Iterate over the dataframes
    for df in face_dataframes:
        # The last row is the highest confidence
        # So we can just grab the path from there
        # iloc = Integer LOCation
        path_to_image = Path(df.iloc[-1]["identity"])
        # Get the name of the parent directory
        label = path_to_image.parent.name
        # Return the coordinates of the box in xyxy format, rather than xywh
        # This is because YOLO uses xyxy, and that's what plot_label expects
        # Also, xyxy is just the top left and bottom right corners of the box
        coordinates = {
            "x1": df.iloc[-1]["source_x"],
            "y1": df.iloc[-1]["source_y"],
            "x2": df.iloc[-1]["source_x"] + df.iloc[-1]["source_w"],
            "y2": df.iloc[-1]["source_y"] + df.iloc[-1]["source_h"],
        }
        return [dict(label=label, **coordinates)]
    '''
    Example dataframe, for reference:
    identity (path to image) | source_x | source_y | source_w | source_h | VGG-Face_cosine (pretty much the confidence ¯\_(ツ)_/¯)
    '''
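

# Minimal end-to-end sketch (an assumption, not part of the original module):
# "example.jpg" and the "faces" directory are hypothetical paths, and the scale
# values are arbitrary; this only shows how recognize_face and plot_label are
# meant to chain together, with recognition run on a downscaled frame.
if __name__ == "__main__":
    full_frame = cv2.imread("example.jpg")
    if full_frame is not None:
        run_scale = 0.25
        view_scale = 0.75
        # Downscale the frame before running recognition, matching run_scale
        run_frame = cv2.resize(full_frame, (0, 0), fx=run_scale, fy=run_scale)
        faces = recognize_face(path_to_directory=Path("faces"), run_frame=run_frame)
        if faces:
            view_frame = plot_label(
                boxes=faces,
                full_frame=full_frame,
                run_scale=run_scale,
                view_scale=view_scale,
            )
            cv2.imshow("Recognized faces", view_frame)
            cv2.waitKey(0)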