2023-10-02 01:56:40 +01:00
|
|
|
# import face_recognition
|
|
|
|
from pathlib import Path
|
2023-12-03 03:23:42 +00:00
|
|
|
import os
|
2023-10-27 16:54:36 +01:00
|
|
|
import cv2
|
2023-10-05 03:03:11 +01:00
|
|
|
|
2023-10-02 01:56:40 +01:00
|
|
|
# import hjson as json
|
|
|
|
import torch
|
|
|
|
from ultralytics import YOLO
|
|
|
|
|
2023-10-27 16:54:36 +01:00
|
|
|
from .utils import notify, utils
|
|
|
|
from .utils.cli_args import argparser
|
2023-10-02 01:56:40 +01:00
|
|
|
|
|
|
|
# Timestamp format for detections; presumably consumed by utils/notify — TODO confirm,
# it is not referenced anywhere in this file's visible code.
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"

# Parsed CLI arguments; populated inside main() via argparser.parse_args().
args = None

# Shared detection/notification state, threaded through notify.thing_detected().
# "objects" maps YOLO class names to detection/notification timing dicts;
# "peoples" tracks recognized faces the same way.
objects_and_peoples = {
    "objects": {},
    "peoples": {},
}
|
2023-10-05 03:03:11 +01:00
|
|
|
|
|
|
|
|
2023-10-02 01:56:40 +01:00
|
|
|
def main():
    """Run the detection loop.

    Parses CLI args, configures torch/TensorFlow devices, opens the video
    source (RTSP stream or local capture device), then loops: run YOLO (and,
    if a faces directory exists, face recognition) on each frame, notify via
    ntfy, and optionally display annotated frames. Updates the module-level
    ``objects_and_peoples`` state. Exits when the user presses 'q' or the
    video source stops producing frames.
    """
    global objects_and_peoples
    global args

    # RUN_BY_COMPOSE = os.getenv("RUN_BY_COMPOSE") # Replace this with code to check for gpu

    args = argparser.parse_args()

    _configure_torch_device()

    # Seems automatically, deepface (tensorflow) tried to use my GPU on Pop!_OS
    # (I did not set up cudnn or anything)
    # Not sure the best way, in Poetry, to manage GPU libraries so for now, just use CPU
    if args.force_disable_tensorflow_gpu:
        _force_tensorflow_cpu()

    model = YOLO("yolov8n.pt")

    video_capture = _open_video_capture(args)

    # Eliminate lag by setting the buffer size to 1
    # This makes it so that the video capture will only grab the most recent frame
    # However, this means that the video may be choppy
    video_capture.set(cv2.CAP_PROP_BUFFERSIZE, 1)

    # Print the resolution of the video
    print(
        f"Video resolution: {video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)}x{video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)}"  # noqa: E501
    )

    print("Beginning video capture...")

    # The faces directory path never changes between frames, so build it once.
    # Its existence is still re-checked every frame below, so a directory
    # created while the program is running is picked up.
    path_to_faces = Path(args.faces_directory)

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()
        # FIX: `ret` was previously ignored, so a dropped stream or
        # disconnected device handed None to cv2.resize() and crashed.
        if not ret:
            print("Failed to read frame from video source, stopping")
            break

        # Resize frame of video to a smaller size for faster recognition processing
        run_frame = cv2.resize(frame, (0, 0), fx=args.run_scale, fy=args.run_scale)
        # view_frame = cv2.resize(frame, (0, 0), fx=args.view_scale, fy=args.view_scale)

        results = model(run_frame, verbose=False)

        path_to_faces_exists = path_to_faces.is_dir()

        for i, r in enumerate(results):
            # list of dicts with each dict containing a label, x1, y1, x2, y2
            plot_boxes = []

            # The following is stuff for people.
            # This is still in the for loop as each result, no matter if
            # anything is detected, will be present. Thus, there will always
            # be one result (r).

            # Only run face recognition if the faces directory exists.
            if path_to_faces_exists:
                if face_details := utils.recognize_face(
                    path_to_directory=path_to_faces,
                    run_frame=run_frame,
                    min_confidence=args.face_confidence_threshold,
                    no_remove_representations=args.no_remove_representations,
                ):
                    plot_boxes.append(face_details)
                    objects_and_peoples = notify.thing_detected(
                        thing_name=face_details["label"],
                        objects_and_peoples=objects_and_peoples,
                        detection_type="peoples",
                        detection_window=args.detection_window,
                        detection_duration=args.detection_duration,
                        notification_window=args.notification_window,
                        ntfy_url=args.ntfy_url,
                    )

            # The following is stuff for objects.
            # Lazily seed the per-class timing entries from the model's class
            # names on the first result. Truthiness test covers both an empty
            # dict and None (the old `== {} or is None` pair).
            if not objects_and_peoples["objects"]:
                for name in r.names.values():
                    objects_and_peoples["objects"][name] = {
                        "last_detection_time": None,
                        "detection_duration": None,
                        # "first_detection_time": None,
                        "last_notification_time": None,
                    }

            # Make sure the requested objects are classes the model can detect;
            # if not, print a warning.
            for obj in args.detect_object:
                # FIX: membership was previously tested against the top-level
                # dict (whose only keys are "objects" and "peoples"), so this
                # warning fired for every requested object. The model's class
                # names live under objects_and_peoples["objects"].
                if obj not in objects_and_peoples["objects"]:
                    print(
                        f"Warning: {obj} is not in the list of objects the model can detect!"
                    )

            for box in r.boxes:
                # Get the name of the object
                class_id = r.names[box.cls[0].item()]
                # Get the coordinates of the object
                cords = box.xyxy[0].tolist()
                cords = [round(x) for x in cords]
                # Get the confidence
                conf = round(box.conf[0].item(), 2)

                # Skip this detection if the confidence is too low, or if the
                # object is not in the list of objects to detect and that list
                # is not empty (empty list means "detect everything").
                if conf < args.object_confidence_threshold or (
                    class_id not in args.detect_object and args.detect_object != []
                ):
                    continue

                # Add the object to the list of objects to plot
                plot_boxes.append(
                    {
                        "label": class_id,
                        "x1": cords[0],
                        "y1": cords[1],
                        "x2": cords[2],
                        "y2": cords[3],
                    }
                )

                objects_and_peoples = notify.thing_detected(
                    thing_name=class_id,
                    objects_and_peoples=objects_and_peoples,
                    detection_type="objects",
                    detection_window=args.detection_window,
                    detection_duration=args.detection_duration,
                    notification_window=args.notification_window,
                    ntfy_url=args.ntfy_url,
                )

            # To debug plotting, use r.plot() to cross reference the bounding
            # boxes drawn by plot_label() and r.plot()
            frame_to_show = utils.plot_label(
                boxes=plot_boxes,
                full_frame=frame,
                # full_frame=r.plot(),
                run_scale=args.run_scale,
                view_scale=args.view_scale,
            )

            # Display the resulting frame
            if not args.no_display:
                cv2.imshow(f"Video{i}", frame_to_show)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    # Release handle to the webcam
    print("Releasing video capture")
    video_capture.release()
    cv2.destroyAllWindows()


def _configure_torch_device():
    """Report available CUDA devices and select GPU 0 for torch, else CPU."""
    # https://github.com/ultralytics/ultralytics/issues/3084#issuecomment-1732433168
    # Currently, I have been unable to set up Poetry to use GPU for Torch
    for i in range(torch.cuda.device_count()):
        print(f'Using {torch.cuda.get_device_properties(i).name} for pytorch')
    if torch.cuda.is_available():
        torch.cuda.set_device(0)
        print("Set CUDA device")
    else:
        print("No CUDA device available, using CPU")


def _force_tensorflow_cpu():
    """Hide all GPUs from TensorFlow so deepface runs on CPU."""
    print("Forcing tensorflow to use CPU")
    import tensorflow as tf

    tf.config.set_visible_devices([], 'GPU')
    if tf.config.experimental.list_logical_devices('GPU'):
        print('GPU disabled unsuccessfully')
    else:
        print("GPU disabled successfully")


def _open_video_capture(args):
    """Open the RTSP stream if one was given, otherwise the capture device."""
    # Depending on if the user wants to use a stream or a capture device,
    # set the video capture to the appropriate source
    if args.rtsp_url is not None:
        return cv2.VideoCapture(args.rtsp_url)
    return cv2.VideoCapture(args.capture_device)
|
|
|
|
|
2023-10-15 01:25:27 +01:00
|
|
|
|
2023-10-14 21:46:42 +01:00
|
|
|
# Script entry point: only start the capture loop when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
|