wyzely-detect/wyzely_detect/__main__.py

# import face_recognition
from pathlib import Path
import cv2
# import hjson as json
import torch
from ultralytics import YOLO
from .utils import utils
from .utils.cli_args import argparser
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
args = None
def main():
    global objects_and_peoples
    global args

    # RUN_BY_COMPOSE = os.getenv("RUN_BY_COMPOSE")  # Replace this with code to check for GPU
    args = argparser.parse_args()

    # Check if a CUDA GPU is available. If it is, set it via torch. If not, fall back to the CPU.
    # https://github.com/ultralytics/ultralytics/issues/3084#issuecomment-1732433168
    # Currently, I have been unable to set up Poetry to use the GPU for Torch
    for i in range(torch.cuda.device_count()):
        print(f"Using {torch.cuda.get_device_properties(i).name} for pytorch")
    if torch.cuda.is_available():
        torch.cuda.set_device(0)
        print("Set CUDA device")
    else:
        print("No CUDA device available, using CPU")

    # Deepface (TensorFlow) seems to use my GPU automatically on Pop!_OS (I did not set up cuDNN or anything)
    # Not sure of the best way to manage GPU libraries in Poetry, so for now just use the CPU
    if args.force_disable_tensorflow_gpu:
        print("Forcing tensorflow to use CPU")
        import tensorflow as tf

        tf.config.set_visible_devices([], "GPU")
        if tf.config.experimental.list_logical_devices("GPU"):
            print("GPU disabled unsuccessfully")
        else:
            print("GPU disabled successfully")
    model = YOLO("yolov8n.pt")
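    # (If yolov8n.pt is not already present, ultralytics downloads the pretrained
    # weights automatically on first use.)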
    # Depending on whether the user wants to use a stream or a capture device,
    # set the video capture to the appropriate source
    if args.rtsp_url is not None:
        video_capture = cv2.VideoCapture(args.rtsp_url)
    else:
        video_capture = cv2.VideoCapture(args.capture_device)
    # Eliminate lag by setting the buffer size to 1
    # This makes it so that the video capture will only grab the most recent frame
    # However, this means that the video may be choppy
    video_capture.set(cv2.CAP_PROP_BUFFERSIZE, 1)
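    # Note: CAP_PROP_BUFFERSIZE is only honored by some OpenCV capture backends;
    # if the backend does not support it, the set() call is silently ignored.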
    # Print the resolution of the video
    print(
        f"Video resolution: {video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)}x{video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)}"  # noqa: E501
    )
    print("Beginning video capture...")
    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()
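        # Note: read() can return (False, None) if the stream drops or the device
        # disconnects; a guard such as `if not ret: continue` could be added here
        # to avoid handing None to process_footage.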
        frame_to_show = utils.process_footage(
            frame=frame,
            run_scale=args.run_scale,
            view_scale=args.view_scale,
            faces_directory=Path(args.faces_directory),
            face_confidence_threshold=args.face_confidence_threshold,
            no_remove_representations=args.no_remove_representations,
            detection_window=args.detection_window,
            detection_duration=args.detection_duration,
            notification_window=args.notification_window,
            ntfy_url=args.ntfy_url,
            model=model,
            detect_object=args.detect_object,
            object_confidence_threshold=args.object_confidence_threshold,
        )
        # Display the resulting frame
        # TODO: When multi-camera support is added, this needs to be changed to allow all feeds
        if not args.no_display:
            cv2.imshow("Video", frame_to_show)
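        # Note: the 'q' key below is only detected while an OpenCV window has focus,
        # so quitting via the keyboard requires the display to be enabled.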
        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    # Release handle to the webcam
    print("Releasing video capture")
    video_capture.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
main()