Compare commits
No commits in common. "6928fdace5b932959942e95424d797d033391956" and "b5d95ed963cac67f4aac8ef480b738bdecd4880b" have entirely different histories.
6928fdace5...b5d95ed963
@@ -22,16 +22,6 @@
            "module": "wyzely_detect",
            // "justMyCode": true
            "justMyCode": false
        },
        {
            "name": "Debug --help",
            "type": "python",
            "request": "launch",
            "module": "wyzely_detect",
            "args": [
                "--help"
            ],
            "justMyCode": false
        },
        }
    ]
}
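As an aside, the "Debug --help" launch configuration above amounts to running the package as a module with the --help flag. A minimal sketch of the same invocation outside the debugger, assuming the wyzely_detect package is importable in the active environment:

import subprocess
import sys

# Roughly what the "Debug --help" launch configuration runs:
#   python -m wyzely_detect --help
subprocess.run([sys.executable, "-m", "wyzely_detect", "--help"], check=True)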
@@ -1,6 +1,6 @@
# import face_recognition
from pathlib import Path
import os

import cv2

# import hjson as json
@@ -30,22 +30,12 @@ def main():
    # https://github.com/ultralytics/ultralytics/issues/3084#issuecomment-1732433168
    # Currently, I have been unable to set up Poetry to use GPU for Torch
    for i in range(torch.cuda.device_count()):
        print(f'Using {torch.cuda.get_device_properties(i).name} for pytorch')
        print(torch.cuda.get_device_properties(i).name)
    if torch.cuda.is_available():
        torch.cuda.set_device(0)
        print("Set CUDA device")
    else:
        print("No CUDA device available, using CPU")
    # Seems automatically, deepface (tensorflow) tried to use my GPU on Pop!_OS (I did not set up cudnn or anything)
    # Not sure the best way, in Poetry, to manage GPU libraries so for now, just use CPU
    if args.force_disable_tensorflow_gpu:
        print("Forcing tensorflow to use CPU")
        import tensorflow as tf
        tf.config.set_visible_devices([], 'GPU')
        if tf.config.experimental.list_logical_devices('GPU'):
            print('GPU disabled unsuccessfully')
        else:
            print("GPU disabled successfully")

    model = YOLO("yolov8n.pt")
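For context, the hunk above mixes two concerns: enumerating CUDA devices for PyTorch and hiding the GPU from TensorFlow. A standalone sketch of that logic, assuming torch and tensorflow are installed (set_visible_devices has to run before TensorFlow initializes any GPU):

import torch
import tensorflow as tf

# Report whatever CUDA devices PyTorch can see
for i in range(torch.cuda.device_count()):
    print(f"Using {torch.cuda.get_device_properties(i).name} for pytorch")
if torch.cuda.is_available():
    torch.cuda.set_device(0)

# Hide every GPU from TensorFlow so it falls back to the CPU;
# this must happen before TensorFlow touches the GPUs
tf.config.set_visible_devices([], "GPU")
if tf.config.list_logical_devices("GPU"):
    print("GPU disabled unsuccessfully")
else:
    print("GPU disabled successfully")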
@@ -70,6 +60,7 @@ def main():
    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()
        # Only process every other frame of video to save time
        # Resize frame of video to a smaller size for faster recognition processing
        run_frame = cv2.resize(frame, (0, 0), fx=args.run_scale, fy=args.run_scale)
        # view_frame = cv2.resize(frame, (0, 0), fx=args.view_scale, fy=args.view_scale)
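The capture loop above only shows the resize step; a self-contained sketch of the surrounding OpenCV loop, with the scale factor hard-coded where the real script reads args.run_scale:

import cv2

run_scale = 0.25  # stand-in for args.run_scale

video_capture = cv2.VideoCapture(0)  # default webcam
while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    if not ret:
        break
    # Downscale before running detection to save time
    run_frame = cv2.resize(frame, (0, 0), fx=run_scale, fy=run_scale)
    cv2.imshow("run_frame", run_frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

video_capture.release()
cv2.destroyAllWindows()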
@@ -72,15 +72,7 @@ def set_argparse():
        action="store_true",
        help="Don't display the video feed",
    )
    video_options.add_argument(
        '-c',
        '--force-disable-tensorflow-gpu',
        default=os.environ["FORCE_DISABLE_TENSORFLOW_GPU"]
        if "FORCE_DISABLE_TENSORFLOW_GPU" in os.environ and os.environ["FORCE_DISABLE_TENSORFLOW_GPU"] != ""
        else False,
        action="store_true",
        help="Force disable tensorflow GPU through env since sometimes it's not worth it to install cudnn and whatnot",
    )


    notifcation_services = argparser.add_argument_group("Notification Services")
    notifcation_services.add_argument(
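The --force-disable-tensorflow-gpu option uses a common pattern: a store_true flag whose default comes from an environment variable. A reduced sketch of that pattern (the argument group name and help text here are shortened stand-ins):

import argparse
import os

argparser = argparse.ArgumentParser()
video_options = argparser.add_argument_group("Video Options")  # group name assumed
video_options.add_argument(
    "-c",
    "--force-disable-tensorflow-gpu",
    # Use the environment variable as the default when it is set and non-empty;
    # note that the raw string from the environment becomes the default value
    default=os.environ["FORCE_DISABLE_TENSORFLOW_GPU"]
    if "FORCE_DISABLE_TENSORFLOW_GPU" in os.environ
    and os.environ["FORCE_DISABLE_TENSORFLOW_GPU"] != ""
    else False,
    action="store_true",
    help="Force disable tensorflow GPU",
)
args = argparser.parse_args()
print(bool(args.force_disable_tensorflow_gpu))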
@@ -124,21 +124,13 @@ def recognize_face(
            model_name="ArcFace",
            detector_backend="opencv",
        )

    except (ValueError) as e:
    except ValueError as e:
        if (
            str(e)
            == "Face could not be detected. Please confirm that the picture is a face photo or consider to set enforce_detection param to False."  # noqa: E501
        ):
            # print("No faces recognized")  # For debugging
            return None
        elif (
            # Check if the error message contains "Validate .jpg or .png files exist in this path."
            "Validate .jpg or .png files exist in this path." in str(e)
        ):
            # If a verbose/silent flag is added, this should be changed to print only if verbose is true
            # print("No faces found in database")
            return None
        else:
            raise e
    # Iterate over the dataframes
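The except branch above distinguishes two failure modes of deepface's DeepFace.find by matching the exception text. A trimmed sketch of that pattern, assuming the deepface package is installed (exact messages vary between deepface versions, so substring checks are used here, and find_face is a hypothetical wrapper):

from deepface import DeepFace


def find_face(img, db_path):
    # Hypothetical wrapper illustrating the error handling shown above
    try:
        return DeepFace.find(
            img_path=img,
            db_path=db_path,
            model_name="ArcFace",
            detector_backend="opencv",
        )
    except ValueError as e:
        if "Face could not be detected" in str(e):
            # No face in this frame
            return None
        if "Validate .jpg or .png files exist in this path." in str(e):
            # No usable images in the face database directory
            return None
        raise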