Compare commits

No commits in common. "494708a37664a6b44642c3595ec2fb9b97a36886" and "d3c157df4d2337512dfc33992187f22ace710fea" have entirely different histories.

6 changed files with 781 additions and 751 deletions

@@ -1 +1 @@
-3.11.5
+3.10.5

@@ -49,7 +49,6 @@ This assumes you have Python 3.10 or 3.11 installed
 #### Poetry
 1. `poetry install`
-    a. For GPU support, use `poetry install -E cuda --with gpu`
 2. `poetry run -- wyzely-detect`
 ### Configuration
 The following are some basic CLI options. Most flags have environment variable equivalents, which can be helpful when using Docker.
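A quick illustration of the CLI/environment-variable pairing mentioned in the README text above, sketched with `argparse`. The environment variable names (`WYZELY_RTSP_URL`, `WYZELY_CAPTURE_DEVICE`) are assumptions for illustration, not the project's actual variables; only the flag names `--rtsp-url` and `--capture-device` are taken from the diff below.

```python
# Hypothetical sketch: CLI flags that fall back to environment variables.
import argparse
import os

parser = argparse.ArgumentParser(prog="wyzely-detect")
parser.add_argument(
    "--rtsp-url",
    action="append",
    default=os.environ.get("WYZELY_RTSP_URL", "").split() or None,
    help="RTSP stream URL(s); falls back to $WYZELY_RTSP_URL",
)
parser.add_argument(
    "--capture-device",
    action="append",
    type=int,
    default=[int(d) for d in os.environ.get("WYZELY_CAPTURE_DEVICE", "").split()] or None,
    help="capture device index; falls back to $WYZELY_CAPTURE_DEVICE",
)
args = parser.parse_args()
print(args.rtsp_url, args.capture_device)
```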

poetry.lock (generated, 1458 changed lines): file diff suppressed because it is too large.

@@ -21,12 +21,11 @@ ultralytics = "^8.0.190"
 hjson = "^3.1.0"
 numpy = "^1.23.2"
-# https://github.com/python-poetry/poetry/issues/6409#issuecomment-1911735833
-# To install with GPU, use poetry install -E cuda --with gpu
-torch = {version = "2.1.*", source = "pytorch-cpu", markers = "extra!='cuda'" }
+# https://github.com/python-poetry/poetry/issues/6409
+torch = ">=2.0.0, !=2.0.1, !=2.1.0"
 # https://stackoverflow.com/a/76477590/18270659
-# https://discfuss.tensorflow.org/t/tensorflow-io-gcs-filesystem-with-windows/18849/4
+# https://discuss.tensorflow.org/t/tensorflow-io-gcs-filesystem-with-windows/18849/4
 # Might be able to remove this version constraint later
 # Working versions:
 # Python version 3.10.12 and 3.10.5 both work
@@ -34,33 +33,10 @@ torch = {version = "2.1.*", source = "pytorch-cpu", markers = "extra!='cuda'" }
 # cuDNN version - 8.8.1
 # Installed from Nvidia website - nvidia-cuda-toolkit is not installed, but default PopOS drivers are installed
 tensorflow-io-gcs-filesystem = "0.31.0"
-tensorflow = {version = "^2.14.0", markers = "extra!='cuda'"}
+tensorflow = {version = "^2.14.0", extras = ["and-cuda"]}
 deepface = "^0.0.79"
-prettytable = "^3.9.0"
-
-[tool.poetry.group.gpu]
-optional = true
-
-[tool.poetry.group.gpu.dependencies]
-torch = {version = "2.1.*", source = "pytorch-cu121", markers = "extra=='cuda'"}
-tensorflow = {version = "^2.14.0", extras = ["and-cuda"], markers = "extra=='cuda'"}
-
-[tool.poetry.extras]
-# Might be better to rename this to nocpu since it's more accurate
-cuda = []
-
-[[tool.poetry.source]]
-name = "pytorch-cpu"
-url = "https://download.pytorch.org/whl/cpu"
-priority = "explicit"
-
-[[tool.poetry.source]]
-name = "pytorch-cu121"
-url = "https://download.pytorch.org/whl/cu121"
-priority = "explicit"
-
 [tool.poetry.group.dev.dependencies]
 black = "^23.9.1"
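An aside on the torch pinning removed above: wheels from the `pytorch-cpu` and `pytorch-cu121` sources carry distinct local version tags, so a short runtime check (not part of the project) can confirm which variant Poetry actually installed.

```python
# Quick check of which torch wheel was installed.
# CPU wheels report a tag like "2.1.2+cpu"; cu121 wheels report "+cu121".
import torch

print("torch:", torch.__version__)
print("built against CUDA:", torch.version.cuda)  # None for CPU-only builds
```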

@@ -1,9 +1,8 @@
 # import face_recognition
 from pathlib import Path
 import cv2
-import os
-from prettytable import PrettyTable
+from PrettyTable import PrettyTable
 # import hjson as json
 import torch
@@ -19,7 +18,7 @@ args = None
 def main():
     global objects_and_peoples
     global args
 
-    # RUN_BY_COMPOSE = os.getenv("RUN_BY_COMPOSE")
+    # Replace this with code to check for gpu
     args = argparser.parse_args()
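The `# Replace this with code to check for gpu` placeholder suggests a runtime device check. One possible sketch using libraries already in the dependency list (an assumption, not the project's code):

```python
# Hypothetical GPU check for the placeholder comment above.
import tensorflow as tf
import torch


def gpu_available() -> bool:
    """Return True if either PyTorch or TensorFlow can see a CUDA device."""
    return torch.cuda.is_available() or bool(tf.config.list_physical_devices("GPU"))


print("GPU available:", gpu_available())
```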
@@ -50,9 +49,7 @@ def main():
     # Set the video capture to the appropriate source
     if not args.rtsp_url and not args.capture_device:
         print("No stream or capture device set, defaulting to capture device 0")
-        video_sources = {
-            "devices": [cv2.VideoCapture(0)]
-        }
+        args.capture_device = [0]
     else:
         video_sources = {
             "streams": [cv2.VideoCapture(url) for url in args.rtsp_url],
@@ -63,22 +60,13 @@
     # This makes it so that the video capture will only grab the most recent frame
     # However, this means that the video may be choppy
     # Only do this for streams
-    try:
-        for stream in video_sources["streams"]:
-            stream.set(cv2.CAP_PROP_BUFFERSIZE, 1)
-    # If there are no streams, this will throw a KeyError
-    except KeyError:
-        pass
-
-    # Print out the resolution of the video sources. Ideally, change this so the device ID/url is also printed
-    pretty_table = PrettyTable(field_names=["Source Type", "Resolution"])
-    for source_type, sources in video_sources.items():
-        for source in sources:
-            pretty_table.add_row(
-                [source_type, f"{source.get(cv2.CAP_PROP_FRAME_WIDTH)}x{source.get(cv2.CAP_PROP_FRAME_HEIGHT)}"]
-            )
-    print(pretty_table)
+    for stream in video_sources["streams"]:
+        stream.set(cv2.CAP_PROP_BUFFERSIZE, 1)
+
+    # Print the resolution of the video
+    print(
+        f"Video resolution: {video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)}x{video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)}"  # noqa: E501
+    )
 
     print
     print("Beginning video capture...")
     while True:
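The removed block on the left-hand side reports every source's resolution in a PrettyTable, while the right-hand side prints a single capture's resolution. A self-contained sketch of the table variant; the single webcam at index 0 is an assumption for illustration:

```python
# Standalone sketch of the removed resolution report.
import cv2
from prettytable import PrettyTable

video_sources = {"devices": [cv2.VideoCapture(0)]}  # could also hold "streams": [...]

# Keep only the most recent frame for network streams (may make playback choppy).
for stream in video_sources.get("streams", []):
    stream.set(cv2.CAP_PROP_BUFFERSIZE, 1)

table = PrettyTable(field_names=["Source Type", "Resolution"])
for source_type, sources in video_sources.items():
    for source in sources:
        width = int(source.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(source.get(cv2.CAP_PROP_FRAME_HEIGHT))
        table.add_row([source_type, f"{width}x{height}"])
print(table)
```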

@@ -1,13 +1,8 @@
 import cv2
-import os
 import numpy as np
 from pathlib import Path
 
-# https://stackoverflow.com/a/42121886/18270659
-os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
-
-from deepface import DeepFace  # noqa: E402
-from . import notify  # noqa: E402
+from deepface import DeepFace
+from . import notify
 
 first_face_try = True
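For reference, the `TF_CPP_MIN_LOG_LEVEL` assignment being removed here silences TensorFlow's C++ logging, which only works if the variable is set before TensorFlow is first imported (deepface imports it transitively). A standalone sketch of that pattern, outside the project:

```python
# TF_CPP_MIN_LOG_LEVEL must be set before TensorFlow is first imported,
# so the environment assignment has to come before the deepface import.
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # 3 = print only fatal messages

from deepface import DeepFace  # noqa: E402  (deliberately after the env var)

print("deepface imported with TensorFlow C++ logging suppressed:", DeepFace is not None)
```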