Manually plot boxes, rather than using `r.plot()`
This commit is contained in:
parent ccf560ab2a
commit 6dfb06989f
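In short: instead of showing the pre-annotated image returned by Ultralytics' `r.plot()`, the detections are collected as plain dicts and drawn onto the frame with OpenCV. A minimal sketch of that idea (the function name `draw_boxes` is illustrative and not part of this commit; the dict keys mirror the new `plot_label` helper added below):

```python
import cv2

def draw_boxes(frame, boxes, color=(0, 255, 0)):
    # Each box is a dict: {"label": str, "x1": int, "y1": int, "x2": int, "y2": int}
    for box in boxes:
        # Draw the bounding box rectangle
        cv2.rectangle(frame, (box["x1"], box["y1"]), (box["x2"], box["y2"]), color, 2)
        # Draw the label at the top-left corner of the box
        cv2.putText(frame, box["label"], (box["x1"], box["y1"]),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, color, 1)
    return frame
```

Plotting manually like this keeps control over which frame the boxes are drawn on (the full-resolution frame rather than the downscaled inference frame), which is what the changes below rely on.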
@@ -1 +1 @@
 3.10.5
@@ -1,15 +1,15 @@
 {
     // Use IntelliSense to learn about possible attributes.
     // Hover to view descriptions of existing attributes.
     // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
     "version": "0.2.0",
     "configurations": [
         {
             "name": "Python: Module",
             "type": "python",
             "request": "launch",
             "module": "src",
             "justMyCode": true
         }
     ]
 }
File diff suppressed because it is too large

pyproject.toml (118 changed lines)
@@ -1,59 +1,59 @@
 [tool.poetry]
 name = "set-detect-notify"
 version = "0.1.0"
 description = "Detect all the things"
 authors = ["slashtechno <77907286+slashtechno@users.noreply.github.com>"]
 license = "MIT"
 readme = "README.md"
 packages = [{include = "set-detect-notify"}]
 
 [tool.poetry.dependencies]
 python = "^3.10"
 python-dotenv = "^1.0.0"
 httpx = "^0.25.0"
 opencv-python = "^4.8.1.78"
 ultralytics = "^8.0.190"
 hjson = "^3.1.0"
 numpy = "^1.23.2"
 # torch = [
 # { version = "^2.0.0+cu118", source = "torch_cu118", markers = "extra=='cuda'" },
 # { version = "^2.0.0+cpu", source = "torch_cpu", markers = "extra!='cuda'" },
 # ]
 # torchaudio = [
 # { version = "^2.0.0+cu118", source = "torch_cu118", markers = "extra=='cuda'" },
 # { version = "^2.0.0+cpu", source = "torch_cpu", markers = "extra!='cuda'" },
 # ]
 # torchvision = [
 # { version = "^0.15+cu118", source = "torch_cu118", markers = "extra=='cuda'" },
 # { version = "^0.15+cpu", source = "torch_cpu", markers = "extra!='cuda'" },
 # ]
 torch = "^2.1.0"
 
 
 [tool.poetry.group.dev.dependencies]
 black = "^23.9.1"
 ruff = "^0.0.291"
 ipykernel = "^6.25.2"
 
 
 # [[tool.poetry.source]]
 # name = "torch_cpu"
 # url = "https://download.pytorch.org/whl/cpu"
 # priority = "supplemental"
 #
 # [[tool.poetry.source]]
 # name = "torch_cu118"
 # url = "https://download.pytorch.org/whl/cu118"
 # priority = "supplemental"
 #
 # [tool.poetry.extras]
 # cuda = []
 #
 # [[tool.poetry.source]]
 # name = "PyPI"
 # priority = "primary"
 
 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
 
@@ -13,6 +13,7 @@ from ultralytics import YOLO
 import argparse
 
 from .utils import notify
+from .utils import utils
 
 DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
 args = None
@@ -48,13 +49,13 @@ def main():
         type=float,
         help="The scale to run the detection at, default is 0.25",
     )
-    # argparser.add_argument(
-    # '--view-scale',
-    # # Set it to the env VIEW_SCALE if it isn't blank, otherwise set it to 0.75
-    # default=os.environ['VIEW_SCALE'] if 'VIEW_SCALE' in os.environ and os.environ['VIEW_SCALE'] != '' else 0.75, # noqa: E501
-    # type=float,
-    # help="The scale to view the detection at, default is 0.75",
-    # )
+    argparser.add_argument(
+        '--view-scale',
+        # Set it to the env VIEW_SCALE if it isn't blank, otherwise set it to 0.75
+        default=os.environ['VIEW_SCALE'] if 'VIEW_SCALE' in os.environ and os.environ['VIEW_SCALE'] != '' else 0.75, # noqa: E501
+        type=float,
+        help="The scale to view the detection at, default is 0.75",
+    )
 
     argparser.add_argument(
         "--confidence-threshold",
@@ -160,10 +161,12 @@ def main():
         # Only process every other frame of video to save time
         # Resize frame of video to a smaller size for faster recognition processing
         run_frame = cv2.resize(frame, (0, 0), fx=args.run_scale, fy=args.run_scale)
         # view_frame = cv2.resize(frame, (0, 0), fx=args.view_scale, fy=args.view_scale)
 
         results = model(run_frame, verbose=False)
-        for r in results:
+        for i, r in enumerate(results):
+            # list of dicts with each dict containing a label, x1, y1, x2, y2
+            plot_boxes = []
             # Setup dictionary of object names
             if not object_names:
                 for name in r.names.values():
@@ -174,6 +177,7 @@ def main():
                         "last_notification_time": None,
                     }
             for box in r.boxes:
+
                 # Get the name of the object
                 class_id = r.names[box.cls[0].item()]
                 # Get the coordinates of the object
@@ -192,6 +196,18 @@ def main():
                     # If the confidence is less than 0.5, then SKIP!!!!
                     continue
 
+
+                # Add the object to the list of objects to plot
+                plot_boxes.append(
+                    {
+                        "label": class_id,
+                        "x1": cords[0],
+                        "y1": cords[1],
+                        "x2": cords[2],
+                        "y2": cords[3],
+                    }
+                )
+
                 # End goal: Send a notification when an object has been detected for 2 seconds in the past 15 seconds.
                 # However, don't send a notification if the last notification was less than 15 seconds ago
 
@@ -265,13 +281,17 @@ def main():
                     # Reset the detection duration
                     print("Just sent a notification - resetting detection duration")
                     object_names[class_id]["detection_duration"] = 0
-            im_array = r.plot()
-            # Scale back up the coordinates of the locations of detected objects.
-            # im_array = np.multiply(im_array, 1/args.run_scale)
-            # print(type(im_array))
-            # print(im_array)
-            # exit()
-            cv2.imshow("View", im_array)
+            frame_to_show = utils.plot_label(
+                boxes=plot_boxes,
+                full_frame=frame,
+                # full_frame=r.plot(),
+                run_scale=args.run_scale,
+                view_scale=args.view_scale,
+            )
+
+            # Display the resulting frame
+            # cv2.imshow("", r)
+            cv2.imshow(f"Video{i}", frame_to_show)
 
         # Hit 'q' on the keyboard to quit!
         if cv2.waitKey(1) & 0xFF == ord("q"):
@@ -1,32 +1,32 @@
 import datetime
 import httpx
 
 
 def construct_ntfy_headers(
     title: str = "Object/Person Detected",
     tag="rotating_light", # https://docs.ntfy.sh/publish/#tags-emojis
     priority="default", # https://docs.ntfy.sh/publish/#message-priority
 ) -> dict:
     return {"Title": title, "Priority": priority, "Tags": tag}
 
 
 def send_notification(data: str, headers: dict, url: str):
     if url is None or data is None:
         raise ValueError("url and data cannot be None")
     httpx.post(url, data=data.encode("utf-8"), headers=headers)
 
 
 def check_last_seen(last_seen: datetime.datetime, seconds: int = 15):
     """
     Check if a time is older than a given number of seconds
     If it is, return True
     If last_seen is empty/null, return True
     """
     if (
         datetime.datetime.now() - last_seen > datetime.timedelta(seconds=seconds)
         or last_seen == ""
         or last_seen is None
     ):
         return True
     else:
         return False
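A quick usage sketch for `check_last_seen` (the `src.utils.notify` import path is an assumption based on the `from .utils import notify` line in the diff above):

```python
import datetime

from src.utils import notify  # assumed import path

now = datetime.datetime.now()

# Seen 30 seconds ago: older than the default 15-second window, so True
print(notify.check_last_seen(now - datetime.timedelta(seconds=30)))

# Seen 5 seconds ago: still within the window, so False
print(notify.check_last_seen(now - datetime.timedelta(seconds=5), seconds=15))
```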
@@ -0,0 +1,46 @@
+import cv2
+import numpy as np
+def plot_label(
+    # list of dicts with each dict containing a label, x1, y1, x2, y2
+    boxes: list = None,
+    # opencv image
+    full_frame: np.ndarray = None,
+    # run_scale is the scale of the image that was used to run the model
+    # So the coordinates will be scaled up to the view frame size
+    run_scale: float = None,
+    # view_scale is the scale of the image, in relation to the full frame
+    # So the coordinates will be scaled appropriately when coming from run_frame
+    view_scale: float = None,
+    font: int = cv2.FONT_HERSHEY_SIMPLEX,
+):
+    view_frame = cv2.resize(full_frame, (0, 0), fx=view_scale, fy=view_scale)
+    for thing in boxes:
+        cv2.rectangle(
+            # Image
+            view_frame,
+            # Start point
+            (int(thing["x1"] * (run_scale/view_scale)), int(thing["y1"] * (run_scale/view_scale))),
+            # End point
+            (int(thing["x2"] * (run_scale/view_scale)), int(thing["y2"] * (run_scale/view_scale))),
+            # Color
+            (0, 255, 0),
+            # Thickness
+            2,
+        )
+        cv2.putText(
+            # Image
+            view_frame,
+            # Text
+            thing["label"],
+            # Origin
+            (int(thing["x1"] * (run_scale/view_scale)), int(thing["y1"] * (run_scale/view_scale))),
+            # Font
+            font,
+            # Font Scale
+            1,
+            # Color
+            (0, 255, 0),
+            # Thickness
+            1
+        )
+    return view_frame
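For reference, a hypothetical standalone call to the new `plot_label` helper, mirroring how `main()` uses it. The image path, box values, and scale values are made-up example inputs, and the `src.utils.utils` import path is assumed from the `from .utils import utils` line above:

```python
import cv2

from src.utils import utils  # assumed import path

frame = cv2.imread("example.jpg")  # stand-in for a frame from the video capture

# Boxes as produced in main(): coordinates come from the model run on the scaled-down run_frame
plot_boxes = [{"label": "person", "x1": 40, "y1": 30, "x2": 120, "y2": 200}]

frame_to_show = utils.plot_label(
    boxes=plot_boxes,
    full_frame=frame,
    run_scale=0.25,   # example value of args.run_scale
    view_scale=0.75,  # example value of args.view_scale
)
cv2.imshow("Video0", frame_to_show)
cv2.waitKey(0)
```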