Docker is working again

Resorted to reinstalling `torchvision` via `pip`
slashtechno 2024-03-08 16:15:58 -06:00
parent 4c398b9603
commit 771154cbef
Signed by: slashtechno
GPG Key ID: 8EC1D9D9286C2B17
4 changed files with 49 additions and 26 deletions

Dockerfile

@@ -1,15 +1,19 @@
FROM python:3.10.5-buster
LABEL org.opencontainers.image.description "Dokcer image for running wyzely-detect"
LABEL org.opencontainers.image.description "Docker image for running wyzely-detect"
LABEL org.opencontainers.image.source "https://github.com/slashtechno/wyzely-detect"
RUN apt update && apt install libgl1 -y
RUN pip install poetry
WORKDIR /app
COPY . .
RUN poetry install
ENTRYPOINT ["poetry", "run", "python", "-m", "wyzely_detect"]
RUN poetry run pip uninstall -y torchvision
RUN poetry run pip install torchvision
ENTRYPOINT ["poetry", "run", "python", "-m", "--", "wyzely_detect", "--no-display"]
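
The uninstall/reinstall above presumably forces `pip` to resolve a `torchvision` build that matches the `torch` already in the environment. A quick way to confirm the pairing inside the built image is a small check like this (a hypothetical snippet, not part of the repository; run it with `poetry run python` inside the container):

```python
# Hypothetical sanity check -- not part of the repository.
# Run with `poetry run python check_torch.py` inside the built image.
import torch
import torchvision

print(f"torch {torch.__version__}, torchvision {torchvision.__version__}")

# A mismatched torch/torchvision pairing usually surfaces when models or ops
# are loaded, so exercise a small torchvision model once on CPU.
model = torchvision.models.mobilenet_v3_small(weights=None).eval()
with torch.no_grad():
    out = model(torch.zeros(1, 3, 224, 224))
print("forward pass OK, output shape:", tuple(out.shape))
```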

README.md

@@ -14,6 +14,7 @@ Recognize faces/objects in a video stream (from a webcam or a security camera) a
### Python
- Camera, either a webcam or a Wyze Cam
  - All RTSP feeds _should_ work, however.
  - **WSL, by default, does not support USB devices.** It is recommended to run this natively, but it is possible to use it on WSL with streams or some workarounds.
- Python 3.10 or 3.11
- Poetry (optional)
- Windows or Linux
@@ -37,20 +38,24 @@ Cloning the repository is not required when installing from PyPi but is required
2. `cd` into the cloned repository
3. Then, either install with [Poetry](https://python-poetry.org/) or run with Docker
#### Docker
1. Modify to `docker-compose.yml` to achieve desired configuration
2. Run in the background with `docker compose up -d
#### Installing from PyPi with pip
#### Installing from PyPi with pip (recommended)
This assumes you have Python 3.10 or 3.11 installed
1. `pip install wyzely-detect`
    a. You may need to use `pip3` instead of `pip`
2. `wyzely-detect`
#### Poetry
#### Poetry (best for GPU support)
1. `poetry install`
    a. For GPU support, use `poetry install -E cuda --with gpu`
2. `poetry run -- wyzely-detect`
#### Docker
Running with Docker has the benefit of easier configuration, the ability to run headlessly, and easy setup of Ntfy and [mrlt8/docker-wyze-bridge](https://github.com/mrlt8/docker-wyze-bridge). However, for now, only CPU is supported. Contributions are welcome to add GPU support. In addition, Docker is a less-tested method of running this program.
1. Modify `docker-compose.yml` to achieve the desired configuration
2. Run in the background with `docker compose up -d`
### Configuration
The following are some basic CLI options. Most flags have environment variable equivalents which can be helpful when using Docker.
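
For example, the flag-with-environment-fallback pattern described above can be sketched roughly as follows; this is not wyzely-detect's actual parser, and the `NTFY_URL`/`RTSP_URL` variable names are assumptions, but the `--ntfy-url` and `--rtsp-url` flags do appear in the compose file below.

```python
# Illustrative sketch only -- wyzely-detect's real CLI may differ.
import argparse
import os

parser = argparse.ArgumentParser(prog="wyzely-detect")
# Flags fall back to environment variables, which is convenient with Docker,
# where values are usually injected through the compose `environment:` block.
parser.add_argument(
    "--ntfy-url",
    default=os.environ.get("NTFY_URL"),  # assumed variable name
    help="Ntfy stream to send notifications to",
)
parser.add_argument(
    "--rtsp-url",
    action="append",
    help="RTSP stream to watch; may be given multiple times",
)

args = parser.parse_args()
# Apply the environment fallback for the repeatable flag after parsing.
if not args.rtsp_url and os.environ.get("RTSP_URL"):  # assumed variable name
    args.rtsp_url = [os.environ["RTSP_URL"]]
print(args)
```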

docker-compose.yml

@@ -14,8 +14,11 @@ services:
      # - 8888:8888 # HLS
      # - 5000:5000 # WEB-UI
    environment:
      - WYZE_EMAIL=${WYZE_EMAIL} # Replace with wyze email
      - WYZE_PASSWORD=${WYZE_PASSWORD} # Replace with wyze password
      # This is a simple configuration without 2FA.
      # For advanced configuration, including using an API key, see https://github.com/mrlt8/docker-wyze-bridge/wiki/Two-Factor-Authentication
      # Either replace the following with your Wyze username and password, or set the environment variables
      - WYZE_EMAIL=${WYZE_EMAIL}
      - WYZE_PASSWORD=${WYZE_PASSWORD}
    networks:
      all:
  ntfy:
@@ -35,12 +38,24 @@ services:
      all:
  wyzely-detect:
    container_name: wyzely-detect
    restart: unless-stopped
    image: ghcr.io/slashtechno/wyzely-detect:latest
    # restart: unless-stopped
    # image: ghcr.io/slashtechno/wyzely-detect:latest
    # Building from source is also an option
    # build:
    # context: .
    # dockerfile: Dockerfile
    build:
      context: .
      dockerfile: Dockerfile
    command:
      - "--ntfy-url"
      # Replace "wyzely-detect" with the desired notification stream
      - "http://ntfy:80/wyzely-detect"
      - "--rtsp-url"
      # Replace "cv" with the desired rtsp stream
      - "rtsp://bridge:8554/cv"
      # Example second rtsp stream
      # - "--rtsp-url"
      # - "rtsp://bridge:8554/camera"
    volumes:
      - ./faces:/app/faces
    networks:
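
Once the stack is up, the stream URL passed above can be verified with a short capture test; this is a hypothetical helper (not part of the repository) that assumes OpenCV is available, and the `bridge` hostname only resolves from inside the compose network.

```python
# Hypothetical connectivity check -- not part of the repository.
# Assumes opencv-python is installed and that this runs inside the compose
# network, where the `bridge` hostname resolves.
import sys

import cv2

# Same stream the compose file passes via --rtsp-url; replace "cv" with your camera name.
RTSP_URL = "rtsp://bridge:8554/cv"

cap = cv2.VideoCapture(RTSP_URL)
ok, frame = cap.read()
cap.release()

if not ok:
    sys.exit(f"Could not read a frame from {RTSP_URL}")
print(f"Got a {frame.shape[1]}x{frame.shape[0]} frame from {RTSP_URL}")
```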

poetry.lock (generated)

@@ -1104,13 +1104,13 @@ test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio
[[package]]
name = "ipython"
version = "8.22.1"
version = "8.22.2"
description = "IPython: Productive Interactive Computing"
optional = false
python-versions = ">=3.10"
files = [
{file = "ipython-8.22.1-py3-none-any.whl", hash = "sha256:869335e8cded62ffb6fac8928e5287a05433d6462e3ebaac25f4216474dd6bc4"},
{file = "ipython-8.22.1.tar.gz", hash = "sha256:39c6f9efc079fb19bfb0f17eee903978fe9a290b1b82d68196c641cecb76ea22"},
{file = "ipython-8.22.2-py3-none-any.whl", hash = "sha256:3c86f284c8f3d8f2b6c662f885c4889a91df7cd52056fd02b7d8d6195d7f56e9"},
{file = "ipython-8.22.2.tar.gz", hash = "sha256:2dcaad9049f9056f1fef63514f176c7d41f930daa78d05b82a176202818f2c14"},
]
[package.dependencies]
@@ -1676,13 +1676,13 @@ test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=
[[package]]
name = "nbconvert"
version = "7.16.1"
version = "7.16.2"
description = "Converting Jupyter Notebooks (.ipynb files) to other formats. Output formats include asciidoc, html, latex, markdown, pdf, py, rst, script. nbconvert can be used both as a Python library (`import nbconvert`) or as a command line tool (invoked as `jupyter nbconvert ...`)."
optional = false
python-versions = ">=3.8"
files = [
{file = "nbconvert-7.16.1-py3-none-any.whl", hash = "sha256:3188727dffadfdc9c6a1c7250729063d7bc78b355ad7aa023138afa030d1cd07"},
{file = "nbconvert-7.16.1.tar.gz", hash = "sha256:e79e6a074f49ba3ed29428ed86487bf51509d9aab613bd8522ac08f6d28fd7fd"},
{file = "nbconvert-7.16.2-py3-none-any.whl", hash = "sha256:0c01c23981a8de0220255706822c40b751438e32467d6a686e26be08ba784382"},
{file = "nbconvert-7.16.2.tar.gz", hash = "sha256:8310edd41e1c43947e4ecf16614c61469ebc024898eb808cce0999860fc9fb16"},
]
[package.dependencies]
@@ -2490,7 +2490,6 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
@@ -3506,13 +3505,13 @@ files = [
[[package]]
name = "ultralytics"
version = "8.1.22"
version = "8.1.23"
description = "Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification."
optional = false
python-versions = ">=3.8"
files = [
{file = "ultralytics-8.1.22-py3-none-any.whl", hash = "sha256:219a0e292d5013718883e90b3a5252c08c0094e9d76123b4873a446ed93eb737"},
{file = "ultralytics-8.1.22.tar.gz", hash = "sha256:d3a47863d24cc5c8ea648bb69e8066795d103a0bf1558d8077f5b25d5e000f70"},
{file = "ultralytics-8.1.23-py3-none-any.whl", hash = "sha256:d0df385e5c6a90493ac25a3b18115d756b0e0aa4feb9fa6d196ad0b6c4a393e0"},
{file = "ultralytics-8.1.23.tar.gz", hash = "sha256:2915e8c96a66344fef89f02de3cdf2a52f81d5591319f81d639011214674ab6d"},
]
[package.dependencies]