Don't install TensorFlow with `and-cuda`
Dropping `and-cuda` most likely means Deepface will no longer be able to use the GPU, so the better long-term fix would be to do what we already do for Torch and make the GPU capability an optional extra (sketched below).
commit 401c5cee16
parent 3ac460a060
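
A rough, untested sketch of what that could look like in pyproject.toml, reusing the existing `cuda` extra and the marker pattern already used for torch (the multiple-constraint layout below is an assumption, not something verified against Poetry):

    # Hypothetical follow-up: keep the CPU-only TensorFlow wheel as the default and
    # only pull in the CUDA-enabled build when the existing "cuda" extra is requested,
    # mirroring the torch/pytorch-cpu setup.
    tensorflow = [
        {version = "^2.14.0", markers = "extra != 'cuda'"},
        {version = "^2.14.0", extras = ["and-cuda"], markers = "extra == 'cuda'"},
    ]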
poetry.lock
@@ -1797,120 +1797,6 @@ files = [
     {file = "numpy-1.26.2.tar.gz", hash = "sha256:f65738447676ab5777f11e6bbbdb8ce11b785e105f690bc45966574816b6d3ea"},
 ]
 
-[[package]]
-name = "nvidia-cublas-cu11"
-version = "11.11.3.6"
-description = "CUBLAS native runtime libraries"
-optional = false
-python-versions = ">=3"
-files = [
-    {file = "nvidia_cublas_cu11-11.11.3.6-py3-none-manylinux1_x86_64.whl", hash = "sha256:39fb40e8f486dd8a2ddb8fdeefe1d5b28f5b99df01c87ab3676f057a74a5a6f3"},
-    {file = "nvidia_cublas_cu11-11.11.3.6-py3-none-win_amd64.whl", hash = "sha256:6ab12b1302bef8ac1ff4414edd1c059e57f4833abef9151683fb8f4de25900be"},
-]
-
-[[package]]
-name = "nvidia-cuda-cupti-cu11"
-version = "11.8.87"
-description = "CUDA profiling tools runtime libs."
-optional = false
-python-versions = ">=3"
-files = [
-    {file = "nvidia_cuda_cupti_cu11-11.8.87-py3-none-manylinux1_x86_64.whl", hash = "sha256:0e50c707df56c75a2c0703dc6b886f3c97a22f37d6f63839f75b7418ba672a8d"},
-    {file = "nvidia_cuda_cupti_cu11-11.8.87-py3-none-win_amd64.whl", hash = "sha256:4332d8550ad5f5b673f98d08e4e4f82030cb604c66d8d5ee919399ea01312e58"},
-]
-
-[[package]]
-name = "nvidia-cuda-nvcc-cu11"
-version = "11.8.89"
-description = "CUDA nvcc"
-optional = false
-python-versions = ">=3"
-files = [
-    {file = "nvidia_cuda_nvcc_cu11-11.8.89-py3-none-manylinux1_x86_64.whl", hash = "sha256:3e25894debe6ce87e6dbb99b2311fba6f56c1b647daae2c4e5de537dc5d88876"},
-    {file = "nvidia_cuda_nvcc_cu11-11.8.89-py3-none-win_amd64.whl", hash = "sha256:bcfb622d2449982812eb0fa43b3de0593e877edd33790f423a228ed1be62378c"},
-]
-
-[[package]]
-name = "nvidia-cuda-runtime-cu11"
-version = "11.8.89"
-description = "CUDA Runtime native Libraries"
-optional = false
-python-versions = ">=3"
-files = [
-    {file = "nvidia_cuda_runtime_cu11-11.8.89-py3-none-manylinux1_x86_64.whl", hash = "sha256:f587bd726eb2f7612cf77ce38a2c1e65cf23251ff49437f6161ce0d647f64f7c"},
-    {file = "nvidia_cuda_runtime_cu11-11.8.89-py3-none-win_amd64.whl", hash = "sha256:f60c9fdaed8065b38de8097867240efc5556a8a710007146daeb9082334a6e63"},
-]
-
-[[package]]
-name = "nvidia-cudnn-cu11"
-version = "8.7.0.84"
-description = "cuDNN runtime libraries"
-optional = false
-python-versions = ">=3"
-files = [
-    {file = "nvidia_cudnn_cu11-8.7.0.84-py3-none-manylinux1_x86_64.whl", hash = "sha256:b3e062498fbbb1c1930435a6a454c1b41c903e1e65b7063bd2b4021e8285408e"},
-]
-
-[package.dependencies]
-nvidia-cublas-cu11 = "*"
-
-[[package]]
-name = "nvidia-cufft-cu11"
-version = "10.9.0.58"
-description = "CUFFT native runtime libraries"
-optional = false
-python-versions = ">=3"
-files = [
-    {file = "nvidia_cufft_cu11-10.9.0.58-py3-none-manylinux1_x86_64.whl", hash = "sha256:222f9da70c80384632fd6035e4c3f16762d64ea7a843829cb278f98b3cb7dd81"},
-    {file = "nvidia_cufft_cu11-10.9.0.58-py3-none-win_amd64.whl", hash = "sha256:c4d316f17c745ec9c728e30409612eaf77a8404c3733cdf6c9c1569634d1ca03"},
-]
-
-[[package]]
-name = "nvidia-curand-cu11"
-version = "10.3.0.86"
-description = "CURAND native runtime libraries"
-optional = false
-python-versions = ">=3"
-files = [
-    {file = "nvidia_curand_cu11-10.3.0.86-py3-none-manylinux1_x86_64.whl", hash = "sha256:ac439548c88580269a1eb6aeb602a5aed32f0dbb20809a31d9ed7d01d77f6bf5"},
-    {file = "nvidia_curand_cu11-10.3.0.86-py3-none-win_amd64.whl", hash = "sha256:8fa8365065fc3e3760d7437b08f164a6bcf8f7124f3b544d2463ded01e6bdc70"},
-]
-
-[[package]]
-name = "nvidia-cusolver-cu11"
-version = "11.4.1.48"
-description = "CUDA solver native runtime libraries"
-optional = false
-python-versions = ">=3"
-files = [
-    {file = "nvidia_cusolver_cu11-11.4.1.48-py3-none-manylinux1_x86_64.whl", hash = "sha256:ca538f545645b7e6629140786d3127fe067b3d5a085bd794cde5bfe877c8926f"},
-    {file = "nvidia_cusolver_cu11-11.4.1.48-py3-none-win_amd64.whl", hash = "sha256:7efe43b113495a64e2cf9a0b4365bd53b0a82afb2e2cf91e9f993c9ef5e69ee8"},
-]
-
-[package.dependencies]
-nvidia-cublas-cu11 = "*"
-
-[[package]]
-name = "nvidia-cusparse-cu11"
-version = "11.7.5.86"
-description = "CUSPARSE native runtime libraries"
-optional = false
-python-versions = ">=3"
-files = [
-    {file = "nvidia_cusparse_cu11-11.7.5.86-py3-none-manylinux1_x86_64.whl", hash = "sha256:4ae709fe78d3f23f60acaba8c54b8ad556cf16ca486e0cc1aa92dca7555d2d2b"},
-    {file = "nvidia_cusparse_cu11-11.7.5.86-py3-none-win_amd64.whl", hash = "sha256:a0f6ee81cd91be606fc2f55992d06b09cd4e86d74b6ae5e8dd1631cf7f5a8706"},
-]
-
-[[package]]
-name = "nvidia-nccl-cu11"
-version = "2.16.5"
-description = "NVIDIA Collective Communication Library (NCCL) Runtime"
-optional = false
-python-versions = ">=3"
-files = [
-    {file = "nvidia_nccl_cu11-2.16.5-py3-none-manylinux1_x86_64.whl", hash = "sha256:948cc9a8c659fc6cc3456dd54844fe203519b82ae87e3e94b9818dd9d94deaad"},
-]
-
 [[package]]
 name = "oauthlib"
 version = "3.2.2"
@@ -2179,6 +2065,23 @@ files = [
 docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"]
 test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"]
 
+[[package]]
+name = "prettytable"
+version = "3.9.0"
+description = "A simple Python library for easily displaying tabular data in a visually appealing ASCII table format"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "prettytable-3.9.0-py3-none-any.whl", hash = "sha256:a71292ab7769a5de274b146b276ce938786f56c31cf7cea88b6f3775d82fe8c8"},
+    {file = "prettytable-3.9.0.tar.gz", hash = "sha256:f4ed94803c23073a90620b201965e5dc0bccf1760b7a7eaf3158cab8aaffdf34"},
+]
+
+[package.dependencies]
+wcwidth = "*"
+
+[package.extras]
+tests = ["pytest", "pytest-cov", "pytest-lazy-fixture"]
+
 [[package]]
 name = "prompt-toolkit"
 version = "3.0.43"
@@ -3018,16 +2921,6 @@ keras = ">=2.14.0,<2.15"
 libclang = ">=13.0.0"
 ml-dtypes = "0.2.0"
 numpy = ">=1.23.5,<2.0.0"
-nvidia-cublas-cu11 = {version = "11.11.3.6", optional = true, markers = "extra == \"and-cuda\""}
-nvidia-cuda-cupti-cu11 = {version = "11.8.87", optional = true, markers = "extra == \"and-cuda\""}
-nvidia-cuda-nvcc-cu11 = {version = "11.8.89", optional = true, markers = "extra == \"and-cuda\""}
-nvidia-cuda-runtime-cu11 = {version = "11.8.89", optional = true, markers = "extra == \"and-cuda\""}
-nvidia-cudnn-cu11 = {version = "8.7.0.84", optional = true, markers = "extra == \"and-cuda\""}
-nvidia-cufft-cu11 = {version = "10.9.0.58", optional = true, markers = "extra == \"and-cuda\""}
-nvidia-curand-cu11 = {version = "10.3.0.86", optional = true, markers = "extra == \"and-cuda\""}
-nvidia-cusolver-cu11 = {version = "11.4.1.48", optional = true, markers = "extra == \"and-cuda\""}
-nvidia-cusparse-cu11 = {version = "11.7.5.86", optional = true, markers = "extra == \"and-cuda\""}
-nvidia-nccl-cu11 = {version = "2.16.5", optional = true, markers = "extra == \"and-cuda\""}
 opt-einsum = ">=2.3.2"
 packaging = "*"
 protobuf = ">=3.20.3,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev"
@@ -3036,7 +2929,6 @@ six = ">=1.12.0"
 tensorboard = ">=2.14,<2.15"
 tensorflow-estimator = ">=2.14.0,<2.15"
 tensorflow-io-gcs-filesystem = ">=0.23.1"
-tensorrt = {version = "8.5.3.1", optional = true, markers = "extra == \"and-cuda\""}
 termcolor = ">=1.1.0"
 typing-extensions = ">=3.6.6"
 wrapt = ">=1.11.0,<1.15"
@@ -3089,28 +2981,6 @@ tensorflow-cpu = ["tensorflow-cpu (>=2.11.0,<2.12.0)"]
 tensorflow-gpu = ["tensorflow-gpu (>=2.11.0,<2.12.0)"]
 tensorflow-rocm = ["tensorflow-rocm (>=2.11.0,<2.12.0)"]
 
-[[package]]
-name = "tensorrt"
-version = "8.5.3.1"
-description = "A high performance deep learning inference library"
-optional = false
-python-versions = "*"
-files = [
-    {file = "tensorrt-8.5.3.1-cp310-none-manylinux_2_17_x86_64.whl", hash = "sha256:00375391073e51c1d662cc116f5472a921f27d75bb617c0195b3479633c625f3"},
-    {file = "tensorrt-8.5.3.1-cp36-none-manylinux_2_17_x86_64.whl", hash = "sha256:a88f0e0dc0d604232c4ee155f2266b179a783fc2268291701581b02ac5b90c4c"},
-    {file = "tensorrt-8.5.3.1-cp37-none-manylinux_2_17_x86_64.whl", hash = "sha256:8b7b848a995ccfa08b328c864682a6696d0f01af823af78e73e1ab54fb19d1ae"},
-    {file = "tensorrt-8.5.3.1-cp38-none-manylinux_2_17_x86_64.whl", hash = "sha256:702a122b8d533765534632d8df646497e010b24ff296c957eb5519170ffd9860"},
-    {file = "tensorrt-8.5.3.1-cp39-none-manylinux_2_17_x86_64.whl", hash = "sha256:77f6db65af8ed5f819de0487350f0f447ed14eeccde5dae83fdf1027b89df2a0"},
-]
-
-[package.dependencies]
-nvidia-cublas-cu11 = "*"
-nvidia-cuda-runtime-cu11 = "*"
-nvidia-cudnn-cu11 = "*"
-
-[package.extras]
-numpy = ["numpy"]
-
 [[package]]
 name = "termcolor"
 version = "2.4.0"
@@ -3568,4 +3438,4 @@ cuda = []
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.10, <3.12"
-content-hash = "7a4791f8d500179d0472f176218cafb6a972038997fd3abcfadc751ba0eade3b"
+content-hash = "afccb2c101f032f98e359c16301255f6b1be4fdb4054264d1a7cb2ebba74717d"
pyproject.toml
@@ -33,10 +33,13 @@ torch = {version = "^2.1.2", source = "pytorch-cpu", markers = "extra!='cuda'" }
 # cuDNN version - 8.8.1
 # Installed from Nvidia website - nvidia-cuda-toolkit is not installed, but default PopOS drivers are installed
 tensorflow-io-gcs-filesystem = "0.31.0"
-tensorflow = {version = "^2.14.0", extras = ["and-cuda"]}
+# So this wasn't working on Windows, so unless there's a way to optionally install this, we'll install it without and-cuda
+# tensorflow = {version = "^2.14.0", extras = ["and-cuda"]}
+tensorflow = {version = "^2.14.0"}
 
 
 deepface = "^0.0.79"
+prettytable = "^3.9.0"
 
 [tool.poetry.group.remote]
 optional = true
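
If the optional-extra approach sketched above works out, picking the build per machine would then just use Poetry's standard extras flag (hypothetical usage, assuming the `cuda` extra ends up gating the CUDA-enabled TensorFlow):

    poetry install            # CPU-only TensorFlow (default)
    poetry install -E cuda    # opt in to tensorflow[and-cuda] on machines with a working GPU stack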