diff --git a/.dockerignore b/.dockerignore old mode 100644 new mode 100755 diff --git a/.flake8 b/.flake8 old mode 100644 new mode 100755 diff --git a/.gitattributes b/.gitattributes old mode 100644 new mode 100755 diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/conda-env.yml b/.github/workflows/conda-env.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/docker-push.yml b/.github/workflows/docker-push.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/pypi-publish.yml b/.github/workflows/pypi-publish.yml old mode 100644 new mode 100755 diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml old mode 100644 new mode 100755 diff --git a/.gitignore b/.gitignore old mode 100644 new mode 100755 diff --git a/CHANGELOG.md b/CHANGELOG.md old mode 100644 new mode 100755 diff --git a/LICENSE b/LICENSE old mode 100644 new mode 100755 diff --git a/MANIFEST.in b/MANIFEST.in old mode 100644 new mode 100755 diff --git a/README.md b/README.md old mode 100644 new mode 100755 diff --git a/backup-conda-lock.yml b/backup-conda-lock.yml new file mode 100755 index 0000000..d8cc331 --- /dev/null +++ b/backup-conda-lock.yml @@ -0,0 +1,1353 @@ +# This lock file was generated by conda-lock (https://github.com/conda/conda-lock). DO NOT EDIT! +# +# A "lock file" contains a concrete list of package versions (with checksums) to be installed. Unlike +# e.g. `conda env create`, the resulting environment will not change as new package versions become +# available, unless you explicitly update the lock file. +# +# Install this environment as "YOURENV" with: +# conda-lock install -n YOURENV --file new.conda-lock.yml +# To update a single package to the latest version compatible with the version constraints in the source: +# conda-lock lock --lockfile new.conda-lock.yml --update PACKAGE +# To re-solve the entire environment, e.g. 
after changing a version constraint in the source file: +# conda-lock -f environment.yml --lockfile new.conda-lock.yml +version: 1 +metadata: + content_hash: + linux-64: 5ad884989a8a0f7345918559305173543c2fbcef136f2c97eab4d8e066b16def + channels: + - url: conda-forge + used_env_vars: [] + platforms: + - linux-64 + sources: + - environment.yml +package: +- name: _libgcc_mutex + version: '0.1' + manager: conda + platform: linux-64 + dependencies: {} + url: https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2 + hash: + md5: d7c89558ba9fa0495403155b64376d81 + sha256: fe51de6107f9edc7aa4f786a70f4a883943bc9d39b3bb7307c04c41410990726 + category: main + optional: false +- name: _openmp_mutex + version: '4.5' + manager: conda + platform: linux-64 + dependencies: + _libgcc_mutex: '0.1' + llvm-openmp: '>=9.0.1' + url: https://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-2_kmp_llvm.tar.bz2 + hash: + md5: 562b26ba2e19059551a811e72ab7f793 + sha256: 84a66275da3a66e3f3e70e9d8f10496d807d01a9e4ec16cd2274cc5e28c478fc + category: main + optional: false +- name: antlr-python-runtime + version: 4.9.3 + manager: conda + platform: linux-64 + dependencies: + python: '>=3.6' + url: https://conda.anaconda.org/conda-forge/noarch/antlr-python-runtime-4.9.3-pyhd8ed1ab_1.tar.bz2 + hash: + md5: c88eaec8de9ae1fa161205aa18e7a5b1 + sha256: b91f8ab4ac2b48972fbee1fc8e092cc452fdf59156e4ff2322c94bbf73650f94 + category: main + optional: false +- name: beautifulsoup4 + version: 4.12.3 + manager: conda + platform: linux-64 + dependencies: + python: '>=3.6' + soupsieve: '>=1.2' + url: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.12.3-pyha770c72_0.conda + hash: + md5: 332493000404d8411859539a5a630865 + sha256: 7b05b2d0669029326c623b9df7a29fa49d1982a9e7e31b2fea34b4c9a4a72317 + category: main + optional: false +- name: brotli-python + version: 1.1.0 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' + python: '>=3.11,<3.12.0a0' + python_abi: 3.11.* + url: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.1.0-py311hb755f60_1.conda + hash: + md5: cce9e7c3f1c307f2a5fb08a2922d6164 + sha256: 559093679e9fdb6061b7b80ca0f9a31fe6ffc213f1dae65bc5c82e2cd1a94107 + category: main + optional: false +- name: bzip2 + version: 1.0.8 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hd590300_5.conda + hash: + md5: 69b8b6202a07720f448be700e300ccf4 + sha256: 242c0c324507ee172c0e0dd2045814e746bb303d1eb78870d182ceb0abc726a8 + category: main + optional: false +- name: ca-certificates + version: 2024.2.2 + manager: conda + platform: linux-64 + dependencies: {} + url: https://conda.anaconda.org/conda-forge/linux-64/ca-certificates-2024.2.2-hbcca054_0.conda + hash: + md5: 2f4327a1cbe7f022401b236e915a5fef + sha256: 91d81bfecdbb142c15066df70cc952590ae8991670198f92c66b62019b251aeb + category: main + optional: false +- name: certifi + version: 2024.2.2 + manager: conda + platform: linux-64 + dependencies: + python: '>=3.7' + url: https://conda.anaconda.org/conda-forge/noarch/certifi-2024.2.2-pyhd8ed1ab_0.conda + hash: + md5: 0876280e409658fc6f9e75d035960333 + sha256: f1faca020f988696e6b6ee47c82524c7806380b37cfdd1def32f92c326caca54 + category: main + optional: false +- name: charset-normalizer + version: 3.3.2 + manager: conda + platform: linux-64 + dependencies: + python: '>=3.7' + url: 
https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.3.2-pyhd8ed1ab_0.conda + hash: + md5: 7f4a9e3fcff3f6356ae99244a014da6a + sha256: 20cae47d31fdd58d99c4d2e65fbdcefa0b0de0c84e455ba9d6356a4bdbc4b5b9 + category: main + optional: false +- name: codetiming + version: 1.4.0 + manager: conda + platform: linux-64 + dependencies: + dataclasses: '' + python: '>=3.6' + url: https://conda.anaconda.org/conda-forge/noarch/codetiming-1.4.0-pyhd8ed1ab_0.tar.bz2 + hash: + md5: a1f3168974155fc60367ef1a88e57569 + sha256: fe95abca179c43648fb36957d7dd5526c5e28ee04092e303e7b1778c9e964355 + category: main + optional: false +- name: colorama + version: 0.4.6 + manager: conda + platform: linux-64 + dependencies: + python: '>=3.7' + url: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_0.tar.bz2 + hash: + md5: 3faab06a954c2a04039983f2c4a50d99 + sha256: 2c1b2e9755ce3102bca8d69e8f26e4f087ece73f50418186aee7c74bef8e1698 + category: main + optional: false +- name: dataclasses + version: '0.8' + manager: conda + platform: linux-64 + dependencies: + python: '>=3.7' + url: https://conda.anaconda.org/conda-forge/noarch/dataclasses-0.8-pyhc8e2a94_3.tar.bz2 + hash: + md5: a362b2124b06aad102e2ee4581acee7d + sha256: 63a83e62e0939bc1ab32de4ec736f6403084198c4639638b354a352113809c92 + category: main + optional: false +- name: filelock + version: 3.13.1 + manager: conda + platform: linux-64 + dependencies: + python: '>=3.7' + url: https://conda.anaconda.org/conda-forge/noarch/filelock-3.13.1-pyhd8ed1ab_0.conda + hash: + md5: 0c1729b74a8152fde6a38ba0a2ab9f45 + sha256: 4d742d91412d1f163e5399d2b50c5d479694ebcd309127abb549ca3977f89d2b + category: main + optional: false +- name: freetype + version: 2.12.1 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + libpng: '>=1.6.39,<1.7.0a0' + libzlib: '>=1.2.13,<1.3.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.12.1-h267a509_2.conda + hash: + md5: 9ae35c3d96db2c94ce0cef86efdfa2cb + sha256: b2e3c449ec9d907dd4656cb0dc93e140f447175b125a3824b31368b06c666bb6 + category: main + optional: false +- name: fsspec + version: 2024.2.0 + manager: conda + platform: linux-64 + dependencies: + python: '>=3.8' + url: https://conda.anaconda.org/conda-forge/noarch/fsspec-2024.2.0-pyhca7485f_0.conda + hash: + md5: fad86b90138cf5d82c6f5a2ed6e683d9 + sha256: 3f7e123dd82fe99450d1e0ffa389e8218ef8c9ee257c836e21b489548c039ae6 + category: main + optional: false +- name: gdown + version: 5.1.0 + manager: conda + platform: linux-64 + dependencies: + beautifulsoup4: '' + filelock: '' + python: '>=3.8' + requests: '' + tqdm: '' + url: https://conda.anaconda.org/conda-forge/noarch/gdown-5.1.0-pyhd8ed1ab_0.conda + hash: + md5: 6f880647c0270648f710f334c60bc76c + sha256: 1ab1e5cf5c851f91abebfc6a6c094bc6e2afa3639e6586f6ff890acc8551a63d + category: main + optional: false +- name: gmp + version: 6.3.0 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/gmp-6.3.0-h59595ed_0.conda + hash: + md5: 0e33ef437202db431aa5a928248cf2e8 + sha256: 2a50495b6bbbacb03107ea0b752d8358d4a40b572d124a8cade068c147f344f5 + category: main + optional: false +- name: gmpy2 + version: 2.1.2 + manager: conda + platform: linux-64 + dependencies: + gmp: '>=6.2.1,<7.0a0' + libgcc-ng: '>=12' + mpc: '>=1.2.1,<2.0a0' + mpfr: '>=4.1.0,<5.0a0' + python: '>=3.11,<3.12.0a0' + python_abi: 3.11.* + url: 
https://conda.anaconda.org/conda-forge/linux-64/gmpy2-2.1.2-py311h6a5fa03_1.tar.bz2 + hash: + md5: 3515bd4a3d92bbd3cc2d25aac335e34d + sha256: 20862200f4d07ba583ab6ae9b56d7de2462474240872100973711dfa20d562d7 + category: main + optional: false +- name: googleapis-common-protos + version: 1.62.0 + manager: conda + platform: linux-64 + dependencies: + protobuf: '>=3.19.5,<5.0.0dev0,!=3.20.0,!=3.20.1,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5' + python: '>=3.7' + url: https://conda.anaconda.org/conda-forge/noarch/googleapis-common-protos-1.62.0-pyhd8ed1ab_0.conda + hash: + md5: ca3d0c7ba3a15e943d9c715aba03ae62 + sha256: 70da3fc08a742022c666d9807f0caba60be1ddbf09b6642c168001bace18c724 + category: main + optional: false +- name: hydra-core + version: 1.3.2 + manager: conda + platform: linux-64 + dependencies: + antlr-python-runtime: 4.9.* + importlib_resources: '' + omegaconf: '>=2.2,<2.4' + packaging: '' + python: '>=3.6' + url: https://conda.anaconda.org/conda-forge/noarch/hydra-core-1.3.2-pyhd8ed1ab_0.conda + hash: + md5: 297d09ccdcec5b347d44c88f2b61cf03 + sha256: 35044b4bb1059c4ed7d8392b776e663a390ad7a2bb6f7e2f09ecd5e9b5d40b75 + category: main + optional: false +- name: icu + version: '73.2' + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/icu-73.2-h59595ed_0.conda + hash: + md5: cc47e1facc155f91abd89b11e48e72ff + sha256: e12fd90ef6601da2875ebc432452590bc82a893041473bc1c13ef29001a73ea8 + category: main + optional: false +- name: idna + version: '3.6' + manager: conda + platform: linux-64 + dependencies: + python: '>=3.6' + url: https://conda.anaconda.org/conda-forge/noarch/idna-3.6-pyhd8ed1ab_0.conda + hash: + md5: 1a76f09108576397c41c0b0c5bd84134 + sha256: 6ee4c986d69ce61e60a20b2459b6f2027baeba153f0a64995fd3cb47c2cc7e07 + category: main + optional: false +- name: importlib_resources + version: 6.1.1 + manager: conda + platform: linux-64 + dependencies: + python: '>=3.8' + zipp: '>=3.1.0' + url: https://conda.anaconda.org/conda-forge/noarch/importlib_resources-6.1.1-pyhd8ed1ab_0.conda + hash: + md5: 3d5fa25cf42f3f32a12b2d874ace8574 + sha256: e584f9ae08fb2d242af0ce7e19e3cd2f85f362d8523119e08f99edb962db99ed + category: main + optional: false +- name: jinja2 + version: 3.1.3 + manager: conda + platform: linux-64 + dependencies: + markupsafe: '>=2.0' + python: '>=3.7' + url: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.3-pyhd8ed1ab_0.conda + hash: + md5: e7d8df6509ba635247ff9aea31134262 + sha256: fd517b7dd3a61eca34f8a6f9f92f306397149cae1204fce72ac3d227107dafdc + category: main + optional: false +- name: lcms2 + version: '2.16' + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + libjpeg-turbo: '>=3.0.0,<4.0a0' + libtiff: '>=4.6.0,<4.7.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.16-hb7c19ff_0.conda + hash: + md5: 51bb7010fc86f70eee639b4bb7a894f5 + sha256: 5c878d104b461b7ef922abe6320711c0d01772f4cd55de18b674f88547870041 + category: main + optional: false +- name: ld_impl_linux-64 + version: '2.40' + manager: conda + platform: linux-64 + dependencies: {} + url: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.40-h41732ed_0.conda + hash: + md5: 7aca3059a1729aa76c597603f10b0dd3 + sha256: f6cc89d887555912d6c61b295d398cff9ec982a3417d38025c45d5dd9b9e79cd + category: main + optional: false +- name: lerc + version: 4.0.0 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' + url: 
https://conda.anaconda.org/conda-forge/linux-64/lerc-4.0.0-h27087fc_0.tar.bz2 + hash: + md5: 76bbff344f0134279f225174e9064c8f + sha256: cb55f36dcd898203927133280ae1dc643368af041a48bcf7c026acb7c47b0c12 + category: main + optional: false +- name: libabseil + version: '20230802.1' + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/libabseil-20230802.1-cxx17_h59595ed_0.conda + hash: + md5: 2785ddf4cb0e7e743477991d64353947 + sha256: 8729021a93e67bb93b4e73ef0a132499db516accfea11561b667635bcd0507e7 + category: main + optional: false +- name: libblas + version: 3.9.0 + manager: conda + platform: linux-64 + dependencies: + libopenblas: '>=0.3.26,<1.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/libblas-3.9.0-21_linux64_openblas.conda + hash: + md5: 0ac9f44fc096772b0aa092119b00c3ca + sha256: ebd5c91f029f779fb88a1fcbd1e499559a9c258e3674ff58a2fbb4e375ae56d9 + category: main + optional: false +- name: libcblas + version: 3.9.0 + manager: conda + platform: linux-64 + dependencies: + libblas: 3.9.0 + url: https://conda.anaconda.org/conda-forge/linux-64/libcblas-3.9.0-21_linux64_openblas.conda + hash: + md5: 4a3816d06451c4946e2db26b86472cb6 + sha256: 467bbfbfe1a1aeb8b1f9f6485eedd8ed1b6318941bf3702da72336ccf4dc25a6 + category: main + optional: false +- name: libdeflate + version: '1.19' + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/libdeflate-1.19-hd590300_0.conda + hash: + md5: 1635570038840ee3f9c71d22aa5b8b6d + sha256: 985ad27aa0ba7aad82afa88a8ede6a1aacb0aaca950d710f15d85360451e72fd + category: main + optional: false +- name: libexpat + version: 2.5.0 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.5.0-hcb278e6_1.conda + hash: + md5: 6305a3dd2752c76335295da4e581f2fd + sha256: 74c98a563777ae2ad71f1f74d458a8ab043cee4a513467c159ccf159d0e461f3 + category: main + optional: false +- name: libffi + version: 3.4.2 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=9.4.0' + url: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.4.2-h7f98852_5.tar.bz2 + hash: + md5: d645c6d2ac96843a2bfaccd2d62b3ac3 + sha256: ab6e9856c21709b7b517e940ae7028ae0737546122f83c2aa5d692860c3b149e + category: main + optional: false +- name: libgcc-ng + version: 13.2.0 + manager: conda + platform: linux-64 + dependencies: + _libgcc_mutex: '0.1' + _openmp_mutex: '>=4.5' + url: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-13.2.0-h807b86a_5.conda + hash: + md5: d4ff227c46917d3b4565302a2bbb276b + sha256: d32f78bfaac282cfe5205f46d558704ad737b8dbf71f9227788a5ca80facaba4 + category: main + optional: false +- name: libgfortran-ng + version: 13.2.0 + manager: conda + platform: linux-64 + dependencies: + libgfortran5: 13.2.0 + url: https://conda.anaconda.org/conda-forge/linux-64/libgfortran-ng-13.2.0-h69a702a_5.conda + hash: + md5: e73e9cfd1191783392131e6238bdb3e9 + sha256: 238c16c84124d58307376715839aa152bd4a1bf5a043052938ad6c3137d30245 + category: main + optional: false +- name: libgfortran5 + version: 13.2.0 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=13.2.0' + url: https://conda.anaconda.org/conda-forge/linux-64/libgfortran5-13.2.0-ha4646dd_5.conda + hash: + md5: 7a6bd7a12a4bd359e2afe6c0fa1acace + sha256: ba8d94e8493222ce155bb264d9de4200e41498a458e866fedf444de809bde8b6 + category: main + optional: 
false +- name: libhwloc + version: 2.9.3 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' + libxml2: '>=2.11.5,<3.0.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/libhwloc-2.9.3-default_h554bfaf_1009.conda + hash: + md5: f36ddc11ca46958197a45effdd286e45 + sha256: 6950fee24766d03406e0f6f965262a5d98829c71eed8d1004f313892423b559b + category: main + optional: false +- name: libiconv + version: '1.17' + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/libiconv-1.17-hd590300_2.conda + hash: + md5: d66573916ffcf376178462f1b61c941e + sha256: 8ac2f6a9f186e76539439e50505d98581472fedb347a20e7d1f36429849f05c9 + category: main + optional: false +- name: libjpeg-turbo + version: 3.0.0 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/libjpeg-turbo-3.0.0-hd590300_1.conda + hash: + md5: ea25936bb4080d843790b586850f82b8 + sha256: b954e09b7e49c2f2433d6f3bb73868eda5e378278b0f8c1dd10a7ef090e14f2f + category: main + optional: false +- name: liblapack + version: 3.9.0 + manager: conda + platform: linux-64 + dependencies: + libblas: 3.9.0 + url: https://conda.anaconda.org/conda-forge/linux-64/liblapack-3.9.0-21_linux64_openblas.conda + hash: + md5: 1a42f305615c3867684e049e85927531 + sha256: 64b5c35dce00dd6f9f53178b2fe87116282e00967970bd6551a5a42923806ded + category: main + optional: false +- name: libnsl + version: 2.0.1 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.1-hd590300_0.conda + hash: + md5: 30fd6e37fe21f86f4bd26d6ee73eeec7 + sha256: 26d77a3bb4dceeedc2a41bd688564fe71bf2d149fdcf117049970bc02ff1add6 + category: main + optional: false +- name: libopenblas + version: 0.3.26 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + libgfortran-ng: '' + libgfortran5: '>=12.3.0' + url: https://conda.anaconda.org/conda-forge/linux-64/libopenblas-0.3.26-pthreads_h413a1c8_0.conda + hash: + md5: 760ae35415f5ba8b15d09df5afe8b23a + sha256: b626954b5a1113dafec8df89fa8bf18ce9b4701464d9f084ddd7fc9fac404bbd + category: main + optional: false +- name: libpng + version: 1.6.42 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + libzlib: '>=1.2.13,<1.3.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/libpng-1.6.42-h2797004_0.conda + hash: + md5: d67729828dc6ff7ba44a61062ad79880 + sha256: 1a0c3a4b7fd1e101cb37dd6d2f8b5ec93409c8cae422f04470fe39a01ef59024 + category: main + optional: false +- name: libprotobuf + version: 4.24.4 + manager: conda + platform: linux-64 + dependencies: + libabseil: '>=20230802.1,<20230803.0a0' + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' + libzlib: '>=1.2.13,<1.3.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/libprotobuf-4.24.4-hf27288f_0.conda + hash: + md5: 1a0287ab734591ad63603734f923016b + sha256: 3e0f6454190abb27edd2aeb724688ee440de133edb02cbb17d5609ba36aa8be0 + category: main + optional: false +- name: libsqlite + version: 3.45.1 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + libzlib: '>=1.2.13,<1.3.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.45.1-h2797004_0.conda + hash: + md5: fc4ccadfbf6d4784de88c41704792562 + sha256: 1b379d1c652b25d0540251d422ef767472e768fd36b77261045e97f9ba6d3faa + category: main + optional: false +- name: libstdcxx-ng + version: 13.2.0 + manager: 
conda + platform: linux-64 + dependencies: {} + url: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-13.2.0-h7e041cc_5.conda + hash: + md5: f6f6600d18a4047b54f803cf708b868a + sha256: a56c5b11f1e73a86e120e6141a42d9e935a99a2098491ac9e15347a1476ce777 + category: main + optional: false +- name: libtiff + version: 4.6.0 + manager: conda + platform: linux-64 + dependencies: + lerc: '>=4.0.0,<5.0a0' + libdeflate: '>=1.19,<1.20.0a0' + libgcc-ng: '>=12' + libjpeg-turbo: '>=3.0.0,<4.0a0' + libstdcxx-ng: '>=12' + libwebp-base: '>=1.3.2,<2.0a0' + libzlib: '>=1.2.13,<1.3.0a0' + xz: '>=5.2.6,<6.0a0' + zstd: '>=1.5.5,<1.6.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/libtiff-4.6.0-ha9c0a0a_2.conda + hash: + md5: 55ed21669b2015f77c180feb1dd41930 + sha256: 45158f5fbee7ee3e257e6b9f51b9f1c919ed5518a94a9973fe7fa4764330473e + category: main + optional: false +- name: libuuid + version: 2.38.1 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.38.1-h0b41bf4_0.conda + hash: + md5: 40b61aab5c7ba9ff276c41cfffe6b80b + sha256: 787eb542f055a2b3de553614b25f09eefb0a0931b0c87dbcce6efdfd92f04f18 + category: main + optional: false +- name: libuv + version: 1.47.0 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/libuv-1.47.0-hd590300_0.conda + hash: + md5: a7a94e1b751a9fe2be88f3934b3a0739 + sha256: 53bd8f6bebc85555c5dd648072693e37fcdf777f993e9a108c4a7badf2e8810c + category: main + optional: false +- name: libwebp-base + version: 1.3.2 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/libwebp-base-1.3.2-hd590300_0.conda + hash: + md5: 30de3fd9b3b602f7473f30e684eeea8c + sha256: 68764a760fa81ef35dacb067fe8ace452bbb41476536a4a147a1051df29525f0 + category: main + optional: false +- name: libxcb + version: '1.15' + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + pthread-stubs: '' + xorg-libxau: '' + xorg-libxdmcp: '' + url: https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.15-h0b41bf4_0.conda + hash: + md5: 33277193f5b92bad9fdd230eb700929c + sha256: a670902f0a3173a466c058d2ac22ca1dd0df0453d3a80e0212815c20a16b0485 + category: main + optional: false +- name: libxcrypt + version: 4.4.36 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/libxcrypt-4.4.36-hd590300_1.conda + hash: + md5: 5aa797f8787fe7a17d1b0821485b5adc + sha256: 6ae68e0b86423ef188196fff6207ed0c8195dd84273cb5623b85aa08033a410c + category: main + optional: false +- name: libxml2 + version: 2.12.5 + manager: conda + platform: linux-64 + dependencies: + icu: '>=73.2,<74.0a0' + libgcc-ng: '>=12' + libiconv: '>=1.17,<2.0a0' + libzlib: '>=1.2.13,<1.3.0a0' + xz: '>=5.2.6,<6.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/libxml2-2.12.5-h232c23b_0.conda + hash: + md5: c442ebfda7a475f5e78f1c8e45f1e919 + sha256: db9bf97e9e367985204331b58a059ebd5a4e0cb9e1c8754e9ecb23046b7b7bc1 + category: main + optional: false +- name: libzlib + version: 1.2.13 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.2.13-hd590300_5.conda + hash: + md5: f36c115f1ee199da648e0597ec2047ad + sha256: 370c7c5893b737596fd6ca0d9190c9715d89d888b8c88537ae1ef168c25e82e4 + category: main + optional: false +- name: llvm-openmp + version: 
17.0.6 + manager: conda + platform: linux-64 + dependencies: + libzlib: '>=1.2.13,<1.3.0a0' + zstd: '>=1.5.5,<1.6.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/llvm-openmp-17.0.6-h4dfa4b3_0.conda + hash: + md5: c1665f9c1c9f6c93d8b4e492a6a39056 + sha256: 18a9db4cc139e72e8eac80a34f6536491fe318d3785bc2c35fac42cd00676376 + category: main + optional: false +- name: markupsafe + version: 2.1.5 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + python: '>=3.11,<3.12.0a0' + python_abi: 3.11.* + url: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-2.1.5-py311h459d7ec_0.conda + hash: + md5: a322b4185121935c871d201ae00ac143 + sha256: 14912e557a6576e03f65991be89e9d289c6e301921b6ecfb4e7186ba974f453d + category: main + optional: false +- name: mkl + version: 2022.2.1 + manager: conda + platform: linux-64 + dependencies: + _openmp_mutex: '>=4.5' + llvm-openmp: '>=15.0.6' + tbb: 2021.* + url: https://conda.anaconda.org/conda-forge/linux-64/mkl-2022.2.1-h84fe81f_16997.conda + hash: + md5: a7ce56d5757f5b57e7daabe703ade5bb + sha256: 5322750d5e96ff5d96b1457db5fb6b10300f2bc4030545e940e17b57c4e96d00 + category: main + optional: false +- name: mpc + version: 1.3.1 + manager: conda + platform: linux-64 + dependencies: + gmp: '>=6.2.1,<7.0a0' + libgcc-ng: '>=12' + mpfr: '>=4.1.0,<5.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/mpc-1.3.1-hfe3b2da_0.conda + hash: + md5: 289c71e83dc0daa7d4c81f04180778ca + sha256: 2f88965949ba7b4b21e7e5facd62285f7c6efdb17359d1b365c3bb4ecc968d29 + category: main + optional: false +- name: mpfr + version: 4.2.1 + manager: conda + platform: linux-64 + dependencies: + gmp: '>=6.2.1,<7.0a0' + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/mpfr-4.2.1-h9458935_0.conda + hash: + md5: 4c28f3210b30250037a4a627eeee9e0f + sha256: 008230a53ff15cf61966476b44f7ba2c779826825b9ca639a0a2b44d8f7aa6cb + category: main + optional: false +- name: mpmath + version: 1.3.0 + manager: conda + platform: linux-64 + dependencies: + python: '>=3.6' + url: https://conda.anaconda.org/conda-forge/noarch/mpmath-1.3.0-pyhd8ed1ab_0.conda + hash: + md5: dbf6e2d89137da32fa6670f3bffc024e + sha256: a4f025c712ec1502a55c471b56a640eaeebfce38dd497d5a1a33729014cac47a + category: main + optional: false +- name: ncurses + version: '6.4' + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.4-h59595ed_2.conda + hash: + md5: 7dbaa197d7ba6032caf7ae7f32c1efa0 + sha256: 91cc03f14caf96243cead96c76fe91ab5925a695d892e83285461fb927dece5e + category: main + optional: false +- name: networkx + version: 3.2.1 + manager: conda + platform: linux-64 + dependencies: + python: '>=3.9' + url: https://conda.anaconda.org/conda-forge/noarch/networkx-3.2.1-pyhd8ed1ab_0.conda + hash: + md5: 425fce3b531bed6ec3c74fab3e5f0a1c + sha256: 7629aa4f9f8cdff45ea7a4701fe58dccce5bf2faa01c26eb44cbb27b7e15ca9d + category: main + optional: false +- name: numpy + version: 1.26.4 + manager: conda + platform: linux-64 + dependencies: + libblas: '>=3.9.0,<4.0a0' + libcblas: '>=3.9.0,<4.0a0' + libgcc-ng: '>=12' + liblapack: '>=3.9.0,<4.0a0' + libstdcxx-ng: '>=12' + python: '>=3.11,<3.12.0a0' + python_abi: 3.11.* + url: https://conda.anaconda.org/conda-forge/linux-64/numpy-1.26.4-py311h64a7726_0.conda + hash: + md5: a502d7aad449a1206efb366d6a12c52d + sha256: 3f4365e11b28e244c95ba8579942b0802761ba7bb31c026f50d1a9ea9c728149 + category: main + optional: false +- name: omegaconf + version: 2.3.0 + manager: 
conda + platform: linux-64 + dependencies: + antlr-python-runtime: 4.9.* + python: '>=3.7' + pyyaml: '>=5.1.0' + typing_extensions: '' + url: https://conda.anaconda.org/conda-forge/noarch/omegaconf-2.3.0-pyhd8ed1ab_0.conda + hash: + md5: 23cc056834cab53849b91f78d6ee3ea0 + sha256: df806841be847e5287b22b6ae7f380874f81ea51f1b51ae14a570f3385c7b133 + category: main + optional: false +- name: openjpeg + version: 2.5.0 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + libpng: '>=1.6.39,<1.7.0a0' + libstdcxx-ng: '>=12' + libtiff: '>=4.6.0,<4.7.0a0' + libzlib: '>=1.2.13,<1.3.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.0-h488ebb8_3.conda + hash: + md5: 128c25b7fe6a25286a48f3a6a9b5b6f3 + sha256: 9fe91b67289267de68fda485975bb48f0605ac503414dc663b50d8b5f29bc82a + category: main + optional: false +- name: openssl + version: 3.2.1 + manager: conda + platform: linux-64 + dependencies: + ca-certificates: '' + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.2.1-hd590300_0.conda + hash: + md5: 51a753e64a3027bd7e23a189b1f6e91e + sha256: c02c12bdb898daacf7eb3d09859f93ea8f285fd1a6132ff6ff0493ab52c7fe57 + category: main + optional: false +- name: packaging + version: '23.2' + manager: conda + platform: linux-64 + dependencies: + python: '>=3.7' + url: https://conda.anaconda.org/conda-forge/noarch/packaging-23.2-pyhd8ed1ab_0.conda + hash: + md5: 79002079284aa895f883c6b7f3f88fd6 + sha256: 69b3ace6cca2dab9047b2c24926077d81d236bef45329d264b394001e3c3e52f + category: main + optional: false +- name: pillow + version: 10.2.0 + manager: conda + platform: linux-64 + dependencies: + freetype: '>=2.12.1,<3.0a0' + lcms2: '>=2.16,<3.0a0' + libgcc-ng: '>=12' + libjpeg-turbo: '>=3.0.0,<4.0a0' + libtiff: '>=4.6.0,<4.7.0a0' + libwebp-base: '>=1.3.2,<2.0a0' + libxcb: '>=1.15,<1.16.0a0' + libzlib: '>=1.2.13,<1.3.0a0' + openjpeg: '>=2.5.0,<3.0a0' + python: '>=3.11,<3.12.0a0' + python_abi: 3.11.* + tk: '>=8.6.13,<8.7.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/pillow-10.2.0-py311ha6c5da5_0.conda + hash: + md5: a5ccd7f2271f28b7d2de0b02b64e3796 + sha256: 3cd4827d822c9888b672bfac9017e905348ac5bd2237a98b30a734ed6573b248 + category: main + optional: false +- name: pip + version: '24.0' + manager: conda + platform: linux-64 + dependencies: + python: '>=3.7' + setuptools: '' + wheel: '' + url: https://conda.anaconda.org/conda-forge/noarch/pip-24.0-pyhd8ed1ab_0.conda + hash: + md5: f586ac1e56c8638b64f9c8122a7b8a67 + sha256: b7c1c5d8f13e8cb491c4bd1d0d1896a4cf80fc47de01059ad77509112b664a4a + category: main + optional: false +- name: protobuf + version: 4.24.4 + manager: conda + platform: linux-64 + dependencies: + libabseil: '>=20230802.1,<20230803.0a0' + libgcc-ng: '>=12' + libprotobuf: '>=4.24.4,<4.24.5.0a0' + libstdcxx-ng: '>=12' + python: '>=3.11,<3.12.0a0' + python_abi: 3.11.* + setuptools: '' + url: https://conda.anaconda.org/conda-forge/linux-64/protobuf-4.24.4-py311h46cbc50_0.conda + hash: + md5: 83b241e2db8adb55d7ec110a913fea80 + sha256: 1f664f5fc370c28809024387e2f991003fcabf8b025c787c70dbc99a8fcb2088 + category: main + optional: false +- name: pthread-stubs + version: '0.4' + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=7.5.0' + url: https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-h36c2ea0_1001.tar.bz2 + hash: + md5: 22dad4df6e8630e8dff2428f6f6a7036 + sha256: 67c84822f87b641d89df09758da498b2d4558d47b920fd1d3fe6d3a871e000ff + category: main + optional: false +- name: pysocks + version: 
1.7.1 + manager: conda + platform: linux-64 + dependencies: + __unix: '' + python: '>=3.8' + url: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha2e5f31_6.tar.bz2 + hash: + md5: 2a7de29fb590ca14b5243c4c812c8025 + sha256: a42f826e958a8d22e65b3394f437af7332610e43ee313393d1cf143f0a2d274b + category: main + optional: false +- name: python + version: 3.11.7 + manager: conda + platform: linux-64 + dependencies: + bzip2: '>=1.0.8,<2.0a0' + ld_impl_linux-64: '>=2.36.1' + libexpat: '>=2.5.0,<3.0a0' + libffi: '>=3.4,<4.0a0' + libgcc-ng: '>=12' + libnsl: '>=2.0.1,<2.1.0a0' + libsqlite: '>=3.44.2,<4.0a0' + libuuid: '>=2.38.1,<3.0a0' + libxcrypt: '>=4.4.36' + libzlib: '>=1.2.13,<1.3.0a0' + ncurses: '>=6.4,<7.0a0' + openssl: '>=3.2.0,<4.0a0' + readline: '>=8.2,<9.0a0' + tk: '>=8.6.13,<8.7.0a0' + tzdata: '' + xz: '>=5.2.6,<6.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/python-3.11.7-hab00c5b_1_cpython.conda + hash: + md5: 27cf681282c11dba7b0b1fd266e8f289 + sha256: 8266801d3f21ae3018b997dcd05503b034016a3335aa3ab5b8c3f482af1e6580 + category: main + optional: false +- name: python-json-logger + version: 2.0.7 + manager: conda + platform: linux-64 + dependencies: + python: '>=3.6' + url: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda + hash: + md5: a61bf9ec79426938ff785eb69dbb1960 + sha256: 4790787fe1f4e8da616edca4acf6a4f8ed4e7c6967aa31b920208fc8f95efcca + category: main + optional: false +- name: python_abi + version: '3.11' + manager: conda + platform: linux-64 + dependencies: {} + url: https://conda.anaconda.org/conda-forge/linux-64/python_abi-3.11-4_cp311.conda + hash: + md5: d786502c97404c94d7d58d258a445a65 + sha256: 0be3ac1bf852d64f553220c7e6457e9c047dfb7412da9d22fbaa67e60858b3cf + category: main + optional: false +- name: pytorch + version: 2.1.0 + manager: conda + platform: linux-64 + dependencies: + __glibc: '>=2.17,<3.0.a0' + _openmp_mutex: '>=4.5' + filelock: '' + fsspec: '' + jinja2: '' + libcblas: '>=3.9.0,<4.0a0' + libgcc-ng: '>=12' + libprotobuf: '>=4.24.4,<4.24.5.0a0' + libstdcxx-ng: '>=12' + libuv: '>=1.46.0,<2.0a0' + mkl: '>=2022.2.1,<2023.0a0' + networkx: '' + numpy: '>=1.23.5,<2.0a0' + python: '>=3.11,<3.12.0a0' + python_abi: 3.11.* + sleef: '>=3.5.1,<4.0a0' + sympy: '' + typing_extensions: '' + url: https://conda.anaconda.org/conda-forge/linux-64/pytorch-2.1.0-cpu_mkl_py311h0c8a311_100.conda + hash: + md5: 81dafdfca905f63e43094252048446b4 + sha256: 17da98806c1b87a2c81eaf59b6e781ced850733bb4ea90046da3aa1ba85138ec + category: main + optional: false +- name: pytorch-cpu + version: 2.1.0 + manager: conda + platform: linux-64 + dependencies: + pytorch: 2.1.0 + url: https://conda.anaconda.org/conda-forge/linux-64/pytorch-cpu-2.1.0-cpu_mkl_py311ha33ad28_100.conda + hash: + md5: 3c54dcbd0f2605c9234f4edc1565c8a1 + sha256: 7c5005eeff582c6e0f20897dfc8c303f45baaf3d51fbc69e601dee8fe7fa77eb + category: main + optional: false +- name: pyyaml + version: 6.0.1 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + python: '>=3.11,<3.12.0a0' + python_abi: 3.11.* + yaml: '>=0.2.5,<0.3.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0.1-py311h459d7ec_1.conda + hash: + md5: 52719a74ad130de8fb5d047dc91f247a + sha256: 28729ef1ffa7f6f9dfd54345a47c7faac5d34296d66a2b9891fb147f4efe1348 + category: main + optional: false +- name: readline + version: '8.2' + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + ncurses: '>=6.3,<7.0a0' + url: 
https://conda.anaconda.org/conda-forge/linux-64/readline-8.2-h8228510_1.conda + hash: + md5: 47d31b792659ce70f470b5c82fdfb7a4 + sha256: 5435cf39d039387fbdc977b0a762357ea909a7694d9528ab40f005e9208744d7 + category: main + optional: false +- name: requests + version: 2.31.0 + manager: conda + platform: linux-64 + dependencies: + certifi: '>=2017.4.17' + charset-normalizer: '>=2,<4' + idna: '>=2.5,<4' + python: '>=3.7' + urllib3: '>=1.21.1,<3' + url: https://conda.anaconda.org/conda-forge/noarch/requests-2.31.0-pyhd8ed1ab_0.conda + hash: + md5: a30144e4156cdbb236f99ebb49828f8b + sha256: 9f629d6fd3c8ac5f2a198639fe7af87c4db2ac9235279164bfe0fcb49d8c4bad + category: main + optional: false +- name: setuptools + version: 69.0.3 + manager: conda + platform: linux-64 + dependencies: + python: '>=3.7' + url: https://conda.anaconda.org/conda-forge/noarch/setuptools-69.0.3-pyhd8ed1ab_0.conda + hash: + md5: 40695fdfd15a92121ed2922900d0308b + sha256: 0fe2a0473ad03dac6c7f5c42ef36a8e90673c88a0350dfefdea4b08d43803db2 + category: main + optional: false +- name: sleef + version: 3.5.1 + manager: conda + platform: linux-64 + dependencies: + _openmp_mutex: '>=4.5' + libgcc-ng: '>=9.4.0' + url: https://conda.anaconda.org/conda-forge/linux-64/sleef-3.5.1-h9b69904_2.tar.bz2 + hash: + md5: 6e016cf4c525d04a7bd038cee53ad3fd + sha256: 77d644a16f682e6d01df63fe9d25315011393498b63cf08c0e548780e46b2170 + category: main + optional: false +- name: soupsieve + version: '2.5' + manager: conda + platform: linux-64 + dependencies: + python: '>=3.8' + url: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.5-pyhd8ed1ab_1.conda + hash: + md5: 3f144b2c34f8cb5a9abd9ed23a39c561 + sha256: 54ae221033db8fbcd4998ccb07f3c3828b4d77e73b0c72b18c1d6a507059059c + category: main + optional: false +- name: sympy + version: '1.12' + manager: conda + platform: linux-64 + dependencies: + __unix: '' + gmpy2: '>=2.0.8' + mpmath: '>=0.19' + python: '>=3.8' + url: https://conda.anaconda.org/conda-forge/noarch/sympy-1.12-pypyh9d50eac_103.conda + hash: + md5: 2f7d6347d7acf6edf1ac7f2189f44c8f + sha256: 0025dd4e6411423903bf478d1b9fbff0cbbbe546f51c9375dfd6729ef2e1a1ac + category: main + optional: false +- name: tbb + version: 2021.11.0 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + libhwloc: '>=2.9.3,<2.9.4.0a0' + libstdcxx-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/tbb-2021.11.0-h00ab1b0_1.conda + hash: + md5: 4531d2927578e7e254ff3bcf6457518c + sha256: ded4de0d5a3eb7b47ed829f0ed0e3c61ccd428308bde52d8d22ced228038223b + category: main + optional: false +- name: tk + version: 8.6.13 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + libzlib: '>=1.2.13,<1.3.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h4845f30_101.conda + hash: + md5: d453b98d9c83e71da0741bb0ff4d76bc + sha256: e0569c9caa68bf476bead1bed3d79650bb080b532c64a4af7d8ca286c08dea4e + category: main + optional: false +- name: torchvision + version: 0.16.1 + manager: conda + platform: linux-64 + dependencies: + __glibc: '>=2.17,<3.0.a0' + libgcc-ng: '>=12' + libjpeg-turbo: '>=3.0.0,<4.0a0' + libpng: '>=1.6.39,<1.7.0a0' + libstdcxx-ng: '>=12' + numpy: '>=1.23.5,<2.0a0' + pillow: '>=5.3.0,!=8.3.0,!=8.3.1' + python: '>=3.11,<3.12.0a0' + python_abi: 3.11.* + pytorch: '>=2.1.0,<2.2.0a0' + requests: '' + url: https://conda.anaconda.org/conda-forge/linux-64/torchvision-0.16.1-cpu_py311h38ab453_2.conda + hash: + md5: 6796c9f44a0fe55fc064007dc3ac65ef + sha256: 
2852110869387876c291a8c911b30336b7fec5c543cd58ddee4db1fc5555b3e0 + category: main + optional: false +- name: tqdm + version: 4.66.2 + manager: conda + platform: linux-64 + dependencies: + colorama: '' + python: '>=3.7' + url: https://conda.anaconda.org/conda-forge/noarch/tqdm-4.66.2-pyhd8ed1ab_0.conda + hash: + md5: 2b8dfb969f984497f3f98409a9545776 + sha256: 416d1d9318f3267325ad7e2b8a575df20ff9031197b30c0222c3d3b023877260 + category: main + optional: false +- name: typing_extensions + version: 4.9.0 + manager: conda + platform: linux-64 + dependencies: + python: '>=3.8' + url: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.9.0-pyha770c72_0.conda + hash: + md5: a92a6440c3fe7052d63244f3aba2a4a7 + sha256: f3c5be8673bfd905c4665efcb27fa50192f24f84fa8eff2f19cba5d09753d905 + category: main + optional: false +- name: tzdata + version: 2024a + manager: conda + platform: linux-64 + dependencies: {} + url: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda + hash: + md5: 161081fc7cec0bfda0d86d7cb595f8d8 + sha256: 7b2b69c54ec62a243eb6fba2391b5e443421608c3ae5dbff938ad33ca8db5122 + category: main + optional: false +- name: urllib3 + version: 2.2.0 + manager: conda + platform: linux-64 + dependencies: + brotli-python: '>=1.0.9' + pysocks: '>=1.5.6,<2.0,!=1.5.7' + python: '>=3.7' + url: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.2.0-pyhd8ed1ab_0.conda + hash: + md5: 6a7e0694921f668a030d52f0c47baebd + sha256: 61a8a3bd36d235c349aedaf1aa6a79cce15d6fe89dca4bb593b596d0211513c6 + category: main + optional: false +- name: wheel + version: 0.42.0 + manager: conda + platform: linux-64 + dependencies: + python: '>=3.7' + url: https://conda.anaconda.org/conda-forge/noarch/wheel-0.42.0-pyhd8ed1ab_0.conda + hash: + md5: 1cdea58981c5cbc17b51973bcaddcea7 + sha256: 80be0ccc815ce22f80c141013302839b0ed938a2edb50b846cf48d8a8c1cfa01 + category: main + optional: false +- name: xorg-libxau + version: 1.0.11 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/xorg-libxau-1.0.11-hd590300_0.conda + hash: + md5: 2c80dc38fface310c9bd81b17037fee5 + sha256: 309751371d525ce50af7c87811b435c176915239fc9e132b99a25d5e1703f2d4 + category: main + optional: false +- name: xorg-libxdmcp + version: 1.1.3 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=9.3.0' + url: https://conda.anaconda.org/conda-forge/linux-64/xorg-libxdmcp-1.1.3-h7f98852_0.tar.bz2 + hash: + md5: be93aabceefa2fac576e971aef407908 + sha256: 4df7c5ee11b8686d3453e7f3f4aa20ceef441262b49860733066c52cfd0e4a77 + category: main + optional: false +- name: xz + version: 5.2.6 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + url: https://conda.anaconda.org/conda-forge/linux-64/xz-5.2.6-h166bdaf_0.tar.bz2 + hash: + md5: 2161070d867d1b1204ea749c8eec4ef0 + sha256: 03a6d28ded42af8a347345f82f3eebdd6807a08526d47899a42d62d319609162 + category: main + optional: false +- name: yaml + version: 0.2.5 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=9.4.0' + url: https://conda.anaconda.org/conda-forge/linux-64/yaml-0.2.5-h7f98852_2.tar.bz2 + hash: + md5: 4cb3ad778ec2d5a7acbdf254eb1c42ae + sha256: a4e34c710eeb26945bdbdaba82d3d74f60a78f54a874ec10d373811a5d217535 + category: main + optional: false +- name: zipp + version: 3.17.0 + manager: conda + platform: linux-64 + dependencies: + python: '>=3.8' + url: https://conda.anaconda.org/conda-forge/noarch/zipp-3.17.0-pyhd8ed1ab_0.conda + hash: + md5: 
2e4d6bc0b14e10f895fc6791a7d9b26a + sha256: bced1423fdbf77bca0a735187d05d9b9812d2163f60ab426fc10f11f92ecbe26 + category: main + optional: false +- name: zstd + version: 1.5.5 + manager: conda + platform: linux-64 + dependencies: + libgcc-ng: '>=12' + libstdcxx-ng: '>=12' + libzlib: '>=1.2.13,<1.3.0a0' + url: https://conda.anaconda.org/conda-forge/linux-64/zstd-1.5.5-hfc55251_0.conda + hash: + md5: 04b88013080254850d6c01ed54810589 + sha256: 607cbeb1a533be98ba96cf5cdf0ddbb101c78019f1fda063261871dad6248609 + category: main + optional: false diff --git a/conda-lock.yml b/conda-lock.yml old mode 100644 new mode 100755 diff --git a/conf/analyzer/default.yaml b/conf/analyzer/default.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/detector/retinaface.yaml b/conf/analyzer/detector/retinaface.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/logger/json_format.yaml b/conf/analyzer/logger/json_format.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/predictor/align/synergy_mobilenet_v2.yaml b/conf/analyzer/predictor/align/synergy_mobilenet_v2.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/predictor/au/open_graph_swin_base.yaml b/conf/analyzer/predictor/au/open_graph_swin_base.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/predictor/deepfake/efficientnet_b7.yaml b/conf/analyzer/predictor/deepfake/efficientnet_b7.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/predictor/embed/r50_vggface_1m.yaml b/conf/analyzer/predictor/embed/r50_vggface_1m.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/predictor/fer/efficientnet_b0_7.yaml b/conf/analyzer/predictor/fer/efficientnet_b0_7.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/predictor/fer/efficientnet_b2_8.yaml b/conf/analyzer/predictor/fer/efficientnet_b2_8.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/predictor/va/elim_al_alexnet.yaml b/conf/analyzer/predictor/va/elim_al_alexnet.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/predictor/verify/adaface_ir101_webface12m.yaml b/conf/analyzer/predictor/verify/adaface_ir101_webface12m.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/predictor/verify/r100_magface_unpg.yaml b/conf/analyzer/predictor/verify/r100_magface_unpg.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/reader/default.yaml b/conf/analyzer/reader/default.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/reader/file.yaml b/conf/analyzer/reader/file.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/reader/tensor.yaml b/conf/analyzer/reader/tensor.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/reader/universal.yaml b/conf/analyzer/reader/universal.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/unifier/img_244.yaml b/conf/analyzer/unifier/img_244.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/unifier/img_260.yaml b/conf/analyzer/unifier/img_260.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/unifier/img_380.yaml b/conf/analyzer/unifier/img_380.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/utilizer/align/lmk3d_mesh_pose.yaml b/conf/analyzer/utilizer/align/lmk3d_mesh_pose.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/utilizer/draw_boxes/torchvision_boxes.yaml b/conf/analyzer/utilizer/draw_boxes/torchvision_boxes.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/utilizer/draw_landmarks/torchvision_keypoints.yaml 
b/conf/analyzer/utilizer/draw_landmarks/torchvision_keypoints.yaml old mode 100644 new mode 100755 diff --git a/conf/analyzer/utilizer/save/image_saver.yaml b/conf/analyzer/utilizer/save/image_saver.yaml old mode 100644 new mode 100755 diff --git a/conf/config.yaml b/conf/config.yaml old mode 100644 new mode 100755 diff --git a/conf/merged/gpu.merged.config.yaml b/conf/merged/gpu.merged.config.yaml old mode 100644 new mode 100755 diff --git a/conf/merged/merged.config.yaml b/conf/merged/merged.config.yaml old mode 100644 new mode 100755 diff --git a/conf/tensor.config.yaml b/conf/tensor.config.yaml old mode 100644 new mode 100755 diff --git a/conf/tests.config.1.yaml b/conf/tests.config.1.yaml old mode 100644 new mode 100755 diff --git a/conf/tests.config.2.yaml b/conf/tests.config.2.yaml old mode 100644 new mode 100755 diff --git a/conf/tests.config.3.yaml b/conf/tests.config.3.yaml old mode 100644 new mode 100755 diff --git a/conf/tests.config.4.yaml b/conf/tests.config.4.yaml old mode 100644 new mode 100755 diff --git a/conf/tests.config.5.yaml b/conf/tests.config.5.yaml old mode 100644 new mode 100755 diff --git a/data/facetorch-logo-42.png b/data/facetorch-logo-42.png old mode 100644 new mode 100755 diff --git a/data/facetorch-logo-64.png b/data/facetorch-logo-64.png old mode 100644 new mode 100755 diff --git a/data/input/tensor.pt b/data/input/tensor.pt old mode 100644 new mode 100755 diff --git a/data/input/test.jpg b/data/input/test.jpg old mode 100644 new mode 100755 diff --git a/data/input/test2.jpg b/data/input/test2.jpg old mode 100644 new mode 100755 diff --git a/data/input/test3.jpg b/data/input/test3.jpg old mode 100644 new mode 100755 diff --git a/data/input/test4.jpg b/data/input/test4.jpg old mode 100644 new mode 100755 diff --git a/data/input/test5.jpg b/data/input/test5.jpg old mode 100644 new mode 100755 diff --git a/data/output/test.png b/data/output/test.png old mode 100644 new mode 100755 diff --git a/data/output/test2.png b/data/output/test2.png old mode 100644 new mode 100755 diff --git a/data/output/test3.png b/data/output/test3.png old mode 100644 new mode 100755 diff --git a/data/output/test4.png b/data/output/test4.png old mode 100644 new mode 100755 diff --git a/data/output/test5.png b/data/output/test5.png old mode 100644 new mode 100755 diff --git a/data/output/test_tensor.png b/data/output/test_tensor.png old mode 100644 new mode 100755 diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml old mode 100644 new mode 100755 diff --git a/docker-compose.yml b/docker-compose.yml old mode 100644 new mode 100755 diff --git a/docker/Dockerfile b/docker/Dockerfile old mode 100644 new mode 100755 diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev old mode 100644 new mode 100755 diff --git a/docker/Dockerfile.dev.gpu b/docker/Dockerfile.dev.gpu old mode 100644 new mode 100755 diff --git a/docker/Dockerfile.gpu b/docker/Dockerfile.gpu old mode 100644 new mode 100755 diff --git a/docker/Dockerfile.lock b/docker/Dockerfile.lock old mode 100644 new mode 100755 diff --git a/docker/Dockerfile.tests b/docker/Dockerfile.tests old mode 100644 new mode 100755 diff --git a/docs/doc-search.html b/docs/doc-search.html index 0f0cf80..ff3beb0 100644 --- a/docs/doc-search.html +++ b/docs/doc-search.html @@ -4,8 +4,8 @@ Search
[hunk body unrecoverable: replaced script/stylesheet tags in the HTML head; tag content was stripped during extraction]
diff --git a/docs/facetorch/analyzer/core.html b/docs/facetorch/analyzer/core.html
--- a/docs/facetorch/analyzer/core.html
+++ b/docs/facetorch/analyzer/core.html
@@ -22,204 +25,6 @@

Module facetorch.analyzer.core

-
-
-from typing import Optional, Union
-
-import torch
-import numpy as np
-from codetiming import Timer
-from PIL import Image
-from facetorch.analyzer.predictor.core import FacePredictor
-from facetorch.datastruct import ImageData, Response
-from facetorch.logger import LoggerJsonFile
-from importlib.metadata import version
-from hydra.utils import instantiate
-from omegaconf import OmegaConf
-
-logger = LoggerJsonFile().logger
-
-
-class FaceAnalyzer(object):
-    @Timer(
-        "FaceAnalyzer.__init__", "{name}: {milliseconds:.2f} ms", logger=logger.debug
-    )
-    def __init__(self, cfg: OmegaConf):
-        """FaceAnalyzer is the main class that reads images, runs face detection, tensor unification and facial feature prediction.
-        It also draws bounding boxes and facial landmarks over the image.
-
-        The following components are used:
-
-        1. Reader - reads the image and returns an ImageData object containing the image tensor.
-        2. Detector - wrapper around a neural network that detects faces.
-        3. Unifier - processor that unifies sizes of all faces and normalizes them between 0 and 1.
-        4. Predictor dict - dict of wrappers around neural networks trained to analyze facial features.
-        5. Utilizer dict - dict of utilizer processors that can, for example, extract 3D face landmarks or draw boxes over the image.
-
-        Args:
-            cfg (OmegaConf): Config object with image reader, face detector, unifier and predictor configurations.
-
-        Attributes:
-            cfg (OmegaConf): Config object with image reader, face detector, unifier and predictor configurations.
-            reader (BaseReader): Reader object that reads the image and returns an ImageData object containing the image tensor.
-            detector (FaceDetector): FaceDetector object that wraps a neural network that detects faces.
-            unifier (FaceUnifier): FaceUnifier object that unifies sizes of all faces and normalizes them between 0 and 1.
-            predictors (Dict[str, FacePredictor]): Dict of FacePredictor objects that predict facial features. Key is the name of the predictor.
-            utilizers (Dict[str, FaceUtilizer]): Dict of FaceUtilizer objects that can extract 3D face landmarks, draw boxes over the image, etc. Key is the name of the utilizer.
-            logger (logging.Logger): Logger object that logs messages to the console or to a file.
-
-        """
-        self.cfg = cfg
-        self.logger = instantiate(self.cfg.logger).logger
-
-        self.logger.info("Initializing FaceAnalyzer")
-        self.logger.debug("Config", extra=self.cfg.__dict__["_content"])
-
-        self.logger.info("Initializing BaseReader")
-        self.reader = instantiate(self.cfg.reader)
-
-        self.logger.info("Initializing FaceDetector")
-        self.detector = instantiate(self.cfg.detector)
-
-        self.logger.info("Initializing FaceUnifier")
-        if "unifier" in self.cfg:
-            self.unifier = instantiate(self.cfg.unifier)
-        else:
-            self.unifier = None
-
-        self.logger.info("Initializing FacePredictor objects")
-        self.predictors = {}
-        if "predictor" in self.cfg:
-            for predictor_name in self.cfg.predictor:
-                self.logger.info(f"Initializing FacePredictor {predictor_name}")
-                self.predictors[predictor_name] = instantiate(
-                    self.cfg.predictor[predictor_name]
-                )
-
-        self.utilizers = {}
-        if "utilizer" in self.cfg:
-            self.logger.info("Initializing BaseUtilizer objects")
-            for utilizer_name in self.cfg.utilizer:
-                self.logger.info(f"Initializing BaseUtilizer {utilizer_name}")
-                self.utilizers[utilizer_name] = instantiate(
-                    self.cfg.utilizer[utilizer_name]
-                )
-
-    @Timer("FaceAnalyzer.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
-    def run(
-        self,
-        image_source: Optional[
-            Union[str, torch.Tensor, np.ndarray, bytes, Image.Image]
-        ] = None,
-        path_image: Optional[str] = None,
-        batch_size: int = 8,
-        fix_img_size: bool = False,
-        return_img_data: bool = False,
-        include_tensors: bool = False,
-        path_output: Optional[str] = None,
-        tensor: Optional[torch.Tensor] = None,
-    ) -> Union[Response, ImageData]:
-        """Reads image, detects faces, unifies the detected faces, predicts facial features
-         and returns analyzed data.
-
-        Args:
-            image_source (Optional[Union[str, torch.Tensor, np.ndarray, bytes, Image.Image]]): Input to be analyzed. If None, path_image or tensor must be provided. Default: None.
-            path_image (Optional[str]): Path to the image to be analyzed. If None, tensor must be provided. Default: None.
-            batch_size (int): Batch size for making predictions on the faces. Default is 8.
-            fix_img_size (bool): If True, resizes the image to the size specified in reader. Default is False.
-            return_img_data (bool): If True, returns all image data including tensors, otherwise only returns the faces. Default is False.
-            include_tensors (bool): If True, keeps the tensors in the returned data object; if False, they are removed before returning. Default is False.
-            path_output (Optional[str]): Path where to save the image with detected faces. If None, the image is not saved. Default: None.
-            tensor (Optional[torch.Tensor]): Image tensor to be analyzed. If None, path_image must be provided. Default: None.
-
-        Returns:
-            Union[Response, ImageData]: If return_img_data is False, returns a Response object containing the faces and their facial features. If return_img_data is True, returns the entire ImageData object.
-
-        """
-
-        def _predict_batch(
-            data: ImageData, predictor: FacePredictor, predictor_name: str
-        ) -> ImageData:
-            n_faces = len(data.faces)
-
-            for face_indx_start in range(0, n_faces, batch_size):
-                face_indx_end = min(face_indx_start + batch_size, n_faces)
-
-                face_batch_tensor = torch.stack(
-                    [face.tensor for face in data.faces[face_indx_start:face_indx_end]]
-                )
-                preds = predictor.run(face_batch_tensor)
-                data.add_preds(preds, predictor_name, face_indx_start)
-
-            return data
-
-        self.logger.info("Running FaceAnalyzer")
-
-        if path_image is None and tensor is None and image_source is None:
-            raise ValueError("Either image_source, path_image or tensor must be provided.")
-
-        if image_source is not None:
-            self.logger.debug("Using image_source as input")
-            reader_input = image_source
-        elif path_image is not None:
-            self.logger.debug(
-                "Using path_image as input", extra={"path_image": path_image}
-            )
-            reader_input = path_image
-        else:
-            self.logger.debug("Using tensor as input")
-            reader_input = tensor
-
-        self.logger.info("Reading image", extra={"input": reader_input})
-        data = self.reader.run(reader_input, fix_img_size=fix_img_size)
-
-        path_output = None if path_output == "None" else path_output
-        data.path_output = path_output
-
-        try:
-            data.version = version("facetorch")
-        except Exception as e:
-            self.logger.warning("Could not get version number", extra={"error": e})
-
-        self.logger.info("Detecting faces")
-        data = self.detector.run(data)
-        n_faces = len(data.faces)
-        self.logger.info(f"Number of faces: {n_faces}")
-
-        if n_faces > 0 and self.unifier is not None:
-            self.logger.info("Unifying faces")
-            data = self.unifier.run(data)
-
-            self.logger.info("Predicting facial features")
-            for predictor_name, predictor in self.predictors.items():
-                self.logger.info(f"Running FacePredictor: {predictor_name}")
-                data = _predict_batch(data, predictor, predictor_name)
-
-            self.logger.info("Utilizing facial features")
-            for utilizer_name, utilizer in self.utilizers.items():
-                self.logger.info(f"Running BaseUtilizer: {utilizer_name}")
-                data = utilizer.run(data)
-        else:
-            if "save" in self.utilizers:
-                self.utilizers["save"].run(data)
-
-        if not include_tensors:
-            self.logger.debug(
-                "Removing tensors from response as include_tensors is False"
-            )
-            data.reset_tensors()
-
-        response = Response(faces=data.faces, version=data.version)
-
-        if return_img_data:
-            self.logger.debug("Returning image data object", extra=data.__dict__)
-            return data
-        else:
-            self.logger.debug("Returning response with faces", extra=response.__dict__)
-            return response
-
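For orientation while reviewing the removed listing above, here is a minimal usage sketch of FaceAnalyzer.run (a sketch, assuming an OmegaConf config such as the gpu.config.yml shipped with facetorch; file paths are illustrative):

# Minimal FaceAnalyzer.run sketch; config and image paths are illustrative.
from facetorch import FaceAnalyzer
from omegaconf import OmegaConf

cfg = OmegaConf.load("gpu.config.yml")  # assumed analyzer config file
analyzer = FaceAnalyzer(cfg.analyzer)

# Response carries one Face per detected face plus the package version.
response = analyzer.run(image_source="test.jpg", batch_size=8)
print(response.version, len(response.faces))

Exactly one of image_source, path_image, or tensor is required; when several are given, image_source takes precedence, then path_image, then tensor.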
@@ -481,123 +286,6 @@

Returns

Union[Response, ImageData]
If return_img_data is False, returns a Response object containing the faces and their facial features. If return_img_data is True, returns the entire ImageData object.
-
-@Timer("FaceAnalyzer.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
-def run(
-    self,
-    image_source: Optional[
-        Union[str, torch.Tensor, np.ndarray, bytes, Image.Image]
-    ] = None,
-    path_image: Optional[str] = None,
-    batch_size: int = 8,
-    fix_img_size: bool = False,
-    return_img_data: bool = False,
-    include_tensors: bool = False,
-    path_output: Optional[str] = None,
-    tensor: Optional[torch.Tensor] = None,
-) -> Union[Response, ImageData]:
-    """Reads image, detects faces, unifies the detected faces, predicts facial features
-     and returns analyzed data.
-
-    Args:
-        image_source (Optional[Union[str, torch.Tensor, np.ndarray, bytes, Image.Image]]): Input to be analyzed. If None, path_image or tensor must be provided. Default: None.
-        path_image (Optional[str]): Path to the image to be analyzed. If None, tensor must be provided. Default: None.
-        batch_size (int): Batch size for making predictions on the faces. Default is 8.
-        fix_img_size (bool): If True, resizes the image to the size specified in reader. Default is False.
-        return_img_data (bool): If True, returns all image data including tensors, otherwise only returns the faces. Default is False.
-        include_tensors (bool): If True, keeps tensors in the returned data object; if False, they are removed. Default is False.
-        path_output (Optional[str]): Path where the image with detected faces is saved. If None, the image is not saved. Default: None.
-        tensor (Optional[torch.Tensor]): Image tensor to be analyzed. If None, path_image must be provided. Default: None.
-
-    Returns:
-        Union[Response, ImageData]: If return_img_data is False, returns a Response object containing the faces and their facial features. If return_img_data is True, returns the entire ImageData object.
-
-    """
-
-    def _predict_batch(
-        data: ImageData, predictor: FacePredictor, predictor_name: str
-    ) -> ImageData:
-        n_faces = len(data.faces)
-
-        for face_indx_start in range(0, n_faces, batch_size):
-            face_indx_end = min(face_indx_start + batch_size, n_faces)
-
-            face_batch_tensor = torch.stack(
-                [face.tensor for face in data.faces[face_indx_start:face_indx_end]]
-            )
-            preds = predictor.run(face_batch_tensor)
-            data.add_preds(preds, predictor_name, face_indx_start)
-
-        return data
-
-    self.logger.info("Running FaceAnalyzer")
-
-    if path_image is None and tensor is None and image_source is None:
-        raise ValueError("Either image_source, path_image or tensor must be provided.")
-
-    if image_source is not None:
-        self.logger.debug("Using image_source as input")
-        reader_input = image_source
-    elif path_image is not None:
-        self.logger.debug(
-            "Using path_image as input", extra={"path_image": path_image}
-        )
-        reader_input = path_image
-    else:
-        self.logger.debug("Using tensor as input")
-        reader_input = tensor
-
-    self.logger.info("Reading image", extra={"input": reader_input})
-    data = self.reader.run(reader_input, fix_img_size=fix_img_size)
-
-    path_output = None if path_output == "None" else path_output
-    data.path_output = path_output
-
-    try:
-        data.version = version("facetorch")
-    except Exception as e:
-        self.logger.warning("Could not get version number", extra={"error": e})
-
-    self.logger.info("Detecting faces")
-    data = self.detector.run(data)
-    n_faces = len(data.faces)
-    self.logger.info(f"Number of faces: {n_faces}")
-
-    if n_faces > 0 and self.unifier is not None:
-        self.logger.info("Unifying faces")
-        data = self.unifier.run(data)
-
-        self.logger.info("Predicting facial features")
-        for predictor_name, predictor in self.predictors.items():
-            self.logger.info(f"Running FacePredictor: {predictor_name}")
-            data = _predict_batch(data, predictor, predictor_name)
-
-        self.logger.info("Utilizing facial features")
-        for utilizer_name, utilizer in self.utilizers.items():
-            self.logger.info(f"Running BaseUtilizer: {utilizer_name}")
-            data = utilizer.run(data)
-    else:
-        if "save" in self.utilizers:
-            self.utilizers["save"].run(data)
-
-    if not include_tensors:
-        self.logger.debug(
-            "Removing tensors from response as include_tensors is False"
-        )
-        data.reset_tensors()
-
-    response = Response(faces=data.faces, version=data.version)
-
-    if return_img_data:
-        self.logger.debug("Returning image data object", extra=data.__dict__)
-        return data
-    else:
-        self.logger.debug("Returning response with faces", extra=response.__dict__)
-        return response
-
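The inner _predict_batch helper is a plain chunked-inference loop. Detached from the facetorch data structures, the same pattern looks like this (a standalone sketch; model and face_tensors are placeholder names, not facetorch APIs):

# Standalone sketch of the chunking pattern used by _predict_batch;
# `model` and `face_tensors` are placeholders, not facetorch names.
import torch

def predict_in_batches(model, face_tensors, batch_size=8):
    outputs = []
    for start in range(0, len(face_tensors), batch_size):
        # Stack up to batch_size crops into one (B, C, H, W) tensor.
        batch = torch.stack(face_tensors[start:start + batch_size])
        with torch.no_grad():
            outputs.append(model(batch))
    return torch.cat(outputs)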
@@ -651,7 +339,6 @@

Returns


Index

@@ -675,7 +362,7 @@

-Generated by pdoc 0.10.0.
+Generated by pdoc 0.11.1.

-
\ No newline at end of file
+
diff --git a/docs/facetorch/analyzer/detector/core.html b/docs/facetorch/analyzer/detector/core.html
index 457ff6c..e2fca1a 100644
--- a/docs/facetorch/analyzer/detector/core.html
+++ b/docs/facetorch/analyzer/detector/core.html
@@ -2,18 +2,21 @@

facetorch.analyzer.detector.core API documentation
@@ -22,64 +25,6 @@

Module facetorch.analyzer.detector.core

-
-import torch
-from codetiming import Timer
-from facetorch.base import BaseDownloader, BaseModel
-from facetorch.datastruct import ImageData
-from facetorch.logger import LoggerJsonFile
-
-from .post import BaseDetPostProcessor
-from .pre import BaseDetPreProcessor
-
-logger = LoggerJsonFile().logger
-
-
-class FaceDetector(BaseModel):
-    @Timer(
-        "FaceDetector.__init__", "{name}: {milliseconds:.2f} ms", logger=logger.debug
-    )
-    def __init__(
-        self,
-        downloader: BaseDownloader,
-        device: torch.device,
-        preprocessor: BaseDetPreProcessor,
-        postprocessor: BaseDetPostProcessor,
-        **kwargs
-    ):
-        """FaceDetector is a wrapper around a neural network model that is trained to detect faces.
-
-        Args:
-            downloader (BaseDownloader): Downloader that downloads the model.
-            device (torch.device): Torch device cpu or cuda for the model.
-            preprocessor (BaseDetPreProcessor): Preprocessor that runs before the model.
-            postprocessor (BaseDetPostProcessor): Postprocessor that runs after the model.
-        """
-        self.__dict__.update(kwargs)
-        super().__init__(downloader, device)
-
-        self.preprocessor = preprocessor
-        self.postprocessor = postprocessor
-
-    @Timer("FaceDetector.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
-    def run(self, data: ImageData) -> ImageData:
-        """Detect all faces in the image.
-
-        Args:
-            data (ImageData): ImageData object containing the image tensor with values between 0 - 255 and shape (batch_size, channels, height, width).
-
-        Returns:
-            ImageData: Image data object with Detection tensors and detected Face objects.
-        """
-        data = self.preprocessor.run(data)
-        logits = self.inference(data.tensor)
-        data = self.postprocessor.run(data, logits)
-
-        return data
-
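FaceDetector.run pins down a three-stage contract: preprocess, forward pass, postprocess. Spelled out step by step (a sketch; reader and detector are assumed to be instances built from the analyzer config, and the image path is illustrative):

# The three stages behind FaceDetector.run; `reader` and `detector`
# are assumed instances, "test.jpg" is an illustrative path.
data = reader.run("test.jpg")             # ImageData, tensor values 0-255
data = detector.preprocessor.run(data)    # device transfer + transforms
logits = detector.inference(data.tensor)  # raw model outputs
data = detector.postprocessor.run(data, logits)  # logits -> Face objects
print(len(data.faces))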
@@ -174,26 +119,6 @@

Returns

ImageData
Image data object with Detection tensors and detected Face objects.
-
-@Timer("FaceDetector.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
-def run(self, data: ImageData) -> ImageData:
-    """Detect all faces in the image.
-
-    Args:
-        data (ImageData): ImageData object containing the image tensor with values between 0 - 255 and shape (batch_size, channels, height, width).
-
-    Returns:
-        ImageData: Image data object with Detection tensors and detected Face objects.
-    """
-    data = self.preprocessor.run(data)
-    logits = self.inference(data.tensor)
-    data = self.postprocessor.run(data, logits)
-
-    return data
-

Inherited members

@@ -256,7 +181,6 @@

Inherited members


Index

    @@ -280,7 +204,7 @@

-Generated by pdoc 0.10.0.
+Generated by pdoc 0.11.1.

-
\ No newline at end of file
+
diff --git a/docs/facetorch/analyzer/detector/index.html b/docs/facetorch/analyzer/detector/index.html
index c104158..adce873 100644
--- a/docs/facetorch/analyzer/detector/index.html
+++ b/docs/facetorch/analyzer/detector/index.html
@@ -2,18 +2,21 @@

facetorch.analyzer.detector API documentation
    @@ -22,15 +25,6 @@

    Module facetorch.analyzer.detector

    -
-from .core import FaceDetector
    -
    -
    -__all__ = ["FaceDetector"]
    -

    Sub-modules

    @@ -140,26 +134,6 @@

    Returns

    ImageData
    Image data object with Detection tensors and detected Face objects.
    -
-@Timer("FaceDetector.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
    -def run(self, data: ImageData) -> ImageData:
    -    """Detect all faces in the image.
    -
    -    Args:
-        data (ImageData): ImageData object containing the image tensor with values between 0 - 255 and shape (batch_size, channels, height, width).
    -
    -    Returns:
    -        ImageData: Image data object with Detection tensors and detected Face objects.
    -    """
    -    data = self.preprocessor.run(data)
    -    logits = self.inference(data.tensor)
    -    data = self.postprocessor.run(data, logits)
    -
    -    return data
    -

    Inherited members

    @@ -222,7 +196,6 @@

    Inherited members


    Index

      @@ -253,7 +226,7 @@

-Generated by pdoc 0.10.0.
+Generated by pdoc 0.11.1.

-
\ No newline at end of file
+
diff --git a/docs/facetorch/analyzer/detector/post.html b/docs/facetorch/analyzer/detector/post.html
index cb67c87..b77f9d5 100644
--- a/docs/facetorch/analyzer/detector/post.html
+++ b/docs/facetorch/analyzer/detector/post.html
@@ -2,18 +2,21 @@

facetorch.analyzer.detector.post API documentation
      @@ -22,319 +25,6 @@

      Module facetorch.analyzer.detector.post

      -
-from abc import abstractmethod
      -from itertools import product as product
      -from math import ceil
      -from typing import List, Tuple, Union
      -
      -import torch
      -from codetiming import Timer
      -from facetorch.base import BaseProcessor
      -from facetorch.datastruct import Detection, Dimensions, Face, ImageData, Location
      -from facetorch.logger import LoggerJsonFile
      -from facetorch.utils import rgb2bgr
      -from torchvision import transforms
      -
      -logger = LoggerJsonFile().logger
      -
      -
      -class BaseDetPostProcessor(BaseProcessor):
      -    @Timer(
      -        "BaseDetPostProcessor.__init__",
      -        "{name}: {milliseconds:.2f} ms",
      -        logger=logger.debug,
      -    )
      -    def __init__(
      -        self,
      -        transform: transforms.Compose,
      -        device: torch.device,
      -        optimize_transform: bool,
      -    ):
      -        """Base class for detector post processors.
      -
      -        All detector post processors should subclass it.
-        All subclasses should overwrite:
      -
      -        - Methods:``run``, used for running the processing
      -
      -        Args:
      -            device (torch.device): Torch device cpu or cuda.
      -            transform (transforms.Compose): Transform compose object to be applied to the image.
      -            optimize_transform (bool): Whether to optimize the transform.
      -
      -        """
      -        super().__init__(transform, device, optimize_transform)
      -
      -    @abstractmethod
      -    def run(
      -        self, data: ImageData, logits: Union[torch.Tensor, Tuple[torch.Tensor]]
      -    ) -> ImageData:
      -        """Abstract method that runs the detector post processing functionality
      -        and returns the data object.
      -
      -        Args:
      -            data (ImageData): ImageData object containing the image tensor.
      -            logits (Union[torch.Tensor, Tuple[torch.Tensor]]): Output of the detector model.
      -
      -        Returns:
      -            ImageData: Image data object with Detection tensors and detected Face objects.
      -
      -
      -        """
      -
      -
      -class PriorBox:
      -    """
      -    PriorBox class for generating prior boxes.
      -
      -    Args:
      -        min_sizes (List[List[int]]): List of list of minimum sizes for each feature map.
      -        steps (List[int]): List of steps for each feature map.
      -        clip (bool): Whether to clip the prior boxes to the image boundaries.
      -    """
      -
      -    def __init__(self, min_sizes: List[List[int]], steps: List[int], clip: bool):
      -        self.min_sizes = [list(min_size) for min_size in min_sizes]
      -        self.steps = list(steps)
      -        self.clip = clip
      -
      -    def forward(self, dims: Dimensions) -> torch.Tensor:
      -        """Generate prior boxes for each feature map.
      -
      -        Args:
      -            dims (Dimensions): Dimensions of the image.
      -
      -        Returns:
      -            torch.Tensor: Tensor of prior boxes.
      -        """
      -        feature_maps = [
      -            [ceil(dims.height / step), ceil(dims.width / step)] for step in self.steps
      -        ]
      -        anchors = []
      -        for k, f in enumerate(feature_maps):
      -            min_sizes = self.min_sizes[k]
      -            for i, j in product(range(f[0]), range(f[1])):
      -                for min_size in min_sizes:
      -                    s_kx = min_size / dims.width
      -                    s_ky = min_size / dims.height
      -                    dense_cx = [x * self.steps[k] / dims.width for x in [j + 0.5]]
      -                    dense_cy = [y * self.steps[k] / dims.height for y in [i + 0.5]]
      -                    for cy, cx in product(dense_cy, dense_cx):
      -                        anchors.append([cx, cy, s_kx, s_ky])
      -
      -        output = torch.Tensor(anchors)
      -        if self.clip:
      -            output.clamp_(min=0, max=1)
      -        return output
      -
      -
      -class PostRetFace(BaseDetPostProcessor):
      -    @Timer("PostRetFace.__init__", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
      -    def __init__(
      -        self,
      -        transform: transforms.Compose,
      -        device: torch.device,
      -        optimize_transform: bool,
      -        confidence_threshold: float,
      -        top_k: int,
      -        nms_threshold: float,
      -        keep_top_k: int,
      -        score_threshold: float,
      -        prior_box: PriorBox,
      -        variance: List[float],
      -        reverse_colors: bool = False,
      -        expand_box_ratio: float = 0.0,
      -    ):
      -        """Initialize the detector postprocessor. Modified from https://github.com/biubug6/Pytorch_Retinaface.
      -
      -        Args:
      -            transform (Compose): Composed Torch transform object.
      -            device (torch.device): Torch device cpu or cuda.
      -            optimize_transform (bool): Whether to optimize the transform.
      -            confidence_threshold (float): Confidence threshold for face detection.
      -            top_k (int): Top K faces to keep before NMS.
      -            nms_threshold (float): NMS threshold.
      -            keep_top_k (int): Keep top K faces after NMS.
      -            score_threshold (float): Score threshold for face detection.
      -            prior_box (PriorBox): PriorBox object.
      -            variance (List[float]): Prior box variance.
      -            reverse_colors (bool): Whether to reverse the colors of the image tensor from RGB to BGR or vice versa. If False, the colors remain unchanged. Default: False.
      -            expand_box_ratio (float): Expand the box by this ratio. Default: 0.0.
      -        """
      -        super().__init__(transform, device, optimize_transform)
      -        self.confidence_threshold = confidence_threshold
      -        self.top_k = top_k
      -        self.nms_threshold = nms_threshold
      -        self.keep_top_k = keep_top_k
      -        self.score_threshold = score_threshold
      -        self.prior_box = prior_box
      -        self.variance = list(variance)
      -        self.reverse_colors = reverse_colors
      -        self.expand_box_ratio = expand_box_ratio
      -
      -    @Timer("PostRetFace.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
      -    def run(
      -        self,
      -        data: ImageData,
      -        logits: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]],
      -    ) -> ImageData:
      -        """Run the detector postprocessor.
      -
      -        Args:
      -            data (ImageData): ImageData object containing the image tensor.
      -            logits (Union[torch.Tensor, Tuple[torch.Tensor]]): Output of the detector model.
      -
      -        Returns:
      -            ImageData: Image data object with detection tensors and detected Face objects.
      -        """
      -        data.det = Detection(loc=logits[0], conf=logits[1], landmarks=logits[2])
      -
      -        if self.reverse_colors:
      -            data.tensor = rgb2bgr(data.tensor)
      -
      -        data = self._process_dets(data)
      -        data = self._extract_faces(data)
      -        return data
      -
      -    def _process_dets(self, data: ImageData) -> ImageData:
      -        """Compute the detections and add them to the data detector.
      -
      -        Args:
-            data (ImageData): Image data with locations and confidences from the detector.
      -
      -        Returns:
      -            ImageData: Image data object with detections.
      -        """
      -
      -        def _decode(
      -            _loc: torch.Tensor, _priors: torch.Tensor, variances: List[float]
      -        ) -> torch.Tensor:
      -            _boxes = torch.cat(
      -                (
      -                    _priors[:, :2] + _loc[:, :2] * variances[0] * _priors[:, 2:],
      -                    _priors[:, 2:] * torch.exp(_loc[:, 2:] * variances[1]),
      -                ),
      -                1,
      -            )
      -            _boxes[:, :2] -= _boxes[:, 2:] / 2
      -            _boxes[:, 2:] += _boxes[:, :2]
      -            return _boxes
      -
      -        def _extract_boxes(_loc: torch.Tensor) -> torch.Tensor:
      -            priors = self.prior_box.forward(data.dims)
      -            priors = priors.to(self.device)
      -            prior_data = priors.data
      -            _boxes = _decode(_loc.data.squeeze(0), prior_data, self.variance)
      -            img_scale = torch.Tensor([data.dims.width, data.dims.height]).repeat(2)
      -            _boxes = _boxes * img_scale.to(self.device)
      -            return _boxes
      -
      -        def _nms(dets: torch.Tensor, thresh: float) -> torch.Tensor:
      -            """Non-maximum suppression."""
      -            x1 = dets[:, 0]
      -            y1 = dets[:, 1]
      -            x2 = dets[:, 2]
      -            y2 = dets[:, 3]
      -
      -            areas = (x2 - x1 + 1) * (y2 - y1 + 1)
      -            order = torch.arange(dets.shape[0], device=self.device)
      -
      -            zero_tensor = torch.tensor(0.0).to(self.device)
      -            keep = []
      -            while order.size()[0] > 0:
      -                i = order[0]
      -                keep.append(i)
      -                xx1 = torch.maximum(x1[i], x1[order[1:]])
      -                yy1 = torch.maximum(y1[i], y1[order[1:]])
      -                xx2 = torch.minimum(x2[i], x2[order[1:]])
      -                yy2 = torch.minimum(y2[i], y2[order[1:]])
      -
      -                w = torch.maximum(zero_tensor, xx2 - xx1 + 1)
      -                h = torch.maximum(zero_tensor, yy2 - yy1 + 1)
      -                inter = torch.multiply(w, h)
      -                ovr = inter / (areas[i] + areas[order[1:]] - inter)
      -
      -                inds = ovr <= thresh
      -                order = order[1:][inds]
      -
      -            if len(keep) > 0:
      -                keep = torch.stack(keep)
      -            else:
      -                keep = torch.tensor([])
      -
      -            return keep
      -
      -        def _extract_dets(_conf: torch.Tensor, _boxes: torch.Tensor) -> torch.Tensor:
      -            scores = _conf.squeeze(0).data[:, 1]
      -            # ignore low scores
      -            inds = scores > self.confidence_threshold
      -            _boxes = _boxes[inds]
      -            scores = scores[inds]
      -            # keep top-K before NMS
      -            order = torch.argsort(scores, descending=True)[: self.top_k]
      -            _boxes = _boxes[order]
      -            scores = scores[order]
      -            # do NMS
      -            _dets = torch.hstack((_boxes, scores.unsqueeze(1)))
      -            keep = _nms(_dets, self.nms_threshold)
      -
      -            if not keep.shape[0] == 0:
      -                _dets = _dets[keep, :]
      -                # keep top-K after NMS
      -                _dets = _dets[: self.keep_top_k, :]
      -                # keep dets with score > score_threshold
      -                _dets = _dets[_dets[:, 4] > self.score_threshold]
      -
      -            return _dets
      -
      -        data.det.boxes = _extract_boxes(data.det.loc)
      -        data.det.dets = _extract_dets(data.det.conf, data.det.boxes)
      -        return data
      -
      -    def _extract_faces(self, data: ImageData) -> ImageData:
      -        """Extracts the faces from the original image using the detections.
      -
      -        Args:
      -            data (ImageData): Image data with image tensor and detections.
      -
      -        Returns:
      -            ImageData: Image data object with extracted faces.
      -
      -        """
      -
      -        def _get_coordinates(_det: torch.Tensor) -> Location:
      -            _det = torch.round(_det).int()
      -            loc = Location(
      -                x1=int(_det[0]),
      -                y1=int(_det[1]),
      -                x2=int(_det[2]),
      -                y2=int(_det[3]),
      -            )
      -
      -            loc.expand(amount=self.expand_box_ratio)
      -            loc.form_square()
      -
      -            return loc
      -
      -        for indx, det in enumerate(data.det.dets):
      -            loc = _get_coordinates(det)
      -            face_tensor = data.tensor[0, :, loc.y1 : loc.y2, loc.x1 : loc.x2]
      -            dims = Dimensions(face_tensor.shape[-2], face_tensor.shape[-1])
      -            size_img = data.tensor.shape[-2] * data.tensor.shape[-1]
      -            size_ratio = (dims.height * dims.width) / size_img
      -
      -            if not any([dim == 0 for dim in face_tensor.shape]):
      -                face = Face(
      -                    indx=indx, loc=loc, tensor=face_tensor, dims=dims, ratio=size_ratio
      -                )
      -                data.faces.append(face)
      -
      -        return data
      -
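The _decode helper above turns regressed offsets plus prior boxes into corner-format boxes. An equivalent standalone version (a sketch; the (0.1, 0.2) variances follow the common RetinaFace convention and are illustrative here):

# Equivalent standalone form of the _decode step; variance values are
# the customary RetinaFace setting, shown here only as an illustration.
import torch

def decode(loc, priors, variances=(0.1, 0.2)):
    centers = priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:]
    sizes = priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])
    # (cx, cy, w, h) -> (x1, y1, x2, y2), still in normalized coordinates
    return torch.cat((centers - sizes / 2, centers + sizes / 2), dim=1)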
      @@ -441,27 +131,6 @@

      Returns

      ImageData
      Image data object with Detection tensors and detected Face objects.
      -
-@abstractmethod
      -def run(
      -    self, data: ImageData, logits: Union[torch.Tensor, Tuple[torch.Tensor]]
      -) -> ImageData:
      -    """Abstract method that runs the detector post processing functionality
      -    and returns the data object.
      -
      -    Args:
      -        data (ImageData): ImageData object containing the image tensor.
      -        logits (Union[torch.Tensor, Tuple[torch.Tensor]]): Output of the detector model.
      -
      -    Returns:
      -        ImageData: Image data object with Detection tensors and detected Face objects.
      -
      -
      -    """
      -

      Inherited members

      @@ -553,39 +222,6 @@

      Returns

      torch.Tensor
      Tensor of prior boxes.
      -
-def forward(self, dims: Dimensions) -> torch.Tensor:
      -    """Generate prior boxes for each feature map.
      -
      -    Args:
      -        dims (Dimensions): Dimensions of the image.
      -
      -    Returns:
      -        torch.Tensor: Tensor of prior boxes.
      -    """
      -    feature_maps = [
      -        [ceil(dims.height / step), ceil(dims.width / step)] for step in self.steps
      -    ]
      -    anchors = []
      -    for k, f in enumerate(feature_maps):
      -        min_sizes = self.min_sizes[k]
      -        for i, j in product(range(f[0]), range(f[1])):
      -            for min_size in min_sizes:
      -                s_kx = min_size / dims.width
      -                s_ky = min_size / dims.height
      -                dense_cx = [x * self.steps[k] / dims.width for x in [j + 0.5]]
      -                dense_cy = [y * self.steps[k] / dims.height for y in [i + 0.5]]
      -                for cy, cx in product(dense_cy, dense_cx):
      -                    anchors.append([cx, cy, s_kx, s_ky])
      -
      -    output = torch.Tensor(anchors)
      -    if self.clip:
      -        output.clamp_(min=0, max=1)
      -    return output
      -
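PriorBox is easy to exercise in isolation (a sketch; the min_sizes and steps mirror a typical RetinaFace configuration and are illustrative, as is the 640x640 input):

# PriorBox in isolation; configuration values are illustrative.
from facetorch.datastruct import Dimensions

prior_box = PriorBox(
    min_sizes=[[16, 32], [64, 128], [256, 512]],
    steps=[8, 16, 32],
    clip=False,
)
priors = prior_box.forward(Dimensions(640, 640))
# One normalized (cx, cy, w, h) row per anchor:
# (80*80 + 40*40 + 20*20) * 2 = 16800 anchors for a 640x640 input.
print(priors.shape)  # torch.Size([16800, 4])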
      @@ -853,34 +489,6 @@

      Returns

      ImageData
      Image data object with detection tensors and detected Face objects.
      -
-@Timer("PostRetFace.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
      -def run(
      -    self,
      -    data: ImageData,
      -    logits: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]],
      -) -> ImageData:
      -    """Run the detector postprocessor.
      -
      -    Args:
      -        data (ImageData): ImageData object containing the image tensor.
      -        logits (Union[torch.Tensor, Tuple[torch.Tensor]]): Output of the detector model.
      -
      -    Returns:
      -        ImageData: Image data object with detection tensors and detected Face objects.
      -    """
      -    data.det = Detection(loc=logits[0], conf=logits[1], landmarks=logits[2])
      -
      -    if self.reverse_colors:
      -        data.tensor = rgb2bgr(data.tensor)
      -
      -    data = self._process_dets(data)
      -    data = self._extract_faces(data)
      -    return data
      -
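The hand-rolled _nms implements classic greedy IoU suppression (with a +1 pixel convention in the area term); torchvision ships the same algorithm, which makes for a quick sanity check (box and score values below are arbitrary):

# Greedy NMS sanity check via torchvision; values are arbitrary.
import torch
from torchvision.ops import nms

boxes = torch.tensor([[10.0, 10.0, 60.0, 60.0],
                      [12.0, 12.0, 62.0, 62.0],       # overlaps box 0
                      [100.0, 100.0, 150.0, 150.0]])
scores = torch.tensor([0.9, 0.8, 0.7])
keep = nms(boxes, scores, iou_threshold=0.4)
print(keep)  # tensor([0, 2]); the overlapping lower-score box is dropped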

      Inherited members

      @@ -942,7 +550,6 @@

      Inherited members


      Index

        @@ -978,7 +585,7 @@

-Generated by pdoc 0.10.0.
+Generated by pdoc 0.11.1.

-
\ No newline at end of file
+
diff --git a/docs/facetorch/analyzer/detector/pre.html b/docs/facetorch/analyzer/detector/pre.html
index 0e17f18..2db7ba4 100644
--- a/docs/facetorch/analyzer/detector/pre.html
+++ b/docs/facetorch/analyzer/detector/pre.html
@@ -2,18 +2,21 @@

facetorch.analyzer.detector.pre API documentation
        @@ -22,111 +25,6 @@

        Module facetorch.analyzer.detector.pre

        -
-from abc import abstractmethod
        -
        -import torch
        -from codetiming import Timer
        -from facetorch.base import BaseProcessor
        -from facetorch.datastruct import ImageData
        -from facetorch.logger import LoggerJsonFile
        -from facetorch.utils import rgb2bgr
        -from torchvision import transforms
        -
        -logger = LoggerJsonFile().logger
        -
        -
        -class BaseDetPreProcessor(BaseProcessor):
        -    @Timer(
        -        "BaseDetPreProcessor.__init__",
        -        "{name}: {milliseconds:.2f} ms",
        -        logger=logger.debug,
        -    )
        -    def __init__(
        -        self,
        -        transform: transforms.Compose,
        -        device: torch.device,
        -        optimize_transform: bool,
        -    ):
        -        """Base class for detector pre processors.
        -
        -        All detector pre processors should subclass it.
-        All subclasses should overwrite:
        -
        -        - Methods:``run``, used for running the processing
        -
        -        Args:
        -            device (torch.device): Torch device cpu or cuda.
        -            transform (transforms.Compose): Transform compose object to be applied to the image.
        -            optimize_transform (bool): Whether to optimize the transform.
        -
        -        """
        -        super().__init__(transform, device, optimize_transform)
        -
        -    @abstractmethod
        -    def run(self, data: ImageData) -> ImageData:
        -        """Abstract method that runs the detector pre processing functionality.
        -        Returns a batch of preprocessed face tensors.
        -
        -        Args:
        -            data (ImageData): ImageData object containing the image tensor.
        -
        -        Returns:
        -            ImageData: ImageData object containing the image tensor preprocessed for the detector.
        -
        -        """
        -
        -
        -class DetectorPreProcessor(BaseDetPreProcessor):
        -    @Timer(
        -        "DetectorPreProcessor.__init__",
        -        "{name}: {milliseconds:.2f} ms",
        -        logger=logger.debug,
        -    )
        -    def __init__(
        -        self,
        -        transform: transforms.Compose,
        -        device: torch.device,
        -        optimize_transform: bool,
        -        reverse_colors: bool,
        -    ):
        -        """Initialize the detector preprocessor.
        -
        -        Args:
        -            transform (Compose): Composed Torch transform object.
        -            device (torch.device): Torch device cpu or cuda.
        -            optimize_transform (bool): Whether to optimize the transform.
        -            reverse_colors (bool): Whether to reverse the colors of the image tensor from RGB to BGR or vice versa. If False, the colors remain unchanged.
        -
        -        """
        -        super().__init__(transform, device, optimize_transform)
        -        self.reverse_colors = reverse_colors
        -
        -    @Timer(
        -        "DetectorPreProcessor.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug
        -    )
        -    def run(self, data: ImageData) -> ImageData:
        -        """Run the detector preprocessor on the image tensor in BGR format and return the transformed image tensor.
        -
        -        Args:
        -            data (ImageData): ImageData object containing the image tensor.
        -
        -        Returns:
        -            ImageData: ImageData object containing the preprocessed image tensor.
        -        """
        -        if data.tensor.device != self.device:
        -            data.tensor = data.tensor.to(self.device)
        -
        -        data.tensor = self.transform(data.tensor)
        -
        -        if self.reverse_colors:
        -            data.tensor = rgb2bgr(data.tensor)
        -
        -        return data
        -
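DetectorPreProcessor is normally instantiated by Hydra from the analyzer config; wired by hand it looks roughly like this (a sketch; the Normalize means are the customary RetinaFace BGR values and are shown only as an illustration):

# Hand-wired DetectorPreProcessor; Hydra normally builds this from config.
# The mean values are the customary RetinaFace means (illustrative).
import torch
from torchvision import transforms

preprocessor = DetectorPreProcessor(
    transform=transforms.Compose(
        [transforms.Normalize(mean=[104.0, 117.0, 123.0], std=[1.0, 1.0, 1.0])]
    ),
    device=torch.device("cpu"),
    optimize_transform=False,
    reverse_colors=True,  # flip RGB -> BGR for the downstream detector
)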
        @@ -227,23 +125,6 @@

        Returns

        ImageData
        ImageData object containing the image tensor preprocessed for the detector.
        -
-@abstractmethod
        -def run(self, data: ImageData) -> ImageData:
        -    """Abstract method that runs the detector pre processing functionality.
        -    Returns a batch of preprocessed face tensors.
        -
        -    Args:
        -        data (ImageData): ImageData object containing the image tensor.
        -
        -    Returns:
        -        ImageData: ImageData object containing the image tensor preprocessed for the detector.
        -
        -    """
        -

        Inherited members

        @@ -345,32 +226,6 @@

        Returns

        ImageData
        ImageData object containing the preprocessed image tensor.
        -
-@Timer(
        -    "DetectorPreProcessor.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug
        -)
        -def run(self, data: ImageData) -> ImageData:
        -    """Run the detector preprocessor on the image tensor in BGR format and return the transformed image tensor.
        -
        -    Args:
        -        data (ImageData): ImageData object containing the image tensor.
        -
        -    Returns:
        -        ImageData: ImageData object containing the preprocessed image tensor.
        -    """
        -    if data.tensor.device != self.device:
        -        data.tensor = data.tensor.to(self.device)
        -
        -    data.tensor = self.transform(data.tensor)
        -
        -    if self.reverse_colors:
        -        data.tensor = rgb2bgr(data.tensor)
        -
        -    return data
        -

        Inherited members

        @@ -432,7 +287,6 @@

        Inherited members


        Index

          @@ -462,7 +316,7 @@

-Generated by pdoc 0.10.0.
+Generated by pdoc 0.11.1.

-
\ No newline at end of file
+
diff --git a/docs/facetorch/analyzer/index.html b/docs/facetorch/analyzer/index.html
index 275ddc2..62128a8 100644
--- a/docs/facetorch/analyzer/index.html
+++ b/docs/facetorch/analyzer/index.html
@@ -2,18 +2,21 @@

facetorch.analyzer API documentation
          @@ -22,14 +25,6 @@

          Module facetorch.analyzer

          -
-from .core import FaceAnalyzer
          -
          -__all__ = ["FaceAnalyzer"]
          -

          Sub-modules

          @@ -318,123 +313,6 @@

          Returns

          Union[Response, ImageData]
          If return_img_data is False, returns a Response object containing the faces and their facial features. If return_img_data is True, returns the entire ImageData object.
          -
-@Timer("FaceAnalyzer.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
          -def run(
          -    self,
          -    image_source: Optional[
          -        Union[str, torch.Tensor, np.ndarray, bytes, Image.Image]
          -    ] = None,
          -    path_image: Optional[str] = None,
          -    batch_size: int = 8,
          -    fix_img_size: bool = False,
          -    return_img_data: bool = False,
          -    include_tensors: bool = False,
          -    path_output: Optional[str] = None,
          -    tensor: Optional[torch.Tensor] = None,
          -) -> Union[Response, ImageData]:
          -    """Reads image, detects faces, unifies the detected faces, predicts facial features
          -     and returns analyzed data.
          -
          -    Args:
          -        image_source (Optional[Union[str, torch.Tensor, np.ndarray, bytes, Image.Image]]): Input to be analyzed. If None, path_image or tensor must be provided. Default: None.
          -        path_image (Optional[str]): Path to the image to be analyzed. If None, tensor must be provided. Default: None.
          -        batch_size (int): Batch size for making predictions on the faces. Default is 8.
          -        fix_img_size (bool): If True, resizes the image to the size specified in reader. Default is False.
          -        return_img_data (bool): If True, returns all image data including tensors, otherwise only returns the faces. Default is False.
-        include_tensors (bool): If True, keeps tensors in the returned data object; if False, they are removed. Default is False.
-        path_output (Optional[str]): Path where the image with detected faces is saved. If None, the image is not saved. Default: None.
          -        tensor (Optional[torch.Tensor]): Image tensor to be analyzed. If None, path_image must be provided. Default: None.
          -
          -    Returns:
          -        Union[Response, ImageData]: If return_img_data is False, returns a Response object containing the faces and their facial features. If return_img_data is True, returns the entire ImageData object.
          -
          -    """
          -
          -    def _predict_batch(
          -        data: ImageData, predictor: FacePredictor, predictor_name: str
          -    ) -> ImageData:
          -        n_faces = len(data.faces)
          -
          -        for face_indx_start in range(0, n_faces, batch_size):
          -            face_indx_end = min(face_indx_start + batch_size, n_faces)
          -
          -            face_batch_tensor = torch.stack(
          -                [face.tensor for face in data.faces[face_indx_start:face_indx_end]]
          -            )
          -            preds = predictor.run(face_batch_tensor)
          -            data.add_preds(preds, predictor_name, face_indx_start)
          -
          -        return data
          -
          -    self.logger.info("Running FaceAnalyzer")
          -
          -    if path_image is None and tensor is None and image_source is None:
-        raise ValueError("Either image_source, path_image or tensor must be provided.")
          -
          -    if image_source is not None:
          -        self.logger.debug("Using image_source as input")
          -        reader_input = image_source
          -    elif path_image is not None:
          -        self.logger.debug(
          -            "Using path_image as input", extra={"path_image": path_image}
          -        )
          -        reader_input = path_image
          -    else:
          -        self.logger.debug("Using tensor as input")
          -        reader_input = tensor
          -
          -    self.logger.info("Reading image", extra={"input": reader_input})
          -    data = self.reader.run(reader_input, fix_img_size=fix_img_size)
          -
          -    path_output = None if path_output == "None" else path_output
          -    data.path_output = path_output
          -
          -    try:
          -        data.version = version("facetorch")
          -    except Exception as e:
          -        self.logger.warning("Could not get version number", extra={"error": e})
          -
          -    self.logger.info("Detecting faces")
          -    data = self.detector.run(data)
          -    n_faces = len(data.faces)
          -    self.logger.info(f"Number of faces: {n_faces}")
          -
          -    if n_faces > 0 and self.unifier is not None:
          -        self.logger.info("Unifying faces")
          -        data = self.unifier.run(data)
          -
          -        self.logger.info("Predicting facial features")
          -        for predictor_name, predictor in self.predictors.items():
          -            self.logger.info(f"Running FacePredictor: {predictor_name}")
          -            data = _predict_batch(data, predictor, predictor_name)
          -
          -        self.logger.info("Utilizing facial features")
          -        for utilizer_name, utilizer in self.utilizers.items():
          -            self.logger.info(f"Running BaseUtilizer: {utilizer_name}")
          -            data = utilizer.run(data)
          -    else:
          -        if "save" in self.utilizers:
          -            self.utilizers["save"].run(data)
          -
          -    if not include_tensors:
          -        self.logger.debug(
          -            "Removing tensors from response as include_tensors is False"
          -        )
          -        data.reset_tensors()
          -
          -    response = Response(faces=data.faces, version=data.version)
          -
          -    if return_img_data:
          -        self.logger.debug("Returning image data object", extra=data.__dict__)
          -        return data
          -    else:
          -        self.logger.debug("Returning response with faces", extra=response.__dict__)
          -        return response
          -
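The two return shapes of run are worth contrasting side by side (a sketch; analyzer and the image path are assumed from the earlier example):

# Slim Response vs. full ImageData; `analyzer` is assumed from above.
response = analyzer.run(path_image="test.jpg")  # -> Response
data = analyzer.run(
    path_image="test.jpg",
    return_img_data=True,
    include_tensors=True,  # keep image and face tensors on the result
)                                               # -> ImageData
print(len(response.faces), len(data.faces))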
          @@ -488,7 +366,6 @@

          Returns


          Index

            @@ -522,7 +399,7 @@

-Generated by pdoc 0.10.0.
+Generated by pdoc 0.11.1.

-
\ No newline at end of file
+
diff --git a/docs/facetorch/analyzer/predictor/core.html b/docs/facetorch/analyzer/predictor/core.html
index 2e3c553..b51040a 100644
--- a/docs/facetorch/analyzer/predictor/core.html
+++ b/docs/facetorch/analyzer/predictor/core.html
@@ -2,18 +2,21 @@

facetorch.analyzer.predictor.core API documentation
            @@ -22,66 +25,6 @@

            Module facetorch.analyzer.predictor.core

            -
-from typing import List
            -
            -import torch
            -from codetiming import Timer
            -from facetorch.base import BaseDownloader, BaseModel
            -from facetorch.datastruct import Prediction
            -from facetorch.logger import LoggerJsonFile
            -
            -from .post import BasePredPostProcessor
            -from .pre import BasePredPreProcessor
            -
            -logger = LoggerJsonFile().logger
            -
            -
            -class FacePredictor(BaseModel):
            -    @Timer(
            -        "FacePredictor.__init__", "{name}: {milliseconds:.2f} ms", logger=logger.debug
            -    )
            -    def __init__(
            -        self,
            -        downloader: BaseDownloader,
            -        device: torch.device,
            -        preprocessor: BasePredPreProcessor,
            -        postprocessor: BasePredPostProcessor,
            -        **kwargs
            -    ):
            -        """FacePredictor is a wrapper around a neural network model that is trained to predict facial features.
            -
            -        Args:
            -            downloader (BaseDownloader): Downloader that downloads the model.
            -            device (torch.device): Torch device cpu or cuda for the model.
-            preprocessor (BasePredPreProcessor): Preprocessor that runs before the model.
            -            postprocessor (BasePredPostProcessor): Postprocessor that runs after the model.
            -        """
            -        self.__dict__.update(kwargs)
            -        super().__init__(downloader, device)
            -
            -        self.preprocessor = preprocessor
            -        self.postprocessor = postprocessor
            -
            -    @Timer("FacePredictor.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
            -    def run(self, faces: torch.Tensor) -> List[Prediction]:
            -        """Predicts facial features.
            -
            -        Args:
            -            faces (torch.Tensor): Torch tensor containing a batch of faces with values between 0-1 and shape (batch_size, channels, height, width).
            -
            -        Returns:
            -            (List[Prediction]): List of Prediction data objects. One for each face in the batch.
            -        """
            -        faces = self.preprocessor.run(faces)
            -        preds = self.inference(faces)
            -        preds_list = self.postprocessor.run(preds)
            -
            -        return preds_list
            -
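FacePredictor.run takes one stacked batch of face crops and returns one Prediction per row. As a contract sketch (the predictor instance and the 224x224 crop size are assumptions, not values fixed by the library):

# Contract sketch for FacePredictor.run; `predictor` and the crop size
# are assumptions, not library-mandated values.
import torch

faces = torch.rand(4, 3, 224, 224)   # batch of face crops, values in 0-1
preds = predictor.run(faces)
assert len(preds) == faces.shape[0]  # one Prediction per face
print(preds[0].label)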
            @@ -173,26 +116,6 @@

            Args

            Returns

            (List[Prediction]): List of Prediction data objects. One for each face in the batch.

            -
-@Timer("FacePredictor.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
            -def run(self, faces: torch.Tensor) -> List[Prediction]:
            -    """Predicts facial features.
            -
            -    Args:
            -        faces (torch.Tensor): Torch tensor containing a batch of faces with values between 0-1 and shape (batch_size, channels, height, width).
            -
            -    Returns:
            -        (List[Prediction]): List of Prediction data objects. One for each face in the batch.
            -    """
            -    faces = self.preprocessor.run(faces)
            -    preds = self.inference(faces)
            -    preds_list = self.postprocessor.run(preds)
            -
            -    return preds_list
            -

            Inherited members

            @@ -255,7 +178,6 @@

            Inherited members


            Index

              @@ -279,7 +201,7 @@

-Generated by pdoc 0.10.0.
+Generated by pdoc 0.11.1.

-
\ No newline at end of file
+
diff --git a/docs/facetorch/analyzer/predictor/index.html b/docs/facetorch/analyzer/predictor/index.html
index 9e958df..913af84 100644
--- a/docs/facetorch/analyzer/predictor/index.html
+++ b/docs/facetorch/analyzer/predictor/index.html
@@ -2,18 +2,21 @@

facetorch.analyzer.predictor API documentation
              @@ -22,14 +25,6 @@

              Module facetorch.analyzer.predictor

              -
-from .core import FacePredictor
              -
              -__all__ = ["FacePredictor"]
              -

              Sub-modules

              @@ -136,26 +131,6 @@

              Args

              Returns

              (List[Prediction]): List of Prediction data objects. One for each face in the batch.

              -
-@Timer("FacePredictor.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
              -def run(self, faces: torch.Tensor) -> List[Prediction]:
              -    """Predicts facial features.
              -
              -    Args:
              -        faces (torch.Tensor): Torch tensor containing a batch of faces with values between 0-1 and shape (batch_size, channels, height, width).
              -
              -    Returns:
              -        (List[Prediction]): List of Prediction data objects. One for each face in the batch.
              -    """
              -    faces = self.preprocessor.run(faces)
              -    preds = self.inference(faces)
              -    preds_list = self.postprocessor.run(preds)
              -
              -    return preds_list
              -

              Inherited members

              @@ -218,7 +193,6 @@

              Inherited members


              Index

                @@ -249,7 +223,7 @@

-Generated by pdoc 0.10.0.
+Generated by pdoc 0.11.1.

-
\ No newline at end of file
+
diff --git a/docs/facetorch/analyzer/predictor/post.html b/docs/facetorch/analyzer/predictor/post.html
index fc91e7a..6d7fbaa 100644
--- a/docs/facetorch/analyzer/predictor/post.html
+++ b/docs/facetorch/analyzer/predictor/post.html
@@ -2,18 +2,21 @@

facetorch.analyzer.predictor.post API documentation
                @@ -22,326 +25,6 @@

                Module facetorch.analyzer.predictor.post

                -
                - -Expand source code - -
                from abc import abstractmethod
                -from typing import List, Optional, Tuple, Union
                -
                -import torch
                -from codetiming import Timer
                -from itertools import compress
                -from facetorch.base import BaseProcessor
                -from facetorch.datastruct import Prediction
                -from facetorch.logger import LoggerJsonFile
                -from torchvision import transforms
                -
                -logger = LoggerJsonFile().logger
                -
                -
                -class BasePredPostProcessor(BaseProcessor):
                -    @Timer(
                -        "BasePredPostProcessor.__init__",
                -        "{name}: {milliseconds:.2f} ms",
                -        logger=logger.debug,
                -    )
                -    def __init__(
                -        self,
                -        transform: transforms.Compose,
                -        device: torch.device,
                -        optimize_transform: bool,
                -        labels: List[str],
                -    ):
                -        """Base class for predictor post processors.
                -
                -        All predictor post processors should subclass it.
-        All subclasses should overwrite:
                -
                -        - Methods:``run``, used for running the processing
                -
                -        Args:
                -            device (torch.device): Torch device cpu or cuda.
                -            transform (transforms.Compose): Transform compose object to be applied to the image.
                -            optimize_transform (bool): Whether to optimize the transform.
                -            labels (List[str]): List of labels.
                -
                -        """
                -        super().__init__(transform, device, optimize_transform)
                -        self.labels = labels
                -
                -    def create_pred_list(
                -        self, preds: torch.Tensor, indices: List[int]
                -    ) -> List[Prediction]:
                -        """Create a list of predictions.
                -
                -        Args:
                -            preds (torch.Tensor): Tensor of predictions, shape (batch, _).
                -            indices (List[int]): List of label indices, one for each sample.
                -
                -        Returns:
                -            List[Prediction]: List of predictions.
                -
                -        """
                -        assert (
                -            len(indices) == preds.shape[0]
                -        ), "Predictions and indices must have the same length."
                -
                -        pred_labels = [self.labels[indx] for indx in indices]
                -
                -        pred_list = []
                -        for i, label in enumerate(pred_labels):
                -            pred = Prediction(label, preds[i])
                -            pred_list.append(pred)
                -        return pred_list
                -
                -    @abstractmethod
                -    def run(self, preds: Union[torch.Tensor, Tuple[torch.Tensor]]) -> List[Prediction]:
                -        """Abstract method that runs the predictor post processing functionality and returns a list of prediction data structures, one for each face in the batch.
                -
                -        Args:
                -            preds (Union[torch.Tensor, Tuple[torch.Tensor]]): Output of the predictor model.
                -
                -        Returns:
                -            List[Prediction]: List of predictions.
                -
                -        """
                -
                -
                -class PostArgMax(BasePredPostProcessor):
                -    @Timer("PostArgMax.__init__", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                -    def __init__(
                -        self,
                -        transform: transforms.Compose,
                -        device: torch.device,
                -        optimize_transform: bool,
                -        labels: List[str],
                -        dim: int,
                -    ):
                -        """Initialize the predictor postprocessor that runs argmax on the prediction tensor and returns a list of prediction data structures.
                -
                -        Args:
                -            transform (Compose): Composed Torch transform object.
                -            device (torch.device): Torch device cpu or cuda.
                -            optimize_transform (bool): Whether to optimize the transform using TorchScript.
                -            labels (List[str]): List of labels.
                -            dim (int): Axis along which to apply the argmax.
                -        """
                -        super().__init__(transform, device, optimize_transform, labels)
                -        self.dim = dim
                -
                -    @Timer("PostArgMax.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                -    def run(self, preds: torch.Tensor) -> List[Prediction]:
                -        """Post-processes the prediction tensor using argmax and returns a list of prediction data structures, one for each face.
                -
                -        Args:
                -            preds (torch.Tensor): Batch prediction tensor.
                -
                -        Returns:
                -            List[Prediction]: List of prediction data structures containing the predicted labels and confidence scores for each face in the batch.
                -        """
                -        indices = torch.argmax(preds, dim=self.dim).cpu().numpy().tolist()
                -        pred_list = self.create_pred_list(preds, indices)
                -
                -        return pred_list
                -
                -
                -class PostSigmoidBinary(BasePredPostProcessor):
                -    @Timer(
                -        "PostSigmoidBinary.__init__",
                -        "{name}: {milliseconds:.2f} ms",
                -        logger=logger.debug,
                -    )
                -    def __init__(
                -        self,
                -        transform: transforms.Compose,
                -        device: torch.device,
                -        optimize_transform: bool,
                -        labels: List[str],
                -        threshold: float = 0.5,
                -    ):
                -        """Initialize the predictor postprocessor that runs sigmoid on the prediction tensor and returns a list of prediction data structures.
                -
                -        Args:
                -            transform (Compose): Composed Torch transform object.
                -            device (torch.device): Torch device cpu or cuda.
                -            optimize_transform (bool): Whether to optimize the transform using TorchScript.
                -            labels (List[str]): List of labels.
                -            threshold (float): Probability threshold for positive class.
                -        """
                -        super().__init__(transform, device, optimize_transform, labels)
                -        self.threshold = threshold
                -
                -    @Timer(
                -        "PostSigmoidBinary.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug
                -    )
                -    def run(self, preds: torch.Tensor) -> List[Prediction]:
                -        """Post-processes the prediction tensor using argmax and returns a list of prediction data structures, one for each face.
                -
                -        Args:
                -            preds (torch.Tensor): Batch prediction tensor.
                -
                -        Returns:
-            List[Prediction]: List of prediction data structures containing the predicted labels and confidence scores for each face in the batch.
                -        """
                -        preds = torch.sigmoid(preds.squeeze(1))
                -        preds_thresh = preds.where(preds >= self.threshold, torch.zeros_like(preds))
                -        indices = torch.round(preds_thresh)
                -        indices = indices.cpu().numpy().astype(int).tolist()
                -        pred_list = self.create_pred_list(preds, indices)
                -
                -        return pred_list
                -
                -
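A standalone sketch of the sigmoid-and-threshold step above; the logits are made up for illustration:

    import torch

    logits = torch.tensor([[1.2], [-0.4], [0.1]])                   # shape (batch, 1)
    probs = torch.sigmoid(logits.squeeze(1))                        # ~[0.77, 0.40, 0.52]
    threshold = 0.5
    # Zero out sub-threshold scores, then round to a 0/1 label index.
    probs_thresh = probs.where(probs >= threshold, torch.zeros_like(probs))
    indices = torch.round(probs_thresh).cpu().numpy().astype(int).tolist()  # [1, 0, 1]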
                -class PostEmbedder(BasePredPostProcessor):
                -    def __init__(
                -        self,
                -        transform: transforms.Compose,
                -        device: torch.device,
                -        optimize_transform: bool,
                -        labels: List[str],
                -    ):
                -        """Initialize the predictor postprocessor that extracts the embedding from the prediction tensor and returns a list of prediction data structures.
                -
                -        Args:
                -            transform (Compose): Composed Torch transform object.
                -            device (torch.device): Torch device cpu or cuda.
                -            optimize_transform (bool): Whether to optimize the transform using TorchScript.
                -            labels (List[str]): List of labels.
                -        """
                -        super().__init__(transform, device, optimize_transform, labels)
                -
                -    @Timer("PostEmbedder.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                -    def run(self, preds: torch.Tensor) -> List[Prediction]:
                -        """Extracts the embedding from the prediction tensor and returns a list of prediction data structures, one for each face.
                -
                -        Args:
                -            preds (torch.Tensor): Batch prediction tensor.
                -
                -        Returns:
                -            List[Prediction]: List of prediction data structures containing the predicted embeddings.
                -        """
                -        if isinstance(preds, tuple):
                -            preds = preds[0]
                -
                -        indices = [0] * preds.shape[0]
                -        pred_list = self.create_pred_list(preds, indices)
                -
                -        return pred_list
                -
                -
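The embedder post-processor does no scoring at all; a sketch of its indexing, with a hypothetical embedding batch:

    import torch

    embeddings = torch.randn(4, 512)       # hypothetical: one 512-d embedding per face
    if isinstance(embeddings, tuple):      # some models return (embeddings, extras)
        embeddings = embeddings[0]
    indices = [0] * embeddings.shape[0]    # every face gets the single placeholder label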
                -class PostMultiLabel(BasePredPostProcessor):
                -    def __init__(
                -        self,
                -        transform: transforms.Compose,
                -        device: torch.device,
                -        optimize_transform: bool,
                -        labels: List[str],
                -        dim: int,
                -        threshold: float = 0.5,
                -    ):
                -        """Initialize the predictor postprocessor that extracts multiple labels from the confidence scores.
                -
                -        Args:
                -            transform (Compose): Composed Torch transform object.
                -            device (torch.device): Torch device cpu or cuda.
                -            optimize_transform (bool): Whether to optimize the transform using TorchScript.
                -            labels (List[str]): List of labels.
-            dim (int): Axis along which to apply the argmax.
                -            threshold (float): Probability threshold for including a label. Only labels with a confidence score above the threshold are included. Defaults to 0.5.
                -        """
                -        super().__init__(transform, device, optimize_transform, labels)
                -        self.dim = dim
                -        self.threshold = threshold
                -
                -    @Timer("PostMultiLabel.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                -    def run(self, preds: torch.Tensor) -> List[Prediction]:
                -        """Extracts multiple labels and puts them in other[multi] predictions. The most likely label is put in the label field. Confidence scores are returned in the logits field.
                -
                -        Args:
                -            preds (torch.Tensor): Batch prediction tensor.
                -
                -        Returns:
-            List[Prediction]: List of prediction data structures containing the most likely label, confidence scores, and multiple labels for each face.
                -        """
                -        if isinstance(preds, tuple):
                -            preds = preds[0]
                -
                -        indices = torch.argmax(preds, dim=self.dim).cpu().numpy().tolist()
                -
                -        pred_list = []
                -        for i in range(preds.shape[0]):
                -            preds_sample = preds[i]
                -            label_filter = (preds_sample > self.threshold).cpu().numpy().tolist()
                -            labels_true = list(compress(self.labels, label_filter))
                -            pred = Prediction(
                -                label=self.labels[indices[i]],
                -                logits=preds_sample,
                -                other={"multi": labels_true},
                -            )
                -            pred_list.append(pred)
                -
                -        return pred_list
                -
                -
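A sketch of the multi-label selection above, using itertools.compress on a made-up label set and score vector:

    import torch
    from itertools import compress

    labels = ["glasses", "beard", "hat"]            # hypothetical label set
    scores = torch.tensor([0.9, 0.2, 0.7])          # one face's confidence scores
    mask = (scores > 0.5).cpu().numpy().tolist()    # [True, False, True]
    multi = list(compress(labels, mask))            # ["glasses", "hat"] -> other["multi"]
    top = labels[int(torch.argmax(scores))]         # "glasses" -> the label field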
                -class PostLabelConfidencePairs(BasePredPostProcessor):
                -    def __init__(
                -        self,
                -        transform: transforms.Compose,
                -        device: torch.device,
                -        optimize_transform: bool,
                -        labels: List[str],
                -        offsets: Optional[List[float]] = None,
                -    ):
                -        """Initialize the predictor postprocessor that zips the confidence scores with the labels.
                -
                -        Args:
                -            transform (Compose): Composed Torch transform object.
                -            device (torch.device): Torch device cpu or cuda.
                -            optimize_transform (bool): Whether to optimize the transform using TorchScript.
                -            labels (List[str]): List of labels.
                -            offsets (Optional[List[float]], optional): List of offsets to add to the confidence scores. Defaults to None.
                -        """
                -        super().__init__(transform, device, optimize_transform, labels)
                -
                -        if offsets is None:
                -            offsets = [0] * len(labels)
                -        self.offsets = offsets
                -
                -    @Timer(
                -        "PostLabelConfidencePairs.run",
                -        "{name}: {milliseconds:.2f} ms",
                -        logger=logger.debug,
                -    )
                -    def run(self, preds: torch.Tensor) -> List[Prediction]:
                -        """Extracts the confidence scores and puts them in other[label] predictions.
                -
                -        Args:
                -            preds (torch.Tensor): Batch prediction tensor.
                -
                -        Returns:
                -            List[Prediction]: List of prediction data structures containing the logits and label logit pairs.
                -        """
                -        if isinstance(preds, tuple):
                -            preds = preds[0]
                -
                -        pred_list = []
                -        for i in range(preds.shape[0]):
                -            preds_sample = preds[i]
                -            preds_sample_list = preds_sample.cpu().numpy().tolist()
                -            other_labels = {
                -                label: preds_sample_list[j] + self.offsets[j]
                -                for j, label in enumerate(self.labels)
                -            }
                -            pred = Prediction(
                -                label="other",
                -                logits=preds_sample,
                -                other=other_labels,
                -            )
                -            pred_list.append(pred)
                -
                -        return pred_list
                -
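A sketch of the label-score zipping above, with hypothetical labels, scores, and offsets:

    labels = ["valence", "arousal"]     # hypothetical label set
    scores = [0.31, -0.12]              # one face's raw model outputs
    offsets = [0.0, 0.05]               # optional per-label corrections
    other = {label: scores[j] + offsets[j] for j, label in enumerate(labels)}
    # -> {"valence": 0.31, "arousal": -0.07}, stored under Prediction.other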
                @@ -476,35 +159,6 @@

                Returns

                List[Prediction]
                List of predictions.
                -
                def create_pred_list(
                -    self, preds: torch.Tensor, indices: List[int]
                -) -> List[Prediction]:
                -    """Create a list of predictions.
                -
                -    Args:
                -        preds (torch.Tensor): Tensor of predictions, shape (batch, _).
                -        indices (List[int]): List of label indices, one for each sample.
                -
                -    Returns:
                -        List[Prediction]: List of predictions.
                -
                -    """
                -    assert (
                -        len(indices) == preds.shape[0]
                -    ), "Predictions and indices must have the same length."
                -
                -    pred_labels = [self.labels[indx] for indx in indices]
                -
                -    pred_list = []
                -    for i, label in enumerate(pred_labels):
                -        pred = Prediction(label, preds[i])
                -        pred_list.append(pred)
                -    return pred_list
                -
def run(self, preds: Union[torch.Tensor, Tuple[torch.Tensor]]) ‑> List[Prediction]

@@ -521,22 +175,6 @@

                Returns

                List[Prediction]
                List of predictions.
                -
                @abstractmethod
                -def run(self, preds: Union[torch.Tensor, Tuple[torch.Tensor]]) -> List[Prediction]:
                -    """Abstract method that runs the predictor post processing functionality and returns a list of prediction data structures, one for each face in the batch.
                -
                -    Args:
                -        preds (Union[torch.Tensor, Tuple[torch.Tensor]]): Output of the predictor model.
                -
                -    Returns:
                -        List[Prediction]: List of predictions.
                -
                -    """
                -

                Inherited members

                @@ -630,25 +268,6 @@

                Returns

                List[Prediction]
                List of prediction data structures containing the predicted labels and confidence scores for each face in the batch.
                -
                @Timer("PostArgMax.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                -def run(self, preds: torch.Tensor) -> List[Prediction]:
                -    """Post-processes the prediction tensor using argmax and returns a list of prediction data structures, one for each face.
                -
                -    Args:
                -        preds (torch.Tensor): Batch prediction tensor.
                -
                -    Returns:
                -        List[Prediction]: List of prediction data structures containing the predicted labels and confidence scores for each face in the batch.
                -    """
                -    indices = torch.argmax(preds, dim=self.dim).cpu().numpy().tolist()
                -    pred_list = self.create_pred_list(preds, indices)
                -
                -    return pred_list
                -

                Inherited members

                @@ -752,30 +371,6 @@

                Returns

                List[Prediction]
List of prediction data structures containing the predicted labels and confidence scores for each face in the batch.
                -
                @Timer(
                -    "PostSigmoidBinary.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug
                -)
                -def run(self, preds: torch.Tensor) -> List[Prediction]:
                -    """Post-processes the prediction tensor using argmax and returns a list of prediction data structures, one for each face.
                -
                -    Args:
                -        preds (torch.Tensor): Batch prediction tensor.
                -
                -    Returns:
-        List[Prediction]: List of prediction data structures containing the predicted labels and confidence scores for each face in the batch.
                -    """
                -    preds = torch.sigmoid(preds.squeeze(1))
                -    preds_thresh = preds.where(preds >= self.threshold, torch.zeros_like(preds))
                -    indices = torch.round(preds_thresh)
                -    indices = indices.cpu().numpy().astype(int).tolist()
                -    pred_list = self.create_pred_list(preds, indices)
                -
                -    return pred_list
                -

                Inherited members

                @@ -867,28 +462,6 @@

                Returns

                List[Prediction]
                List of prediction data structures containing the predicted embeddings.
                -
                @Timer("PostEmbedder.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                -def run(self, preds: torch.Tensor) -> List[Prediction]:
                -    """Extracts the embedding from the prediction tensor and returns a list of prediction data structures, one for each face.
                -
                -    Args:
                -        preds (torch.Tensor): Batch prediction tensor.
                -
                -    Returns:
                -        List[Prediction]: List of prediction data structures containing the predicted embeddings.
                -    """
                -    if isinstance(preds, tuple):
                -        preds = preds[0]
                -
                -    indices = [0] * preds.shape[0]
                -    pred_list = self.create_pred_list(preds, indices)
                -
                -    return pred_list
                -

                Inherited members

                @@ -1001,39 +574,6 @@

                Returns

                List[Prediction]
List of prediction data structures containing the most likely label, confidence scores, and multiple labels for each face.
                -
                @Timer("PostMultiLabel.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                -def run(self, preds: torch.Tensor) -> List[Prediction]:
                -    """Extracts multiple labels and puts them in other[multi] predictions. The most likely label is put in the label field. Confidence scores are returned in the logits field.
                -
                -    Args:
                -        preds (torch.Tensor): Batch prediction tensor.
                -
                -    Returns:
-        List[Prediction]: List of prediction data structures containing the most likely label, confidence scores, and multiple labels for each face.
                -    """
                -    if isinstance(preds, tuple):
                -        preds = preds[0]
                -
                -    indices = torch.argmax(preds, dim=self.dim).cpu().numpy().tolist()
                -
                -    pred_list = []
                -    for i in range(preds.shape[0]):
                -        preds_sample = preds[i]
                -        label_filter = (preds_sample > self.threshold).cpu().numpy().tolist()
                -        labels_true = list(compress(self.labels, label_filter))
                -        pred = Prediction(
                -            label=self.labels[indices[i]],
                -            logits=preds_sample,
                -            other={"multi": labels_true},
                -        )
                -        pred_list.append(pred)
                -
                -    return pred_list
                -

                Inherited members

                @@ -1149,44 +689,6 @@

                Returns

                List[Prediction]
                List of prediction data structures containing the logits and label logit pairs.
                -
                @Timer(
                -    "PostLabelConfidencePairs.run",
                -    "{name}: {milliseconds:.2f} ms",
                -    logger=logger.debug,
                -)
                -def run(self, preds: torch.Tensor) -> List[Prediction]:
                -    """Extracts the confidence scores and puts them in other[label] predictions.
                -
                -    Args:
                -        preds (torch.Tensor): Batch prediction tensor.
                -
                -    Returns:
                -        List[Prediction]: List of prediction data structures containing the logits and label logit pairs.
                -    """
                -    if isinstance(preds, tuple):
                -        preds = preds[0]
                -
                -    pred_list = []
                -    for i in range(preds.shape[0]):
                -        preds_sample = preds[i]
                -        preds_sample_list = preds_sample.cpu().numpy().tolist()
                -        other_labels = {
                -            label: preds_sample_list[j] + self.offsets[j]
                -            for j, label in enumerate(self.labels)
                -        }
                -        pred = Prediction(
                -            label="other",
                -            logits=preds_sample,
                -            other=other_labels,
                -        )
                -        pred_list.append(pred)
                -
                -    return pred_list
                -

                Inherited members

                @@ -1249,7 +751,6 @@

                Inherited members


                Index

                  @@ -1304,7 +805,7 @@

-
\ No newline at end of file
+
diff --git a/docs/facetorch/analyzer/predictor/pre.html b/docs/facetorch/analyzer/predictor/pre.html
index f3e506d..2eaa960 100644
--- a/docs/facetorch/analyzer/predictor/pre.html
+++ b/docs/facetorch/analyzer/predictor/pre.html
@@ -2,18 +2,21 @@
 facetorch.analyzer.predictor.pre API documentation
                  @@ -22,103 +25,6 @@

                  Module facetorch.analyzer.predictor.pre

                  -
                  from abc import abstractmethod
                  -
                  -import torch
                  -from codetiming import Timer
                  -from facetorch.base import BaseProcessor
                  -from facetorch.logger import LoggerJsonFile
                  -from facetorch.utils import rgb2bgr
                  -from torchvision import transforms
                  -
                  -logger = LoggerJsonFile().logger
                  -
                  -
                  -class BasePredPreProcessor(BaseProcessor):
                  -    @Timer(
                  -        "BasePredPreProcessor.__init__",
                  -        "{name}: {milliseconds:.2f} ms",
                  -        logger=logger.debug,
                  -    )
                  -    def __init__(
                  -        self,
                  -        transform: transforms.Compose,
                  -        device: torch.device,
                  -        optimize_transform: bool,
                  -    ):
                  -        """Base class for predictor pre processors.
                  -
                  -        All predictor pre processors should subclass it.
-        All subclasses should overwrite:
-
-        - Methods: ``run``, used for running the processing
                  -
                  -        Args:
                  -            device (torch.device): Torch device cpu or cuda.
-            transform (transforms.Compose): Transform compose object to be applied to the image.
                  -            optimize_transform (bool): Whether to optimize the transform.
                  -
                  -        """
                  -        super().__init__(transform, device, optimize_transform)
                  -
                  -    @abstractmethod
                  -    def run(self, faces: torch.Tensor) -> torch.Tensor:
                  -        """Abstract method that runs the predictor pre processing functionality and returns a batch of preprocessed face tensors.
                  -
                  -        Args:
                  -            faces (torch.Tensor): Batch of face tensors with shape (batch, channels, height, width).
                  -
                  -        Returns:
                  -            torch.Tensor: Batch of preprocessed face tensors with shape (batch, channels, height, width).
                  -
                  -        """
                  -
                  -
                  -class PredictorPreProcessor(BasePredPreProcessor):
                  -    def __init__(
                  -        self,
                  -        transform: transforms.Compose,
                  -        device: torch.device,
                  -        optimize_transform: bool,
                  -        reverse_colors: bool = False,
                  -    ):
                  -        """Torch transform based pre-processor that is applied to face tensors before they are passed to the predictor model.
                  -
                  -        Args:
                  -            transform (transforms.Compose): Composed Torch transform object.
                  -            device (torch.device): Torch device cpu or cuda.
                  -            optimize_transform (bool): Whether to optimize the transform.
                  -            reverse_colors (bool): Whether to reverse the colors of the image tensor
                  -        """
                  -        super().__init__(transform, device, optimize_transform)
                  -        self.reverse_colors = reverse_colors
                  -
                  -    @Timer(
                  -        "PredictorPreProcessor.run",
                  -        "{name}: {milliseconds:.2f} ms",
                  -        logger=logger.debug,
                  -    )
                  -    def run(self, faces: torch.Tensor) -> torch.Tensor:
                  -        """Runs the trasform on a batch of face tensors.
                  -
                  -        Args:
                  -            faces (torch.Tensor): Batch of face tensors.
                  -
                  -        Returns:
                  -            torch.Tensor: Batch of preprocessed face tensors.
                  -        """
                  -        if faces.device != self.device:
                  -            faces = faces.to(self.device)
                  -
                  -        faces = self.transform(faces)
                  -        if self.reverse_colors:
                  -            faces = rgb2bgr(faces)
                  -        return faces
                  -
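A sketch of the pre-processing flow above; the tensor is made up, and the channel flip stands in for facetorch.utils.rgb2bgr (an assumption about its effect, not its exact implementation):

    import torch

    device = torch.device("cpu")
    faces = torch.randint(0, 256, (2, 3, 112, 112)).float()   # hypothetical face batch
    if faces.device != device:                                 # move to the target device first
        faces = faces.to(device)
    # reverse_colors=True would swap RGB -> BGR; flipping the channel
    # axis of a (batch, C, H, W) tensor has that effect:
    faces_bgr = torch.flip(faces, dims=[1])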
                  @@ -218,22 +124,6 @@

                  Returns

                  torch.Tensor
                  Batch of preprocessed face tensors with shape (batch, channels, height, width).
                  -
                  @abstractmethod
                  -def run(self, faces: torch.Tensor) -> torch.Tensor:
                  -    """Abstract method that runs the predictor pre processing functionality and returns a batch of preprocessed face tensors.
                  -
                  -    Args:
                  -        faces (torch.Tensor): Batch of face tensors with shape (batch, channels, height, width).
                  -
                  -    Returns:
                  -        torch.Tensor: Batch of preprocessed face tensors with shape (batch, channels, height, width).
                  -
                  -    """
                  -

                  Inherited members

                  @@ -329,32 +219,6 @@

                  Returns

                  torch.Tensor
                  Batch of preprocessed face tensors.
                  -
                  @Timer(
                  -    "PredictorPreProcessor.run",
                  -    "{name}: {milliseconds:.2f} ms",
                  -    logger=logger.debug,
                  -)
                  -def run(self, faces: torch.Tensor) -> torch.Tensor:
                  -    """Runs the trasform on a batch of face tensors.
                  -
                  -    Args:
                  -        faces (torch.Tensor): Batch of face tensors.
                  -
                  -    Returns:
                  -        torch.Tensor: Batch of preprocessed face tensors.
                  -    """
                  -    if faces.device != self.device:
                  -        faces = faces.to(self.device)
                  -
                  -    faces = self.transform(faces)
                  -    if self.reverse_colors:
                  -        faces = rgb2bgr(faces)
                  -    return faces
                  -

                  Inherited members

                  @@ -416,7 +280,6 @@

                  Inherited members


                  Index

                    @@ -446,7 +309,7 @@

-
\ No newline at end of file
+
diff --git a/docs/facetorch/analyzer/reader/core.html b/docs/facetorch/analyzer/reader/core.html
index 549505f..197a44b 100644
--- a/docs/facetorch/analyzer/reader/core.html
+++ b/docs/facetorch/analyzer/reader/core.html
@@ -2,18 +2,21 @@
 facetorch.analyzer.reader.core API documentation
                    @@ -22,194 +25,6 @@

                    Module facetorch.analyzer.reader.core

                    -
                    import io
                    -import requests
                    -from PIL import Image
                    -import numpy as np
                    -import torch
                    -import torchvision
                    -from codetiming import Timer
                    -from typing import Union
                    -from facetorch.base import BaseReader
                    -from facetorch.datastruct import ImageData
                    -from facetorch.logger import LoggerJsonFile
                    -
                    -logger = LoggerJsonFile().logger
                    -
                    -
                    -class UniversalReader(BaseReader):
                    -    def __init__(
                    -        self,
                    -        transform: torchvision.transforms.Compose,
                    -        device: torch.device,
                    -        optimize_transform: bool,
                    -    ):
                    -        """UniversalReader can read images from a path, URL, tensor, numpy array, bytes or PIL Image and return an ImageData object containing the image tensor.
                    -
                    -        Args:
-            transform (torchvision.transforms.Compose): Transform compose object to be applied to the image if fix_img_size is True.
-            device (torch.device): Torch device cpu or cuda object.
-            optimize_transform (bool): Whether to optimize the transforms (resizing the image to a fixed size).
                    -
                    -        """
                    -        super().__init__(transform, device, optimize_transform)
                    -
                    -    @Timer("UniversalReader.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                    -    def run(
                    -        self,
                    -        image_source: Union[str, torch.Tensor, np.ndarray, bytes, Image.Image],
                    -        fix_img_size: bool = False,
                    -    ) -> ImageData:
                    -        """Reads an image from a path, URL, tensor, numpy array, bytes or PIL Image and returns a tensor of the image with values between 0-255 and shape (batch, channels, height, width). The order of color channels is RGB. PyTorch and Torchvision are used to read the image.
                    -
                    -        Args:
                    -            image_source (Union[str, torch.Tensor, np.ndarray, bytes, Image.Image]): Image source to be read.
                    -            fix_img_size (bool): Whether to resize the image to a fixed size. If False, the size_portrait and size_landscape are ignored. Default is False.
                    -
                    -        Returns:
                    -            ImageData: ImageData object with image tensor and pil Image.
                    -        """
                    -        if isinstance(image_source, str):
                    -            if image_source.startswith("http"):
                    -                return self.read_image_from_url(image_source, fix_img_size)
                    -            else:
                    -                return self.read_image_from_path(image_source, fix_img_size)
                    -        elif isinstance(image_source, torch.Tensor):
                    -            return self.read_tensor(image_source, fix_img_size)
                    -        elif isinstance(image_source, np.ndarray):
                    -            return self.read_numpy_array(image_source, fix_img_size)
                    -        elif isinstance(image_source, bytes):
                    -            return self.read_image_from_bytes(image_source, fix_img_size)
                    -        elif isinstance(image_source, Image.Image):
                    -            return self.read_pil_image(image_source, fix_img_size)
                    -        else:
                    -            raise ValueError("Unsupported data type")
                    -
                    -    def read_tensor(self, tensor: torch.Tensor, fix_img_size: bool) -> ImageData:
                    -        return self.process_tensor(tensor, fix_img_size)
                    -
                    -    def read_pil_image(self, pil_image: Image.Image, fix_img_size: bool) -> ImageData:
                    -        tensor = torchvision.transforms.functional.to_tensor(pil_image)
                    -        return self.process_tensor(tensor, fix_img_size)
                    -
                    -    def read_numpy_array(self, array: np.ndarray, fix_img_size: bool) -> ImageData:
                    -        pil_image = Image.fromarray(array, mode="RGB")
                    -        return self.read_pil_image(pil_image, fix_img_size)
                    -
                    -    def read_image_from_bytes(
                    -        self, image_bytes: bytes, fix_img_size: bool
                    -    ) -> ImageData:
                    -        pil_image = Image.open(io.BytesIO(image_bytes))
                    -        return self.read_pil_image(pil_image, fix_img_size)
                    -
                    -    def read_image_from_path(self, path_image: str, fix_img_size: bool) -> ImageData:
                    -        try:
                    -            image_tensor = torchvision.io.read_image(path_image)
                    -        except Exception as e:
                    -            logger.error(f"Failed to read image from path {path_image}: {e}")
                    -            raise ValueError(f"Could not read image from path {path_image}: {e}") from e
                    -
                    -        return self.process_tensor(image_tensor, fix_img_size)
                    -
                    -    def read_image_from_url(self, url: str, fix_img_size: bool) -> ImageData:
                    -        try:
                    -            response = requests.get(url, timeout=10)
                    -            response.raise_for_status()
                    -        except requests.RequestException as e:
                    -            logger.error(f"Failed to fetch image from URL {url}: {e}")
                    -            raise ValueError(f"Could not fetch image from URL {url}: {e}") from e
                    -
                    -        image_bytes = response.content
                    -        return self.read_image_from_bytes(image_bytes, fix_img_size)
                    -
                    -
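A usage sketch of UniversalReader's dispatch on the source type; the path and URL are placeholders, and the empty Compose is a stand-in for a real resize transform:

    import numpy as np
    import torch
    from PIL import Image
    from torchvision import transforms
    from facetorch.analyzer.reader import UniversalReader

    reader = UniversalReader(
        transform=transforms.Compose([]),   # placeholder; only used with fix_img_size=True
        device=torch.device("cpu"),
        optimize_transform=False,
    )
    data = reader.run("face.jpg")                                    # local path
    data = reader.run("https://example.com/face.jpg")                # URL
    data = reader.run(np.zeros((480, 640, 3), dtype=np.uint8))       # HWC RGB array
    data = reader.run(Image.new("RGB", (640, 480)))                  # PIL image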
                    -class ImageReader(BaseReader):
                    -    def __init__(
                    -        self,
                    -        transform: torchvision.transforms.Compose,
                    -        device: torch.device,
                    -        optimize_transform: bool,
                    -    ):
                    -        """ImageReader is a wrapper around a functionality for reading images by Torchvision.
                    -
                    -        Args:
-            transform (torchvision.transforms.Compose): Transform compose object to be applied to the image if fix_img_size is True.
-            device (torch.device): Torch device cpu or cuda object.
-            optimize_transform (bool): Whether to optimize the transforms (resizing the image to a fixed size).
                    -
                    -        """
                    -        super().__init__(
                    -            transform,
                    -            device,
                    -            optimize_transform,
                    -        )
                    -
                    -    @Timer("ImageReader.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                    -    def run(self, path_image: str, fix_img_size: bool = False) -> ImageData:
                    -        """Reads an image from a path and returns a tensor of the image with values between 0-255 and shape (batch, channels, height, width). The order of color channels is RGB. PyTorch and Torchvision are used to read the image.
                    -
                    -        Args:
                    -            path_image (str): Path to the image.
                    -            fix_img_size (bool): Whether to resize the image to a fixed size. If False, the size_portrait and size_landscape are ignored. Default is False.
                    -
                    -        Returns:
                    -            ImageData: ImageData object with image tensor and pil Image.
                    -        """
                    -        data = ImageData(path_input=path_image)
                    -        data.img = torchvision.io.read_image(
                    -            data.path_input, mode=torchvision.io.ImageReadMode.RGB
                    -        )
                    -        data.img = data.img.unsqueeze(0)
                    -        data.img = data.img.to(self.device)
                    -
                    -        if fix_img_size:
                    -            data.img = self.transform(data.img)
                    -
                    -        data.tensor = data.img.type(torch.float32)
                    -        data.img = data.img.squeeze(0).cpu()
                    -        data.set_dims()
                    -
                    -        return data
                    -
                    -
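A usage sketch of ImageReader with a hypothetical image path; the shapes in the comments follow the tensor handling shown above:

    import torch
    from torchvision import transforms
    from facetorch.analyzer.reader import ImageReader

    reader = ImageReader(
        transform=transforms.Compose([]),   # placeholder; applied only when fix_img_size=True
        device=torch.device("cpu"),
        optimize_transform=False,
    )
    data = reader.run("face.jpg")           # hypothetical path
    # data.tensor: float32, shape (1, 3, H, W), RGB values 0-255, on the device
    # data.img:    uint8,   shape (3, H, W),   moved back to the CPU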
                    -class TensorReader(BaseReader):
                    -    def __init__(
                    -        self,
                    -        transform: torchvision.transforms.Compose,
                    -        device: torch.device,
                    -        optimize_transform: bool,
                    -    ):
                    -        """TensorReader is a wrapper around a functionality for reading tensors by Torchvision.
                    -
                    -        Args:
-            transform (torchvision.transforms.Compose): Transform compose object to be applied to the image if fix_img_size is True.
-            device (torch.device): Torch device cpu or cuda object.
-            optimize_transform (bool): Whether to optimize the transforms (resizing the image to a fixed size).
                    -
                    -        """
                    -        super().__init__(
                    -            transform,
                    -            device,
                    -            optimize_transform,
                    -        )
                    -
                    -    @Timer("TensorReader.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                    -    def run(self, tensor: torch.Tensor, fix_img_size: bool = False) -> ImageData:
                    -        """Reads a tensor and returns a tensor of the image with values between 0-255 and shape (batch, channels, height, width). The order of color channels is RGB. PyTorch and Torchvision are used to read the image.
                    -
                    -        Args:
                    -            tensor (torch.Tensor): Tensor of a single image with RGB values between 0-255 and shape (channels, height, width).
                    -            fix_img_size (bool): Whether to resize the image to a fixed size. If False, the size_portrait and size_landscape are ignored. Default is False.
                    -
                    -        Returns:
                    -            ImageData: ImageData object with image tensor and pil Image.
                    -        """
                    -        return self.process_tensor(tensor, fix_img_size)
                    -
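And the equivalent sketch for TensorReader, which simply forwards a single CHW tensor to process_tensor:

    import torch
    from torchvision import transforms
    from facetorch.analyzer.reader import TensorReader

    reader = TensorReader(
        transform=transforms.Compose([]),   # placeholder
        device=torch.device("cpu"),
        optimize_transform=False,
    )
    chw = torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8)   # one RGB image, 0-255
    data = reader.run(chw)                  # -> ImageData with a (1, 3, H, W) batch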
                    @@ -291,12 +106,18 @@

                    Args

         return self.process_tensor(tensor, fix_img_size)

     def read_pil_image(self, pil_image: Image.Image, fix_img_size: bool) -> ImageData:
-        tensor = torchvision.transforms.functional.to_tensor(pil_image)
+        if pil_image.mode != "RGB":
+            pil_image = pil_image.convert("RGB")
+        tensor = torchvision.transforms.functional.pil_to_tensor(pil_image)
         return self.process_tensor(tensor, fix_img_size)

     def read_numpy_array(self, array: np.ndarray, fix_img_size: bool) -> ImageData:
-        pil_image = Image.fromarray(array, mode="RGB")
-        return self.read_pil_image(pil_image, fix_img_size)
+        image_tensor = torch.from_numpy(array).float()
+        if image_tensor.ndim == 3 and image_tensor.shape[2] == 3:
+            image_tensor = image_tensor.permute(2, 0, 1).contiguous()
+        else:
+            raise ValueError(f"Unsupported numpy array shape: {image_tensor.shape}")
+        return self.process_tensor(image_tensor, fix_img_size)

     def read_image_from_bytes(
         self, image_bytes: bytes, fix_img_size: bool
@@ -348,138 +169,42 @@
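The switch from to_tensor to pil_to_tensor in the hunk above is significant: to_tensor rescales pixel values to [0, 1] floats, while pil_to_tensor keeps the raw 0-255 range the readers document. A quick check:

    from PIL import Image
    import torchvision.transforms.functional as F

    img = Image.new("RGB", (4, 4), color=(255, 0, 0))
    F.to_tensor(img).max()        # tensor(1.)  -- values rescaled to [0, 1]
    F.pil_to_tensor(img).max()    # tensor(255, dtype=torch.uint8) -- raw values kept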

                    Returns

                    ImageData
                    ImageData object with image tensor and pil Image.
                    -
                    @Timer("UniversalReader.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                    -def run(
                    -    self,
                    -    image_source: Union[str, torch.Tensor, np.ndarray, bytes, Image.Image],
                    -    fix_img_size: bool = False,
                    -) -> ImageData:
                    -    """Reads an image from a path, URL, tensor, numpy array, bytes or PIL Image and returns a tensor of the image with values between 0-255 and shape (batch, channels, height, width). The order of color channels is RGB. PyTorch and Torchvision are used to read the image.
                    -
                    -    Args:
                    -        image_source (Union[str, torch.Tensor, np.ndarray, bytes, Image.Image]): Image source to be read.
                    -        fix_img_size (bool): Whether to resize the image to a fixed size. If False, the size_portrait and size_landscape are ignored. Default is False.
                    -
                    -    Returns:
                    -        ImageData: ImageData object with image tensor and pil Image.
                    -    """
                    -    if isinstance(image_source, str):
                    -        if image_source.startswith("http"):
                    -            return self.read_image_from_url(image_source, fix_img_size)
                    -        else:
                    -            return self.read_image_from_path(image_source, fix_img_size)
                    -    elif isinstance(image_source, torch.Tensor):
                    -        return self.read_tensor(image_source, fix_img_size)
                    -    elif isinstance(image_source, np.ndarray):
                    -        return self.read_numpy_array(image_source, fix_img_size)
                    -    elif isinstance(image_source, bytes):
                    -        return self.read_image_from_bytes(image_source, fix_img_size)
                    -    elif isinstance(image_source, Image.Image):
                    -        return self.read_pil_image(image_source, fix_img_size)
                    -    else:
                    -        raise ValueError("Unsupported data type")
                    -
                    def read_tensor(self, tensor: torch.Tensor, fix_img_size: bool) ‑> ImageData
                    -
                    def read_tensor(self, tensor: torch.Tensor, fix_img_size: bool) -> ImageData:
                    -    return self.process_tensor(tensor, fix_img_size)
                    -
                    def read_pil_image(self, pil_image: PIL.Image.Image, fix_img_size: bool) ‑> ImageData
                    -
                    def read_pil_image(self, pil_image: Image.Image, fix_img_size: bool) -> ImageData:
                    -    tensor = torchvision.transforms.functional.to_tensor(pil_image)
                    -    return self.process_tensor(tensor, fix_img_size)
                    -
                    def read_numpy_array(self, array: numpy.ndarray, fix_img_size: bool) ‑> ImageData
                    -
                    def read_numpy_array(self, array: np.ndarray, fix_img_size: bool) -> ImageData:
                    -    pil_image = Image.fromarray(array, mode="RGB")
                    -    return self.read_pil_image(pil_image, fix_img_size)
                    -
                    def read_image_from_bytes(self, image_bytes: bytes, fix_img_size: bool) ‑> ImageData
                    -
                    def read_image_from_bytes(
                    -    self, image_bytes: bytes, fix_img_size: bool
                    -) -> ImageData:
                    -    pil_image = Image.open(io.BytesIO(image_bytes))
                    -    return self.read_pil_image(pil_image, fix_img_size)
                    -
                    def read_image_from_path(self, path_image: str, fix_img_size: bool) ‑> ImageData
                    -
                    def read_image_from_path(self, path_image: str, fix_img_size: bool) -> ImageData:
                    -    try:
                    -        image_tensor = torchvision.io.read_image(path_image)
                    -    except Exception as e:
                    -        logger.error(f"Failed to read image from path {path_image}: {e}")
                    -        raise ValueError(f"Could not read image from path {path_image}: {e}") from e
                    -
                    -    return self.process_tensor(image_tensor, fix_img_size)
                    -
                    def read_image_from_url(self, url: str, fix_img_size: bool) ‑> ImageData
                    -
                    def read_image_from_url(self, url: str, fix_img_size: bool) -> ImageData:
                    -    try:
                    -        response = requests.get(url, timeout=10)
                    -        response.raise_for_status()
                    -    except requests.RequestException as e:
                    -        logger.error(f"Failed to fetch image from URL {url}: {e}")
                    -        raise ValueError(f"Could not fetch image from URL {url}: {e}") from e
                    -
                    -    image_bytes = response.content
                    -    return self.read_image_from_bytes(image_bytes, fix_img_size)
                    -

                    Inherited members

                    @@ -583,37 +308,6 @@

                    Returns

                    ImageData
                    ImageData object with image tensor and pil Image.
                    -
                    @Timer("ImageReader.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                    -def run(self, path_image: str, fix_img_size: bool = False) -> ImageData:
                    -    """Reads an image from a path and returns a tensor of the image with values between 0-255 and shape (batch, channels, height, width). The order of color channels is RGB. PyTorch and Torchvision are used to read the image.
                    -
                    -    Args:
                    -        path_image (str): Path to the image.
                    -        fix_img_size (bool): Whether to resize the image to a fixed size. If False, the size_portrait and size_landscape are ignored. Default is False.
                    -
                    -    Returns:
                    -        ImageData: ImageData object with image tensor and pil Image.
                    -    """
                    -    data = ImageData(path_input=path_image)
                    -    data.img = torchvision.io.read_image(
                    -        data.path_input, mode=torchvision.io.ImageReadMode.RGB
                    -    )
                    -    data.img = data.img.unsqueeze(0)
                    -    data.img = data.img.to(self.device)
                    -
                    -    if fix_img_size:
                    -        data.img = self.transform(data.img)
                    -
                    -    data.tensor = data.img.type(torch.float32)
                    -    data.img = data.img.squeeze(0).cpu()
                    -    data.set_dims()
                    -
                    -    return data
                    -

                    Inherited members

                    @@ -703,23 +397,6 @@

                    Returns

                    ImageData
                    ImageData object with image tensor and pil Image.
                    -
                    @Timer("TensorReader.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                    -def run(self, tensor: torch.Tensor, fix_img_size: bool = False) -> ImageData:
                    -    """Reads a tensor and returns a tensor of the image with values between 0-255 and shape (batch, channels, height, width). The order of color channels is RGB. PyTorch and Torchvision are used to read the image.
                    -
                    -    Args:
                    -        tensor (torch.Tensor): Tensor of a single image with RGB values between 0-255 and shape (channels, height, width).
                    -        fix_img_size (bool): Whether to resize the image to a fixed size. If False, the size_portrait and size_landscape are ignored. Default is False.
                    -
                    -    Returns:
                    -        ImageData: ImageData object with image tensor and pil Image.
                    -    """
                    -    return self.process_tensor(tensor, fix_img_size)
                    -

                    Inherited members

                    @@ -782,7 +459,6 @@

                    Inherited members


                    Index

                      @@ -824,7 +500,7 @@

                      -

                      Generated by pdoc 0.10.0.

                      +

                      Generated by pdoc 0.11.1.

-
\ No newline at end of file
+
diff --git a/docs/facetorch/analyzer/reader/index.html b/docs/facetorch/analyzer/reader/index.html
index 0fc2a3c..22f50b6 100644
--- a/docs/facetorch/analyzer/reader/index.html
+++ b/docs/facetorch/analyzer/reader/index.html
@@ -2,18 +2,21 @@
 facetorch.analyzer.reader API documentation
                      @@ -22,14 +25,6 @@

                      Module facetorch.analyzer.reader

                      -
                      from .core import ImageReader, TensorReader, UniversalReader
                      -
                      -__all__ = ["ImageReader", "TensorReader", "UniversalReader"]
                      -

                      Sub-modules

                      @@ -138,37 +133,6 @@

                      Returns

                      ImageData
                      ImageData object with image tensor and pil Image.
                      -
                      @Timer("ImageReader.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                      -def run(self, path_image: str, fix_img_size: bool = False) -> ImageData:
                      -    """Reads an image from a path and returns a tensor of the image with values between 0-255 and shape (batch, channels, height, width). The order of color channels is RGB. PyTorch and Torchvision are used to read the image.
                      -
                      -    Args:
                      -        path_image (str): Path to the image.
                      -        fix_img_size (bool): Whether to resize the image to a fixed size. If False, the size_portrait and size_landscape are ignored. Default is False.
                      -
                      -    Returns:
                      -        ImageData: ImageData object with image tensor and pil Image.
                      -    """
                      -    data = ImageData(path_input=path_image)
                      -    data.img = torchvision.io.read_image(
                      -        data.path_input, mode=torchvision.io.ImageReadMode.RGB
                      -    )
                      -    data.img = data.img.unsqueeze(0)
                      -    data.img = data.img.to(self.device)
                      -
                      -    if fix_img_size:
                      -        data.img = self.transform(data.img)
                      -
                      -    data.tensor = data.img.type(torch.float32)
                      -    data.img = data.img.squeeze(0).cpu()
                      -    data.set_dims()
                      -
                      -    return data
                      -

                      Inherited members

                      @@ -258,23 +222,6 @@

                      Returns

                      ImageData
                      ImageData object with image tensor and pil Image.
                      -
                      @Timer("TensorReader.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                      -def run(self, tensor: torch.Tensor, fix_img_size: bool = False) -> ImageData:
                      -    """Reads a tensor and returns a tensor of the image with values between 0-255 and shape (batch, channels, height, width). The order of color channels is RGB. PyTorch and Torchvision are used to read the image.
                      -
                      -    Args:
                      -        tensor (torch.Tensor): Tensor of a single image with RGB values between 0-255 and shape (channels, height, width).
                      -        fix_img_size (bool): Whether to resize the image to a fixed size. If False, the size_portrait and size_landscape are ignored. Default is False.
                      -
                      -    Returns:
                      -        ImageData: ImageData object with image tensor and pil Image.
                      -    """
                      -    return self.process_tensor(tensor, fix_img_size)
                      -

                      Inherited members

                      @@ -358,12 +305,18 @@

                      Args

         return self.process_tensor(tensor, fix_img_size)

     def read_pil_image(self, pil_image: Image.Image, fix_img_size: bool) -> ImageData:
-        tensor = torchvision.transforms.functional.to_tensor(pil_image)
+        if pil_image.mode != "RGB":
+            pil_image = pil_image.convert("RGB")
+        tensor = torchvision.transforms.functional.pil_to_tensor(pil_image)
         return self.process_tensor(tensor, fix_img_size)

     def read_numpy_array(self, array: np.ndarray, fix_img_size: bool) -> ImageData:
-        pil_image = Image.fromarray(array, mode="RGB")
-        return self.read_pil_image(pil_image, fix_img_size)
+        image_tensor = torch.from_numpy(array).float()
+        if image_tensor.ndim == 3 and image_tensor.shape[2] == 3:
+            image_tensor = image_tensor.permute(2, 0, 1).contiguous()
+        else:
+            raise ValueError(f"Unsupported numpy array shape: {image_tensor.shape}")
+        return self.process_tensor(image_tensor, fix_img_size)

     def read_image_from_bytes(
         self, image_bytes: bytes, fix_img_size: bool
@@ -415,138 +368,42 @@

                      Returns

                      ImageData
                      ImageData object with image tensor and pil Image.
                      -
                      @Timer("UniversalReader.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                      -def run(
                      -    self,
                      -    image_source: Union[str, torch.Tensor, np.ndarray, bytes, Image.Image],
                      -    fix_img_size: bool = False,
                      -) -> ImageData:
                      -    """Reads an image from a path, URL, tensor, numpy array, bytes or PIL Image and returns a tensor of the image with values between 0-255 and shape (batch, channels, height, width). The order of color channels is RGB. PyTorch and Torchvision are used to read the image.
                      -
                      -    Args:
                      -        image_source (Union[str, torch.Tensor, np.ndarray, bytes, Image.Image]): Image source to be read.
                      -        fix_img_size (bool): Whether to resize the image to a fixed size. If False, the size_portrait and size_landscape are ignored. Default is False.
                      -
                      -    Returns:
                      -        ImageData: ImageData object with image tensor and pil Image.
                      -    """
                      -    if isinstance(image_source, str):
                      -        if image_source.startswith("http"):
                      -            return self.read_image_from_url(image_source, fix_img_size)
                      -        else:
                      -            return self.read_image_from_path(image_source, fix_img_size)
                      -    elif isinstance(image_source, torch.Tensor):
                      -        return self.read_tensor(image_source, fix_img_size)
                      -    elif isinstance(image_source, np.ndarray):
                      -        return self.read_numpy_array(image_source, fix_img_size)
                      -    elif isinstance(image_source, bytes):
                      -        return self.read_image_from_bytes(image_source, fix_img_size)
                      -    elif isinstance(image_source, Image.Image):
                      -        return self.read_pil_image(image_source, fix_img_size)
                      -    else:
                      -        raise ValueError("Unsupported data type")
                      -
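The dispatcher above is the reader's single public entry point. A usage sketch, assuming an already constructed UniversalReader instance named reader (its constructor arguments come from the project config and are omitted here):

import numpy as np
import torch

# Every route below ends in process_tensor and returns an ImageData object
# whose tensor is RGB, 0-255, shaped (batch, channels, height, width).
data = reader.run("test.jpg")                               # local path
data = reader.run("https://example.com/face.jpg")           # hypothetical URL
data = reader.run(np.zeros((480, 640, 3), dtype=np.uint8))  # HWC numpy array
data = reader.run(torch.zeros(3, 480, 640))                 # CHW tensor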
                      def read_tensor(self, tensor: torch.Tensor, fix_img_size: bool) ‑> ImageData
-def read_tensor(self, tensor: torch.Tensor, fix_img_size: bool) -> ImageData:
                      -    return self.process_tensor(tensor, fix_img_size)
                      -
                      def read_pil_image(self, pil_image: PIL.Image.Image, fix_img_size: bool) ‑> ImageData
-def read_pil_image(self, pil_image: Image.Image, fix_img_size: bool) -> ImageData:
                      -    tensor = torchvision.transforms.functional.to_tensor(pil_image)
                      -    return self.process_tensor(tensor, fix_img_size)
                      -
                      def read_numpy_array(self, array: numpy.ndarray, fix_img_size: bool) ‑> ImageData
-def read_numpy_array(self, array: np.ndarray, fix_img_size: bool) -> ImageData:
                      -    pil_image = Image.fromarray(array, mode="RGB")
                      -    return self.read_pil_image(pil_image, fix_img_size)
                      -
                      def read_image_from_bytes(self, image_bytes: bytes, fix_img_size: bool) ‑> ImageData
-def read_image_from_bytes(
                      -    self, image_bytes: bytes, fix_img_size: bool
                      -) -> ImageData:
                      -    pil_image = Image.open(io.BytesIO(image_bytes))
                      -    return self.read_pil_image(pil_image, fix_img_size)
                      -
                      def read_image_from_path(self, path_image: str, fix_img_size: bool) ‑> ImageData
-def read_image_from_path(self, path_image: str, fix_img_size: bool) -> ImageData:
                      -    try:
                      -        image_tensor = torchvision.io.read_image(path_image)
                      -    except Exception as e:
                      -        logger.error(f"Failed to read image from path {path_image}: {e}")
                      -        raise ValueError(f"Could not read image from path {path_image}: {e}") from e
                      -
                      -    return self.process_tensor(image_tensor, fix_img_size)
                      -
                      def read_image_from_url(self, url: str, fix_img_size: bool) ‑> ImageData
-def read_image_from_url(self, url: str, fix_img_size: bool) -> ImageData:
                      -    try:
                      -        response = requests.get(url, timeout=10)
                      -        response.raise_for_status()
                      -    except requests.RequestException as e:
                      -        logger.error(f"Failed to fetch image from URL {url}: {e}")
                      -        raise ValueError(f"Could not fetch image from URL {url}: {e}") from e
                      -
                      -    image_bytes = response.content
                      -    return self.read_image_from_bytes(image_bytes, fix_img_size)
                      -

                      Inherited members

                      @@ -609,7 +466,6 @@

                      Inherited members


                      Index

                        @@ -656,7 +512,7 @@

-Generated by pdoc 0.10.0.
+Generated by pdoc 0.11.1.

-
\ No newline at end of file
+

diff --git a/docs/facetorch/analyzer/unifier/core.html b/docs/facetorch/analyzer/unifier/core.html
index e2a938e..3419878 100644
--- a/docs/facetorch/analyzer/unifier/core.html
+++ b/docs/facetorch/analyzer/unifier/core.html
@@ -2,18 +2,21 @@
 facetorch.analyzer.unifier.core API documentation
                        @@ -22,51 +25,6 @@

                        Module facetorch.analyzer.unifier.core

-import torch
                        -from codetiming import Timer
                        -from facetorch.base import BaseProcessor
                        -from facetorch.datastruct import ImageData
                        -from facetorch.logger import LoggerJsonFile
                        -from torchvision import transforms
                        -
                        -logger = LoggerJsonFile().logger
                        -
                        -
                        -class FaceUnifier(BaseProcessor):
                        -    def __init__(
                        -        self,
                        -        transform: transforms.Compose,
                        -        device: torch.device,
                        -        optimize_transform: bool,
                        -    ):
-        """FaceUnifier is a transform-based processor that unifies the sizes of all faces and normalizes them between 0 and 1.
                        -
                        -        Args:
                        -            transform (Compose): Composed Torch transform object.
                        -            device (torch.device): Torch device cpu or cuda object.
                        -            optimize_transform (bool): Whether to optimize the transform.
                        -        """
                        -        super().__init__(transform, device, optimize_transform)
                        -
                        -    @Timer("FaceUnifier.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                        -    def run(self, data: ImageData) -> ImageData:
                        -        """Runs unifying transform on each face tensor one by one.
                        -
                        -        Args:
                        -            data (ImageData): ImageData object containing the face tensors.
                        -
                        -        Returns:
                        -            ImageData: ImageData object containing the unified face tensors normalized between 0 and 1.
                        -        """
                        -        for indx, face in enumerate(data.faces):
                        -            data.faces[indx].tensor = self.transform(face.tensor)
                        -
                        -        return data
                        -
                        @@ -148,25 +106,6 @@

                        Returns

                        ImageData
                        ImageData object containing the unified face tensors normalized between 0 and 1.
-@Timer("FaceUnifier.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                        -def run(self, data: ImageData) -> ImageData:
                        -    """Runs unifying transform on each face tensor one by one.
                        -
                        -    Args:
                        -        data (ImageData): ImageData object containing the face tensors.
                        -
                        -    Returns:
                        -        ImageData: ImageData object containing the unified face tensors normalized between 0 and 1.
                        -    """
                        -    for indx, face in enumerate(data.faces):
                        -        data.faces[indx].tensor = self.transform(face.tensor)
                        -
                        -    return data
                        -
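The run method above applies self.transform to each face crop. A minimal sketch of the kind of Compose a FaceUnifier could be built with; the 380x380 target size and the dtype conversion are assumptions standing in for the project config:

import torch
from torchvision import transforms

# Assumed unifier transform: resize every face crop to one shared size and
# convert uint8 0-255 pixels to float32 in [0, 1].
unify = transforms.Compose([
    transforms.Resize((380, 380)),  # assumed target size
    transforms.ConvertImageDtype(torch.float32),
])

face_tensor = torch.randint(0, 256, (3, 217, 190), dtype=torch.uint8)
unified = unify(face_tensor)
print(unified.shape, unified.dtype)  # torch.Size([3, 380, 380]) torch.float32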

                        Inherited members

                        @@ -228,7 +167,6 @@

                        Inherited members


                        Index

                          @@ -252,7 +190,7 @@

-Generated by pdoc 0.10.0.
+Generated by pdoc 0.11.1.

-
\ No newline at end of file
+

diff --git a/docs/facetorch/analyzer/unifier/index.html b/docs/facetorch/analyzer/unifier/index.html
index da1e8a9..a1110fe 100644
--- a/docs/facetorch/analyzer/unifier/index.html
+++ b/docs/facetorch/analyzer/unifier/index.html
@@ -2,18 +2,21 @@
 facetorch.analyzer.unifier API documentation
                          @@ -22,15 +25,6 @@

                          Module facetorch.analyzer.unifier

-from .core import FaceUnifier
                          -
                          -
                          -__all__ = ["FaceUnifier"]
                          -

                          Sub-modules

                          @@ -119,25 +113,6 @@

                          Returns

                          ImageData
                          ImageData object containing the unified face tensors normalized between 0 and 1.
-@Timer("FaceUnifier.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                          -def run(self, data: ImageData) -> ImageData:
                          -    """Runs unifying transform on each face tensor one by one.
                          -
                          -    Args:
                          -        data (ImageData): ImageData object containing the face tensors.
                          -
                          -    Returns:
                          -        ImageData: ImageData object containing the unified face tensors normalized between 0 and 1.
                          -    """
                          -    for indx, face in enumerate(data.faces):
                          -        data.faces[indx].tensor = self.transform(face.tensor)
                          -
                          -    return data
                          -

                          Inherited members

                          @@ -199,7 +174,6 @@

                          Inherited members


                          Index

                            @@ -228,7 +202,7 @@

-Generated by pdoc 0.10.0.
+Generated by pdoc 0.11.1.

-
\ No newline at end of file
+

diff --git a/docs/facetorch/analyzer/utilizer/align.html b/docs/facetorch/analyzer/utilizer/align.html
index 5258d25..82d7546 100644
--- a/docs/facetorch/analyzer/utilizer/align.html
+++ b/docs/facetorch/analyzer/utilizer/align.html
@@ -2,18 +2,21 @@
 facetorch.analyzer.utilizer.align API documentation
                            @@ -22,355 +25,6 @@

                            Module facetorch.analyzer.utilizer.align

-import os
                            -from typing import List, Tuple, Union
                            -import torch
                            -import numpy as np
                            -from codetiming import Timer
                            -from facetorch.base import BaseDownloader, BaseUtilizer
                            -from facetorch.datastruct import ImageData
                            -from facetorch.logger import LoggerJsonFile
                            -from torchvision import transforms
                            -
                            -logger = LoggerJsonFile().logger
                            -
                            -
                            -class Lmk3DMeshPose(BaseUtilizer):
                            -    def __init__(
                            -        self,
                            -        transform: transforms.Compose,
                            -        device: torch.device,
                            -        optimize_transform: bool,
                            -        downloader_meta: BaseDownloader,
                            -        image_size: int = 120,
                            -    ):
                            -        """Initializes the Lmk3DMeshPose class. This class is used to convert the face parameter vector to 3D landmarks, mesh and pose.
                            -
                            -        Args:
                            -            transform (Compose): Composed Torch transform object.
                            -            device (torch.device): Torch device cpu or cuda object.
                            -            optimize_transform (bool): Whether to optimize the transform.
                            -            downloader_meta (BaseDownloader): Downloader for metadata.
                            -            image_size (int): Standard size of the face image.
                            -
                            -        """
                            -        super().__init__(transform, device, optimize_transform)
                            -
                            -        self.downloader_meta = downloader_meta
                            -        self.image_size = image_size
                            -        if not os.path.exists(self.downloader_meta.path_local):
                            -            self.downloader_meta.run()
                            -
                            -        self.meta = torch.load(self.downloader_meta.path_local)
                            -
                            -        for key in self.meta.keys():
                            -            if isinstance(self.meta[key], torch.Tensor):
                            -                self.meta[key] = self.meta[key].to(self.device)
                            -
                            -        self.keypoints = self.meta["keypoints"]
-        # PCA basis for shape and expression
                            -        self.w_shp = self.meta["w_shp"]
                            -        self.w_exp = self.meta["w_exp"]
                            -        # param_mean and param_std are used for re-whitening
                            -        self.param_mean = self.meta["param_mean"]
                            -        self.param_std = self.meta["param_std"]
                            -        # mean values
                            -        self.u_shp = self.meta["u_shp"]
                            -        self.u_exp = self.meta["u_exp"]
                            -
                            -        self.u = self.u_shp + self.u_exp
                            -        self.w = torch.cat((self.w_shp, self.w_exp), dim=1)
                            -        # base vector for landmarks
                            -        self.w_base = self.w[self.keypoints]
                            -        self.w_norm = torch.linalg.norm(self.w, dim=0)
                            -        self.w_base_norm = torch.linalg.norm(self.w_base, dim=0)
                            -        self.u_base = self.u[self.keypoints].reshape(-1, 1)
                            -        self.w_shp_base = self.w_shp[self.keypoints]
                            -        self.w_exp_base = self.w_exp[self.keypoints]
                            -        self.dim = self.w_shp.shape[0] // 3
                            -
                            -    @Timer("Lmk3DMeshPose.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                            -    def run(self, data: ImageData) -> ImageData:
-        """Runs the Lmk3DMeshPose functionality: converts the face parameter vector to 3D landmarks, mesh and pose.
                            -
                            -        Adds the following attributes to the data object:
                            -
                            -        - landmark [[y, x, z], 68 (points)]
                            -        - mesh [[y, x, z], 53215 (points)]
                            -        - pose (Euler angles [yaw, pitch, roll] and translation [y, x, z])
                            -
                            -        Args:
                            -            data (ImageData): ImageData object containing most of the data including the predictions.
                            -
                            -        Returns:
                            -            ImageData: ImageData object containing lmk3d, mesh and pose.
                            -        """
                            -        for count, face in enumerate(data.faces):
                            -            assert "align" in face.preds.keys(), "align key not found in face.preds"
                            -            param = face.preds["align"].logits
                            -
                            -            roi_box = [face.loc.x1, face.loc.y1, face.loc.x2, face.loc.y2]
                            -
                            -            landmarks = self._compute_sparse_vert(param, roi_box, transform_space=True)
                            -            vertices = self._compute_dense_vert(param, roi_box, transform_space=True)
                            -            angles, translation = self._compute_pose(param, roi_box)
                            -
                            -            data.faces[count].preds["align"].other["lmk3d"] = landmarks
                            -            data.faces[count].preds["align"].other["mesh"] = vertices
                            -            data.faces[count].preds["align"].other["pose"] = dict(
                            -                angles=angles, translation=translation
                            -            )
                            -
                            -        return data
                            -
                            -    def _matrix2angle_corr(self, re: torch.Tensor) -> List[float]:
-        """Converts a rotation matrix to Euler angles in degrees.
                            -
                            -        Args:
                            -            re (torch.Tensor): Rotation matrix.
                            -
                            -        Returns:
-            List[float]: Euler angles in degrees.
                            -        """
                            -        pi = torch.tensor(np.pi).to(self.device)
                            -        if re[2, 0] != 1 and re[2, 0] != -1:
                            -            x = torch.asin(re[2, 0])
                            -            y = torch.atan2(
                            -                re[1, 2] / torch.cos(x),
                            -                re[2, 2] / torch.cos(x),
                            -            )
                            -            z = torch.atan2(
                            -                re[0, 1] / torch.cos(x),
                            -                re[0, 0] / torch.cos(x),
                            -            )
                            -
                            -        else:  # Gimbal lock
                            -            z = 0
                            -            if re[2, 0] == -1:
                            -                x = pi / 2
                            -                y = z + torch.atan2(re[0, 1], re[0, 2])
                            -            else:
                            -                x = -pi / 2
                            -                y = -z + torch.atan2(-re[0, 1], -re[0, 2])
                            -
                            -        rx, ry, rz = (
                            -            float((x * 180 / pi).item()),
                            -            float((y * 180 / pi).item()),
                            -            float((z * 180 / pi).item()),
                            -        )
                            -
                            -        return [rx, ry, rz]
                            -
                            -    def _parse_param(self, param: torch.Tensor):
                            -        """Parses the parameter vector.
                            -
                            -        Args:
                            -            param (torch.Tensor): Parameter vector.
                            -
                            -        Returns:
-            Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: Rotation/scale block, translation offset, shape and expression coefficients.
                            -        """
                            -        p_ = param[:12].reshape(3, 4)
                            -        pe = p_[:, :3]
                            -        offset = p_[:, -1].reshape(3, 1)
                            -        alpha_shp = param[12:52].reshape(40, 1)
                            -        alpha_exp = param[52:62].reshape(10, 1)
                            -        return pe, offset, alpha_shp, alpha_exp
                            -
                            -    def _param2vert(
                            -        self, param: torch.Tensor, dense: bool = False, transform_space: bool = True
                            -    ) -> torch.Tensor:
                            -        """Parses the parameter vector into a dense or sparse vertex representation.
                            -
                            -        Args:
                            -            param (torch.Tensor): Parameter vector.
                            -            dense (bool): Whether to return a dense or sparse vertex representation.
                            -            transform_space (bool): Whether to transform the vertex representation to the original space.
                            -
                            -        Returns:
                            -            torch.Tensor: Dense or sparse vertex representation.
                            -        """
                            -
                            -        def _reshape_fortran(_x, shape):
                            -            if len(_x.shape) > 0:
                            -                _x = _x.permute(*reversed(range(len(_x.shape))))
                            -            return _x.reshape(*reversed(shape)).permute(*reversed(range(len(shape))))
                            -
                            -        if param.shape[0] == 62:
                            -            param_ = param * self.param_std[:62] + self.param_mean[:62]
                            -        else:
                            -            raise RuntimeError("length of params mismatch")
                            -
                            -        pe, offset, alpha_shp, alpha_exp = self._parse_param(param_)
                            -
                            -        if dense:
                            -            he = (
                            -                self.u + self.w_shp @ alpha_shp.float() + self.w_exp @ alpha_exp.float()
                            -            )
                            -            he = _reshape_fortran(he, (3, -1))
                            -            vertex = pe.float() @ he + offset
                            -            if transform_space:
                            -                # transform to image coordinate space
                            -                vertex[1, :] = self.image_size + 1 - vertex[1, :]
                            -
                            -        else:
                            -            he = (
                            -                self.u_base
                            -                + self.w_shp_base @ alpha_shp.float()
                            -                + self.w_exp_base @ alpha_exp.float()
                            -            )
                            -            he = _reshape_fortran(he, (3, -1))
                            -            vertex = pe.float() @ he + offset
                            -            if transform_space:
                            -                # transform to image coordinate space
                            -                vertex[1, :] = self.image_size + 1 - vertex[1, :]
                            -
                            -        return vertex
                            -
                            -    def _p2srt(
                            -        self, param: torch.Tensor
                            -    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
-        """Decomposes the 3x4 camera matrix into scale, rotation matrix and translation.
                            -
                            -        Args:
                            -            param (torch.Tensor): Parameter vector.
                            -
                            -        Returns:
-            Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Scale, rotation matrix and translation.
                            -        """
                            -        t3d = param[:, 3]
                            -        r1 = param[0:1, :3]
                            -        r2 = param[1:2, :3]
                            -        se = (torch.linalg.norm(r1) + torch.linalg.norm(r2)) / 2.0
                            -        r1 = r1 / torch.linalg.norm(r1)
                            -        r2 = r2 / torch.linalg.norm(r2)
                            -        r3 = torch.cross(r1, r2)
                            -        re = torch.cat((r1, r2, r3), 0)
                            -        return se, re, t3d
                            -
                            -    def _parse_pose(
                            -        self, param: torch.Tensor
                            -    ) -> Tuple[torch.Tensor, List[torch.Tensor], torch.Tensor]:
                            -        """Parses the parameter vector to pose data.
                            -
                            -        Args:
                            -            param (torch.Tensor): Parameter vector.
                            -
                            -        Returns:
                            -            Tuple[torch.Tensor, List[torch.Tensor], torch.Tensor]: Pose data.
                            -        """
                            -        param = param * self.param_std[:62] + self.param_mean[:62]
                            -        param = param[:12].reshape(3, -1)  # camera matrix
                            -        _, rem, t3d = self._p2srt(param)
                            -        pe = torch.cat((rem, t3d.reshape(3, -1)), 1)  # without scale
                            -        pose = self._matrix2angle_corr(rem)  # yaw, pitch, roll
                            -        return pe, pose, t3d
                            -
                            -    def _compute_vertices(
                            -        self,
                            -        param: torch.Tensor,
                            -        roi_bbox: Tuple[int, int, int, int],
                            -        dense: bool,
                            -        transform_space: bool = True,
                            -    ) -> torch.Tensor:
                            -        """Predict the vertices of the face given the parameter vector.
                            -
                            -        Args:
                            -            param (torch.Tensor): Parameter vector.
                            -            roi_bbox (Tuple[int, int, int, int]): Bounding box of the face.
                            -            dense (bool): Whether to return a dense or sparse vertex representation.
                            -            transform_space (bool): Whether to transform the vertex representation to the original space.
                            -
                            -        Returns:
                            -            torch.Tensor: Dense or sparse vertex representation.
                            -        """
                            -        vertex = self._param2vert(param, dense=dense, transform_space=transform_space)
                            -        sx, sy, ex, ey = roi_bbox
                            -        scale_x = (ex - sx) / self.image_size
                            -        scale_y = (ey - sy) / self.image_size
                            -        vertex[0, :] = vertex[0, :] * scale_x + sx
                            -        vertex[1, :] = vertex[1, :] * scale_y + sy
                            -
                            -        s = (scale_x + scale_y) / 2
                            -        vertex[2, :] *= s
                            -
                            -        return vertex
                            -
                            -    def _compute_sparse_vert(
                            -        self,
                            -        param: torch.Tensor,
                            -        roi_box: Tuple[int, int, int, int],
                            -        transform_space: bool = False,
                            -    ) -> torch.Tensor:
                            -        """Predict the sparse vertex representation of the face given the parameter vector.
                            -
                            -        Args:
                            -            param (torch.Tensor): Parameter vector.
                            -            roi_box (Tuple[int, int, int, int]): Bounding box of the face.
                            -            transform_space (bool): Whether to transform the vertex representation to the original space.
                            -
                            -        Returns:
                            -            torch.Tensor: Sparse vertex representation.
                            -
                            -        """
                            -        vertex = self._compute_vertices(
                            -            param, roi_box, dense=False, transform_space=transform_space
                            -        )
                            -        return vertex
                            -
                            -    def _compute_dense_vert(
                            -        self,
                            -        param: torch.Tensor,
                            -        roi_box: Tuple[int, int, int, int],
                            -        transform_space: bool = False,
                            -    ) -> torch.Tensor:
                            -        """Predict the dense vertex representation of the face given the parameter vector.
                            -
                            -        Args:
                            -            param (torch.Tensor): Parameter vector.
-            roi_box (Tuple[int, int, int, int]): Bounding box of the face.
                            -            transform_space (bool): Whether to transform the vertex representation to the original space.
                            -
                            -        Returns:
                            -            torch.Tensor: Dense vertex representation.
                            -        """
                            -        vertex = self._compute_vertices(
                            -            param, roi_box, dense=True, transform_space=transform_space
                            -        )
                            -        return vertex
                            -
                            -    def _compute_pose(
                            -        self,
                            -        param: torch.Tensor,
                            -        roi_bbox: Tuple[int, int, int, int],
                            -        ret_mat: bool = False,
                            -    ) -> Union[torch.Tensor, Tuple[List[float], torch.Tensor]]:
                            -        """Predict the pose of the face given the parameter vector.
                            -
                            -        Args:
                            -            param (torch.Tensor): Parameter vector.
-            roi_bbox (Tuple[int, int, int, int]): Bounding box of the face.
                            -            ret_mat (bool): Whether to return the rotation matrix.
                            -
                            -        Returns:
-            Union[torch.Tensor, Tuple[List[float], torch.Tensor]]: Rotation matrix if ret_mat is True, otherwise Euler angles and translation.
                            -        """
                            -        pe, angles, t3d = self._parse_pose(param)
                            -
                            -        sx, sy, ex, ey = roi_bbox
                            -        scale_x = (ex - sx) / self.image_size
                            -        scale_y = (ey - sy) / self.image_size
                            -        t3d[0] = t3d[0] * scale_x + sx
                            -        t3d[1] = t3d[1] * scale_y + sy
                            -
                            -        if ret_mat:
                            -            return pe
                            -        return angles, t3d
                            -
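As _parse_param in the listing above shows, the 62-dimensional face parameter vector splits into 12 camera values, 40 shape coefficients and 10 expression coefficients. A small sketch of that split, with a random vector standing in for face.preds["align"].logits:

import torch

param = torch.randn(62)  # stand-in for a real prediction

pose = param[:12].reshape(3, 4)          # camera matrix
rotation_scale = pose[:, :3]             # (3, 3) rotation/scale block
offset = pose[:, -1].reshape(3, 1)       # (3, 1) translation column
alpha_shp = param[12:52].reshape(40, 1)  # shape coefficients
alpha_exp = param[52:62].reshape(10, 1)  # expression coefficients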
                            @@ -430,7 +84,10 @@

                            Args

 if not os.path.exists(self.downloader_meta.path_local):
     self.downloader_meta.run()

-self.meta = torch.load(self.downloader_meta.path_local)
+self.meta = torch.load(
+    self.downloader_meta.path_local,
+    weights_only=False,
+)

 for key in self.meta.keys():
     if isinstance(self.meta[key], torch.Tensor):
@@ -613,7 +270,7 @@
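The weights_only argument above tracks a PyTorch behavior change: torch.load accepts a weights_only flag, and newer releases (2.6 and later) default it to True, which swaps in a restricted unpickler that rejects objects outside a safe allowlist. Passing weights_only=False restores full unpickling for this trusted, self-downloaded metadata file. A sketch; the mixed dict and file name are hypothetical:

import numpy as np
import torch

meta = {"keypoints": torch.arange(10), "tri": np.zeros((3, 3))}
torch.save(meta, "meta.pt")  # hypothetical file

# Under the weights_only=True default this load can raise an UnpicklingError,
# e.g. because of the NumPy array; weights_only=False opts back into full pickle.
meta_loaded = torch.load("meta.pt", weights_only=False)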

                            Args

 se = (torch.linalg.norm(r1) + torch.linalg.norm(r2)) / 2.0
 r1 = r1 / torch.linalg.norm(r1)
 r2 = r2 / torch.linalg.norm(r2)
-r3 = torch.cross(r1, r2)
+r3 = torch.linalg.cross(r1, r2)
 re = torch.cat((r1, r2, r3), 0)
 return se, re, t3d
@@ -764,44 +421,6 @@
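On the torch.linalg.cross swap above: torch.cross without an explicit dim is deprecated because it picks the first dimension of size 3, while torch.linalg.cross defaults to dim=-1. For the (1, 3) row vectors in _p2srt both choices select the same axis, so the result is unchanged. A quick check:

import torch

r1 = torch.tensor([[1.0, 0.0, 0.0]])  # shape (1, 3), as in _p2srt
r2 = torch.tensor([[0.0, 1.0, 0.0]])

r3 = torch.linalg.cross(r1, r2)  # cross product along dim=-1
print(r3)  # tensor([[0., 0., 1.]])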

                            Returns

                            ImageData
                            ImageData object containing lmk3d, mesh and pose.
-@Timer("Lmk3DMeshPose.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                            -def run(self, data: ImageData) -> ImageData:
-    """Runs the Lmk3DMeshPose functionality: converts the face parameter vector to 3D landmarks, mesh and pose.
                            -
                            -    Adds the following attributes to the data object:
                            -
                            -    - landmark [[y, x, z], 68 (points)]
                            -    - mesh [[y, x, z], 53215 (points)]
                            -    - pose (Euler angles [yaw, pitch, roll] and translation [y, x, z])
                            -
                            -    Args:
                            -        data (ImageData): ImageData object containing most of the data including the predictions.
                            -
                            -    Returns:
                            -        ImageData: ImageData object containing lmk3d, mesh and pose.
                            -    """
                            -    for count, face in enumerate(data.faces):
                            -        assert "align" in face.preds.keys(), "align key not found in face.preds"
                            -        param = face.preds["align"].logits
                            -
                            -        roi_box = [face.loc.x1, face.loc.y1, face.loc.x2, face.loc.y2]
                            -
                            -        landmarks = self._compute_sparse_vert(param, roi_box, transform_space=True)
                            -        vertices = self._compute_dense_vert(param, roi_box, transform_space=True)
                            -        angles, translation = self._compute_pose(param, roi_box)
                            -
                            -        data.faces[count].preds["align"].other["lmk3d"] = landmarks
                            -        data.faces[count].preds["align"].other["mesh"] = vertices
                            -        data.faces[count].preds["align"].other["pose"] = dict(
                            -            angles=angles, translation=translation
                            -        )
                            -
                            -    return data
                            -

                            Inherited members

                            @@ -863,7 +482,6 @@

                            Inherited members


                            Index

                              @@ -887,7 +505,7 @@

-Generated by pdoc 0.10.0.
+Generated by pdoc 0.11.1.

-
\ No newline at end of file
+

diff --git a/docs/facetorch/analyzer/utilizer/draw.html b/docs/facetorch/analyzer/utilizer/draw.html
index c83aac0..fbf0963 100644
--- a/docs/facetorch/analyzer/utilizer/draw.html
+++ b/docs/facetorch/analyzer/utilizer/draw.html
@@ -2,18 +2,21 @@
 facetorch.analyzer.utilizer.draw API documentation
                              @@ -22,129 +25,6 @@

                              Module facetorch.analyzer.utilizer.draw

-import torch
                              -import torchvision
                              -from codetiming import Timer
                              -from facetorch.base import BaseUtilizer
                              -from facetorch.datastruct import ImageData
                              -from facetorch.logger import LoggerJsonFile
                              -from torchvision import transforms
                              -
                              -logger = LoggerJsonFile().logger
                              -
                              -
                              -class BoxDrawer(BaseUtilizer):
                              -    def __init__(
                              -        self,
                              -        transform: transforms.Compose,
                              -        device: torch.device,
                              -        optimize_transform: bool,
                              -        color: str,
                              -        line_width: int,
                              -    ):
                              -        """Initializes the BoxDrawer class. This class is used to draw the face boxes to the image tensor.
                              -
                              -        Args:
                              -            transform (Compose): Composed Torch transform object.
                              -            device (torch.device): Torch device cpu or cuda object.
                              -            optimize_transform (bool): Whether to optimize the transform.
                              -            color (str): Color of the boxes.
                              -            line_width (int): Line width of the boxes.
                              -
                              -        """
                              -        super().__init__(transform, device, optimize_transform)
                              -        self.color = color
                              -        self.line_width = line_width
                              -
                              -    @Timer("BoxDrawer.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                              -    def run(self, data: ImageData) -> ImageData:
                              -        """Draws face boxes to the image tensor.
                              -
                              -        Args:
                              -            data (ImageData): ImageData object containing the image tensor and face locations.
                              -        Returns:
                              -            ImageData: ImageData object containing the image tensor with face boxes.
                              -        """
                              -        loc_tensor = data.aggregate_loc_tensor()
                              -        labels = [str(face.indx) for face in data.faces]
                              -        data.img = torchvision.utils.draw_bounding_boxes(
                              -            image=data.img,
                              -            boxes=loc_tensor,
                              -            labels=labels,
                              -            colors=self.color,
                              -            width=self.line_width,
                              -        )
                              -
                              -        return data
                              -
                              -
                              -class LandmarkDrawerTorch(BaseUtilizer):
                              -    def __init__(
                              -        self,
                              -        transform: transforms.Compose,
                              -        device: torch.device,
                              -        optimize_transform: bool,
                              -        width: int,
                              -        color: str,
                              -    ):
-        """Initializes the LandmarkDrawerTorch class. This class is used to draw the 3D face landmarks to the image tensor.
                              -
                              -        Args:
                              -            transform (Compose): Composed Torch transform object.
                              -            device (torch.device): Torch device cpu or cuda object.
                              -            optimize_transform (bool): Whether to optimize the transform.
                              -            width (int): Marker keypoint width.
                              -            color (str): Marker color.
                              -
                              -        """
                              -        super().__init__(transform, device, optimize_transform)
                              -        self.width = width
                              -        self.color = color
                              -
                              -    @Timer("LandmarkDrawer.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                              -    def run(self, data: ImageData) -> ImageData:
                              -        """Draws 3D face landmarks to the image tensor.
                              -
                              -        Args:
                              -            data (ImageData): ImageData object containing the image tensor and 3D face landmarks.
                              -        Returns:
                              -            ImageData: ImageData object containing the image tensor with 3D face landmarks.
                              -        """
                              -        data = self._draw_landmarks(data)
                              -
                              -        return data
                              -
                              -    def _draw_landmarks(self, data: ImageData) -> ImageData:
                              -        """Draws 3D face landmarks to the image tensor.
                              -
                              -        Args:
                              -            data (ImageData): ImageData object containing the image tensor, 3D face landmarks, and faces.
                              -
                              -        Returns:
                              -            (ImageData): ImageData object containing the image tensor with 3D face landmarks.
                              -        """
                              -
                              -        if len(data.faces) > 0:
                              -            pts = [face.preds["align"].other["lmk3d"].cpu() for face in data.faces]
                              -
                              -            img_in = data.img.clone()
                              -            pts = torch.stack(pts)
                              -            pts = torch.swapaxes(pts, 2, 1)
                              -
                              -            img_out = torchvision.utils.draw_keypoints(
                              -                img_in,
                              -                pts,
                              -                colors=self.color,
                              -                radius=self.width,
                              -            )
                              -            data.img = img_out
                              -
                              -        return data
                              -
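A note on the listing above: torchvision.utils.draw_keypoints documents keypoints of shape (num_instances, K, 2) in (x, y) order, which is why the stacked landmark tensors are axis-swapped before drawing. A minimal sketch with dummy data, keeping only the first two coordinate rows:

import torch
import torchvision

image = torch.zeros((3, 100, 100), dtype=torch.uint8)  # dummy uint8 image
lmk3d = torch.rand(3, 68) * 100                        # stand-in landmark rows

pts = lmk3d[:2].T.unsqueeze(0)  # (1, 68, 2), per the documented shape
out = torchvision.utils.draw_keypoints(image, pts, colors="green", radius=2)
print(out.shape)  # torch.Size([3, 100, 100])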
                              @@ -244,31 +124,6 @@

                              Returns

                              ImageData
                              ImageData object containing the image tensor with face boxes.
-@Timer("BoxDrawer.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                              -def run(self, data: ImageData) -> ImageData:
                              -    """Draws face boxes to the image tensor.
                              -
                              -    Args:
                              -        data (ImageData): ImageData object containing the image tensor and face locations.
                              -    Returns:
                              -        ImageData: ImageData object containing the image tensor with face boxes.
                              -    """
                              -    loc_tensor = data.aggregate_loc_tensor()
                              -    labels = [str(face.indx) for face in data.faces]
                              -    data.img = torchvision.utils.draw_bounding_boxes(
                              -        image=data.img,
                              -        boxes=loc_tensor,
                              -        labels=labels,
                              -        colors=self.color,
                              -        width=self.line_width,
                              -    )
                              -
                              -    return data
                              -

                              Inherited members

                              @@ -388,23 +243,6 @@

                              Returns

                              ImageData
                              ImageData object containing the image tensor with 3D face landmarks.
-@Timer("LandmarkDrawer.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                              -def run(self, data: ImageData) -> ImageData:
                              -    """Draws 3D face landmarks to the image tensor.
                              -
                              -    Args:
                              -        data (ImageData): ImageData object containing the image tensor and 3D face landmarks.
                              -    Returns:
                              -        ImageData: ImageData object containing the image tensor with 3D face landmarks.
                              -    """
                              -    data = self._draw_landmarks(data)
                              -
                              -    return data
                              -

                              Inherited members

                              @@ -466,7 +304,6 @@

                              Inherited members


                              Index

                                @@ -496,7 +333,7 @@

-Generated by pdoc 0.10.0.
+Generated by pdoc 0.11.1.

-
\ No newline at end of file
+

diff --git a/docs/facetorch/analyzer/utilizer/index.html b/docs/facetorch/analyzer/utilizer/index.html
index e2f17cc..580aa71 100644
--- a/docs/facetorch/analyzer/utilizer/index.html
+++ b/docs/facetorch/analyzer/utilizer/index.html
@@ -2,18 +2,21 @@
 facetorch.analyzer.utilizer API documentation
                                @@ -22,17 +25,6 @@

                                Module facetorch.analyzer.utilizer

-from .align import Lmk3DMeshPose
                                -from .draw import BoxDrawer, LandmarkDrawerTorch
                                -from .save import ImageSaver
                                -
                                -
                                -__all__ = ["Lmk3DMeshPose", "BoxDrawer", "LandmarkDrawerTorch", "ImageSaver"]
                                -

                                Sub-modules

                                @@ -107,7 +99,10 @@

                                Args

 if not os.path.exists(self.downloader_meta.path_local):
     self.downloader_meta.run()

-self.meta = torch.load(self.downloader_meta.path_local)
+self.meta = torch.load(
+    self.downloader_meta.path_local,
+    weights_only=False,
+)

 for key in self.meta.keys():
     if isinstance(self.meta[key], torch.Tensor):
@@ -290,7 +285,7 @@

                                Args

 se = (torch.linalg.norm(r1) + torch.linalg.norm(r2)) / 2.0
 r1 = r1 / torch.linalg.norm(r1)
 r2 = r2 / torch.linalg.norm(r2)
-r3 = torch.cross(r1, r2)
+r3 = torch.linalg.cross(r1, r2)
 re = torch.cat((r1, r2, r3), 0)
 return se, re, t3d
@@ -441,44 +436,6 @@

                                Returns

                                ImageData
                                ImageData object containing lmk3d, mesh and pose.
-@Timer("Lmk3DMeshPose.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                                -def run(self, data: ImageData) -> ImageData:
-    """Runs the Lmk3DMeshPose functionality: converts the face parameter vector to 3D landmarks, mesh and pose.
                                -
                                -    Adds the following attributes to the data object:
                                -
                                -    - landmark [[y, x, z], 68 (points)]
                                -    - mesh [[y, x, z], 53215 (points)]
                                -    - pose (Euler angles [yaw, pitch, roll] and translation [y, x, z])
                                -
                                -    Args:
                                -        data (ImageData): ImageData object containing most of the data including the predictions.
                                -
                                -    Returns:
                                -        ImageData: ImageData object containing lmk3d, mesh and pose.
                                -    """
                                -    for count, face in enumerate(data.faces):
                                -        assert "align" in face.preds.keys(), "align key not found in face.preds"
                                -        param = face.preds["align"].logits
                                -
                                -        roi_box = [face.loc.x1, face.loc.y1, face.loc.x2, face.loc.y2]
                                -
                                -        landmarks = self._compute_sparse_vert(param, roi_box, transform_space=True)
                                -        vertices = self._compute_dense_vert(param, roi_box, transform_space=True)
                                -        angles, translation = self._compute_pose(param, roi_box)
                                -
                                -        data.faces[count].preds["align"].other["lmk3d"] = landmarks
                                -        data.faces[count].preds["align"].other["mesh"] = vertices
                                -        data.faces[count].preds["align"].other["pose"] = dict(
                                -            angles=angles, translation=translation
                                -        )
                                -
                                -    return data
                                -

                                Inherited members

                                @@ -579,31 +536,6 @@

                                Returns

                                ImageData
                                ImageData object containing the image tensor with face boxes.
-@Timer("BoxDrawer.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                                -def run(self, data: ImageData) -> ImageData:
                                -    """Draws face boxes to the image tensor.
                                -
                                -    Args:
                                -        data (ImageData): ImageData object containing the image tensor and face locations.
                                -    Returns:
                                -        ImageData: ImageData object containing the image tensor with face boxes.
                                -    """
                                -    loc_tensor = data.aggregate_loc_tensor()
                                -    labels = [str(face.indx) for face in data.faces]
                                -    data.img = torchvision.utils.draw_bounding_boxes(
                                -        image=data.img,
                                -        boxes=loc_tensor,
                                -        labels=labels,
                                -        colors=self.color,
                                -        width=self.line_width,
                                -    )
                                -
                                -    return data
                                -

                                Inherited members

                                @@ -723,23 +655,6 @@

                                Returns

                                ImageData
                                ImageData object containing the image tensor with 3D face landmarks.
-@Timer("LandmarkDrawer.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                                -def run(self, data: ImageData) -> ImageData:
                                -    """Draws 3D face landmarks to the image tensor.
                                -
                                -    Args:
                                -        data (ImageData): ImageData object containing the image tensor and 3D face landmarks.
                                -    Returns:
                                -        ImageData: ImageData object containing the image tensor with 3D face landmarks.
                                -    """
                                -    data = self._draw_landmarks(data)
                                -
                                -    return data
                                -

                                Inherited members

                                @@ -826,27 +741,6 @@

                                Returns

                                ImageData
                                ImageData object containing the same data as the input.
                                -
-@Timer("ImageSaver.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                                -def run(self, data: ImageData) -> ImageData:
                                -    """Saves the image tensor to an image file, if the path_output attribute of ImageData is not None.
                                -
                                -    Args:
                                -        data (ImageData): ImageData object containing the img tensor.
                                -
                                -    Returns:
                                -        ImageData: ImageData object containing the same data as the input.
                                -    """
                                -    if data.path_output is not None:
                                -        os.makedirs(os.path.dirname(data.path_output), exist_ok=True)
                                -        pil_image = torchvision.transforms.functional.to_pil_image(data.img)
                                -        pil_image.save(data.path_output)
                                -
                                -    return data
                                -
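The same save path can be exercised outside the pipeline. This sketch converts a CHW uint8 tensor to a PIL image and writes it; the tensor contents and output path are placeholders.

import os

import torch
import torchvision

img = torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8)  # stand-in image
os.makedirs("output", exist_ok=True)
pil_image = torchvision.transforms.functional.to_pil_image(img)
pil_image.save("output/result.png")  # hypothetical output path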

                                Inherited members

                                @@ -908,7 +802,6 @@

                                Inherited members


                                Index

                                  @@ -957,7 +850,7 @@

                                  -

                                  Generated by pdoc 0.10.0.

                                  +

                                  Generated by pdoc 0.11.1.

- \ No newline at end of file
+
diff --git a/docs/facetorch/analyzer/utilizer/save.html b/docs/facetorch/analyzer/utilizer/save.html
index d86fcaf..b415ca8 100644
--- a/docs/facetorch/analyzer/utilizer/save.html
+++ b/docs/facetorch/analyzer/utilizer/save.html
@@ -2,18 +2,21 @@
(head block updated: meta, link, and script tags around the title "facetorch.analyzer.utilizer.save API documentation")
                                  @@ -22,56 +25,6 @@

                                  Module facetorch.analyzer.utilizer.save

                                  -
-import os
                                  -import torch
                                  -import torchvision
                                  -from codetiming import Timer
                                  -from facetorch.base import BaseUtilizer
                                  -from facetorch.datastruct import ImageData
                                  -from facetorch.logger import LoggerJsonFile
                                  -from torchvision import transforms
                                  -
                                  -logger = LoggerJsonFile().logger
                                  -
                                  -
                                  -class ImageSaver(BaseUtilizer):
                                  -    def __init__(
                                  -        self,
                                  -        transform: transforms.Compose,
                                  -        device: torch.device,
                                  -        optimize_transform: bool,
                                  -    ):
                                  -        """Initializes the ImageSaver class. This class is used to save the image tensor to an image file.
                                  -
                                  -        Args:
                                  -            transform (Compose): Composed Torch transform object.
                                  -            device (torch.device): Torch device cpu or cuda object.
                                  -            optimize_transform (bool): Whether to optimize the transform.
                                  -
                                  -        """
                                  -        super().__init__(transform, device, optimize_transform)
                                  -
                                  -    @Timer("ImageSaver.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                                  -    def run(self, data: ImageData) -> ImageData:
                                  -        """Saves the image tensor to an image file, if the path_output attribute of ImageData is not None.
                                  -
                                  -        Args:
                                  -            data (ImageData): ImageData object containing the img tensor.
                                  -
                                  -        Returns:
                                  -            ImageData: ImageData object containing the same data as the input.
                                  -        """
                                  -        if data.path_output is not None:
                                  -            os.makedirs(os.path.dirname(data.path_output), exist_ok=True)
                                  -            pil_image = torchvision.transforms.functional.to_pil_image(data.img)
                                  -            pil_image.save(data.path_output)
                                  -
                                  -        return data
                                  -
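A hypothetical instantiation of the class above; in facetorch these constructor arguments normally come from the Hydra config, so the values here are placeholders.

import torch
from torchvision import transforms

saver = ImageSaver(
    transform=transforms.Compose([]),  # no-op placeholder transform
    device=torch.device("cpu"),
    optimize_transform=False,
)
# `data` is an existing ImageData; run() writes the image if path_output is set.
data.path_output = "output/result.png"
data = saver.run(data)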
                                  @@ -157,27 +110,6 @@

                                  Returns

                                  ImageData
                                  ImageData object containing the same data as the input.
                                  -
-@Timer("ImageSaver.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                                  -def run(self, data: ImageData) -> ImageData:
                                  -    """Saves the image tensor to an image file, if the path_output attribute of ImageData is not None.
                                  -
                                  -    Args:
                                  -        data (ImageData): ImageData object containing the img tensor.
                                  -
                                  -    Returns:
                                  -        ImageData: ImageData object containing the same data as the input.
                                  -    """
                                  -    if data.path_output is not None:
                                  -        os.makedirs(os.path.dirname(data.path_output), exist_ok=True)
                                  -        pil_image = torchvision.transforms.functional.to_pil_image(data.img)
                                  -        pil_image.save(data.path_output)
                                  -
                                  -    return data
                                  -

                                  Inherited members

                                  @@ -239,7 +171,6 @@

                                  Inherited members


                                  Index

                                    @@ -263,7 +194,7 @@

                                    -

                                    Generated by pdoc 0.10.0.

                                    +

                                    Generated by pdoc 0.11.1.

- \ No newline at end of file
+
diff --git a/docs/facetorch/base.html b/docs/facetorch/base.html
index 29cb236..90acaf0 100644
--- a/docs/facetorch/base.html
+++ b/docs/facetorch/base.html
@@ -2,18 +2,21 @@
(head block updated: meta, link, and script tags around the title "facetorch.base API documentation")
                                    @@ -22,270 +25,6 @@

                                    Module facetorch.base

                                    -
-import os
                                    -import copy
                                    -from abc import ABCMeta, abstractmethod
                                    -from typing import Optional, Tuple, Union
                                    -
                                    -import torch
                                    -from codetiming import Timer
                                    -from torchvision import transforms
                                    -
                                    -from facetorch import utils
                                    -from facetorch.datastruct import ImageData
                                    -from facetorch.logger import LoggerJsonFile
                                    -from facetorch.transforms import script_transform
                                    -
                                    -logger = LoggerJsonFile().logger
                                    -
                                    -
                                    -class BaseProcessor(object, metaclass=ABCMeta):
                                    -    @Timer(
                                    -        "BaseProcessor.__init__", "{name}: {milliseconds:.2f} ms", logger=logger.debug
                                    -    )
                                    -    def __init__(
                                    -        self,
                                    -        transform: Optional[transforms.Compose],
                                    -        device: torch.device,
                                    -        optimize_transform: bool,
                                    -    ):
                                    -        """Base class for processors.
                                    -
-        All data pre- and post-processors should subclass it.
-        All subclasses should override:
-
-        - Methods: ``run``, used to run the processing functionality.
                                    -
                                    -        Args:
                                    -            device (torch.device): Torch device cpu or cuda.
                                    -            transform (transforms.Compose): Transform compose object to be applied to the image.
                                    -            optimize_transform (bool): Whether to optimize the transform.
                                    -
                                    -        """
                                    -        super().__init__()
                                    -        self.device = device
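-        # The transform may arrive as the string "None" from the config; normalize it to None.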
                                    -        self.transform = transform if transform != "None" else None
                                    -        self.optimize_transform = optimize_transform
                                    -
                                    -        if self.transform is not None:
                                    -            self.transform = utils.fix_transform_list_attr(self.transform)
                                    -
                                    -        if self.optimize_transform is True:
                                    -            self.optimize()
                                    -
                                    -    def optimize(self):
                                    -        """Optimizes the transform using torch.jit and deploys it to the device."""
                                    -        if self.transform is not None:
                                    -            self.transform = script_transform(self.transform)
                                    -            self.transform = self.transform.to(self.device)
                                    -
                                    -    @abstractmethod
                                    -    def run(self):
                                    -        """Abstract method that should implement a tensor processing functionality"""
                                    -
                                    -
                                    -class BaseReader(BaseProcessor):
                                    -    @Timer("BaseReader.__init__", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                                    -    def __init__(
                                    -        self,
                                    -        transform: transforms.Compose,
                                    -        device: torch.device,
                                    -        optimize_transform: bool,
                                    -    ):
                                    -        """Base class for image reader.
                                    -
                                    -        All image readers should subclass it.
-        All subclasses should override:
-
-        - Methods: ``run``, used to run the reading process and return a tensor.
                                    -
                                    -        Args:
                                    -            transform (transforms.Compose): Transform to be applied to the image.
                                    -            device (torch.device): Torch device cpu or cuda.
-            optimize_transform (bool): Whether to optimize the transforms that resize
-                the image to a fixed size.
                                    -
                                    -        """
                                    -        super().__init__(transform, device, optimize_transform)
                                    -        self.device = device
                                    -        self.optimize_transform = optimize_transform
                                    -
                                    -    @abstractmethod
                                    -    def run(self, path: str) -> ImageData:
                                    -        """Abstract method that reads an image from a path and returns a data object containing
                                    -        a tensor of the image with
                                    -         shape (batch, channels, height, width).
                                    -
                                    -        Args:
                                    -            path (str): Path to the image.
                                    -
                                    -        Returns:
                                    -            ImageData: ImageData object with the image tensor.
                                    -        """
                                    -        pass
                                    -
                                    -    def process_tensor(self, tensor: torch.Tensor, fix_img_size: bool) -> ImageData:
                                    -        """Read a tensor and return a data object containing a tensor of the image with
                                    -        shape (batch, channels, height, width).
                                    -
                                    -        Args:
                                    -            tensor (torch.Tensor): Tensor of a single image with RGB values between 0-255 and shape (channels, height, width).
                                    -            fix_img_size (bool): Whether to resize the image to a fixed size. If False, the size_portrait and size_landscape are ignored. Default is False.
                                    -        """
                                    -
                                    -        data = ImageData(path_input=None)
                                    -        data.tensor = copy.deepcopy(tensor)
                                    -
                                    -        if tensor.dim() == 3:
                                    -            data.tensor = data.tensor.unsqueeze(0)
                                    -
                                    -        data.tensor = data.tensor.to(self.device)
                                    -
                                    -        if fix_img_size:
                                    -            data.tensor = self.transform(data.tensor)
                                    -
                                    -        data.img = data.tensor.squeeze(0).cpu()
                                    -        data.tensor = data.tensor.type(torch.float32)
                                    -        data.set_dims()
                                    -
                                    -        return data
                                    -
                                    -
                                    -class BaseDownloader(object, metaclass=ABCMeta):
                                    -    @Timer(
                                    -        "BaseDownloader.__init__", "{name}: {milliseconds:.2f} ms", logger=logger.debug
                                    -    )
                                    -    def __init__(
                                    -        self,
                                    -        file_id: str,
                                    -        path_local: str,
                                    -    ):
                                    -        """Base class for downloaders.
                                    -
                                    -        All downloaders should subclass it.
-        All subclasses should override:
-
-        - Methods: ``run``, which implements the download functionality.
                                    -
                                    -        Args:
                                    -            file_id (str): ID of the hosted file (e.g. Google Drive File ID).
                                    -            path_local (str): The file is downloaded to this local path.
                                    -
                                    -        """
                                    -        super().__init__()
                                    -        self.file_id = file_id
                                    -        self.path_local = path_local
                                    -
                                    -    @abstractmethod
                                    -    def run(self) -> None:
                                    -        """Abstract method that should implement the download functionality"""
                                    -
                                    -
                                    -class BaseModel(object, metaclass=ABCMeta):
                                    -    @Timer("BaseModel.__init__", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                                    -    def __init__(self, downloader: BaseDownloader, device: torch.device):
                                    -        """Base class for torch models.
                                    -
                                    -        All detectors and predictors should subclass it.
-        All subclasses should override:
-
-        - Methods: ``run``, which makes detections and predictions with the model.
                                    -
                                    -        Args:
                                    -            downloader (BaseDownloader): Downloader for the model.
                                    -            device (torch.device): Torch device cpu or cuda.
                                    -
                                    -        Attributes:
                                    -            model (torch.jit.ScriptModule or torch.jit.TracedModule): Loaded TorchScript model.
                                    -
                                    -        """
                                    -        super().__init__()
                                    -        self.downloader = downloader
                                    -        self.path_local = self.downloader.path_local
                                    -        self.device = device
                                    -
                                    -        self.model = self.load_model()
                                    -
                                    -    @Timer("BaseModel.load_model", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                                    -    def load_model(self) -> Union[torch.jit.ScriptModule, torch.jit.TracedModule]:
                                    -        """Loads the TorchScript model.
                                    -
                                    -        Returns:
                                    -            Union[torch.jit.ScriptModule, torch.jit.TracedModule]: Loaded TorchScript model.
                                    -        """
                                    -        if not os.path.exists(self.path_local):
                                    -            dir_local = os.path.dirname(self.path_local)
                                    -            os.makedirs(dir_local, exist_ok=True)
                                    -            self.downloader.run()
                                    -        model = torch.jit.load(self.path_local, map_location=self.device)
                                    -        model.eval()
                                    -
                                    -        return model
                                    -
                                    -    @Timer("BaseModel.inference", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                                    -    def inference(
                                    -        self, tensor: torch.Tensor
                                    -    ) -> Union[torch.Tensor, Tuple[torch.Tensor]]:
                                    -        """Inference the model with the given tensor.
                                    -
                                    -        Args:
                                    -            tensor (torch.Tensor): Input tensor for the model.
                                    -
                                    -        Returns:
                                    -            Union[torch.Tensor, Tuple[torch.Tensor]]: Output tensor or tuple of tensors.
                                    -        """
                                    -        with torch.no_grad():
                                    -            if tensor.device != self.device:
                                    -                tensor = tensor.to(self.device)
                                    -
                                    -            logits = self.model(tensor)
                                    -
                                    -        return logits
                                    -
                                    -    @abstractmethod
                                    -    def run(self):
                                    -        """Abstract method for making the predictions. Example pipeline:
                                    -
                                    -        - self.preprocessor.run
                                    -        - self.inference
                                    -        - self.postprocessor.run
                                    -
                                    -        """
                                    -
                                    -
                                    -class BaseUtilizer(BaseProcessor):
                                    -    def __init__(
                                    -        self,
                                    -        transform: transforms.Compose,
                                    -        device: torch.device,
                                    -        optimize_transform: bool,
                                    -    ):
                                    -        """BaseUtilizer is a processor that takes ImageData as input to do any kind of work that requires model predictions for example, drawing, summarizing, etc.
                                    -
                                    -        Args:
                                    -            transform (Compose): Composed Torch transform object.
                                    -            device (torch.device): Torch device cpu or cuda object.
                                    -            optimize_transform (bool): Whether to optimize the transform.
                                    -        """
                                    -        super().__init__(transform, device, optimize_transform)
                                    -
                                    -    @abstractmethod
                                    -    def run(self, data: ImageData) -> ImageData:
                                    -        """Runs utility function on the ImageData object.
                                    -
                                    -        Args:
                                    -            data (ImageData): ImageData object containing most of the data including the predictions.
                                    -
                                    -        Returns:
                                    -            ImageData: ImageData object containing the same data as input or modified object.
                                    -        """
                                    -
                                    -        return data
                                    -
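To make the BaseUtilizer contract concrete, here is a minimal, hypothetical subclass that uses only names defined in the module above; the class and its behaviour are illustrative, not part of facetorch.

class FaceCounter(BaseUtilizer):
    """Hypothetical utilizer that logs how many faces were detected."""

    def run(self, data: ImageData) -> ImageData:
        logger.debug("FaceCounter: %d face(s)", len(data.faces))
        return data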
                                    @@ -381,30 +120,12 @@

                                    Methods

                                    Optimizes the transform using torch.jit and deploys it to the device.

                                    -
-def optimize(self):
                                    -    """Optimizes the transform using torch.jit and deploys it to the device."""
                                    -    if self.transform is not None:
                                    -        self.transform = script_transform(self.transform)
                                    -        self.transform = self.transform.to(self.device)
                                    -
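The snippet below sketches the idea behind script_transform: torchvision transforms can be compiled with TorchScript when chained in torch.nn.Sequential (a plain transforms.Compose is not scriptable). The specific transforms are placeholders.

import torch
from torchvision import transforms

# Scriptable transform chain, then deployed to a device as optimize() does.
seq = torch.nn.Sequential(
    transforms.Resize((224, 224), antialias=True),
    transforms.ConvertImageDtype(torch.float32),
)
scripted = torch.jit.script(seq).to(torch.device("cpu"))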
                                    def run(self)

                                    Abstract method that should implement a tensor processing functionality

                                    -
-@abstractmethod
                                    -def run(self):
                                    -    """Abstract method that should implement a tensor processing functionality"""
                                    -
                                    @@ -528,24 +249,6 @@

                                    Returns

                                    ImageData
                                    ImageData object with the image tensor.
                                    -
-@abstractmethod
                                    -def run(self, path: str) -> ImageData:
                                    -    """Abstract method that reads an image from a path and returns a data object containing
                                    -    a tensor of the image with
                                    -     shape (batch, channels, height, width).
                                    -
                                    -    Args:
                                    -        path (str): Path to the image.
                                    -
                                    -    Returns:
                                    -        ImageData: ImageData object with the image tensor.
                                    -    """
                                    -    pass
                                    -
def process_tensor(self, tensor: torch.Tensor, fix_img_size: bool) ‑> ImageData

@@ -560,36 +263,6 @@

                                    Args

                                    fix_img_size : bool
                                    Whether to resize the image to a fixed size. If False, the size_portrait and size_landscape are ignored. Default is False.
                                    -
-def process_tensor(self, tensor: torch.Tensor, fix_img_size: bool) -> ImageData:
                                    -    """Read a tensor and return a data object containing a tensor of the image with
                                    -    shape (batch, channels, height, width).
                                    -
                                    -    Args:
                                    -        tensor (torch.Tensor): Tensor of a single image with RGB values between 0-255 and shape (channels, height, width).
                                    -        fix_img_size (bool): Whether to resize the image to a fixed size. If False, the size_portrait and size_landscape are ignored. Default is False.
                                    -    """
                                    -
                                    -    data = ImageData(path_input=None)
                                    -    data.tensor = copy.deepcopy(tensor)
                                    -
                                    -    if tensor.dim() == 3:
                                    -        data.tensor = data.tensor.unsqueeze(0)
                                    -
                                    -    data.tensor = data.tensor.to(self.device)
                                    -
                                    -    if fix_img_size:
                                    -        data.tensor = self.transform(data.tensor)
                                    -
                                    -    data.img = data.tensor.squeeze(0).cpu()
                                    -    data.tensor = data.tensor.type(torch.float32)
                                    -    data.set_dims()
                                    -
                                    -    return data
                                    -
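A usage sketch for process_tensor on a concrete BaseReader subclass (the `reader` instance is hypothetical); a random uint8 image stands in for a decoded file.

import torch

tensor = torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8)
data = reader.process_tensor(tensor, fix_img_size=False)
print(data.tensor.shape)                  # torch.Size([1, 3, 480, 640])
print(data.dims.height, data.dims.width)  # 480 640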

                                    Inherited members

                                    @@ -663,14 +336,6 @@

                                    Methods

                                    Abstract method that should implement the download functionality

                                    -
-@abstractmethod
                                    -def run(self) -> None:
                                    -    """Abstract method that should implement the download functionality"""
                                    -
                                    @@ -789,26 +454,6 @@

                                    Returns

                                    Union[torch.jit.ScriptModule, torch.jit.TracedModule]
                                    Loaded TorchScript model.
                                    -
-@Timer("BaseModel.load_model", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                                    -def load_model(self) -> Union[torch.jit.ScriptModule, torch.jit.TracedModule]:
                                    -    """Loads the TorchScript model.
                                    -
                                    -    Returns:
                                    -        Union[torch.jit.ScriptModule, torch.jit.TracedModule]: Loaded TorchScript model.
                                    -    """
                                    -    if not os.path.exists(self.path_local):
                                    -        dir_local = os.path.dirname(self.path_local)
                                    -        os.makedirs(dir_local, exist_ok=True)
                                    -        self.downloader.run()
                                    -    model = torch.jit.load(self.path_local, map_location=self.device)
                                    -    model.eval()
                                    -
                                    -    return model
                                    -
def inference(self, tensor: torch.Tensor) ‑> Union[torch.Tensor, Tuple[torch.Tensor]]

@@ -825,30 +470,6 @@

                                    Returns

                                    Union[torch.Tensor, Tuple[torch.Tensor]]
                                    Output tensor or tuple of tensors.
                                    -
-@Timer("BaseModel.inference", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                                    -def inference(
                                    -    self, tensor: torch.Tensor
                                    -) -> Union[torch.Tensor, Tuple[torch.Tensor]]:
                                    -    """Inference the model with the given tensor.
                                    -
                                    -    Args:
                                    -        tensor (torch.Tensor): Input tensor for the model.
                                    -
                                    -    Returns:
                                    -        Union[torch.Tensor, Tuple[torch.Tensor]]: Output tensor or tuple of tensors.
                                    -    """
                                    -    with torch.no_grad():
                                    -        if tensor.device != self.device:
                                    -            tensor = tensor.to(self.device)
                                    -
                                    -        logits = self.model(tensor)
                                    -
                                    -    return logits
                                    -
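Called on any concrete BaseModel subclass (the `model` instance is hypothetical), inference moves the batch to the model's device and runs it without tracking gradients:

import torch

batch = torch.rand(1, 3, 224, 224)  # hypothetical input shape
logits = model.inference(batch)     # tensor or tuple of tensors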
def run(self)

@@ -860,20 +481,6 @@

Abstract method for making the predictions. Example pipeline:

• self.preprocessor.run
• self.inference
• self.postprocessor.run
-
-@abstractmethod
                                    -def run(self):
                                    -    """Abstract method for making the predictions. Example pipeline:
                                    -
                                    -    - self.preprocessor.run
                                    -    - self.inference
                                    -    - self.postprocessor.run
                                    -
                                    -    """
                                    -
                                    @@ -953,23 +560,6 @@

                                    Returns

                                    ImageData
                                    ImageData object containing the same data as input or modified object.
                                    -
-@abstractmethod
                                    -def run(self, data: ImageData) -> ImageData:
                                    -    """Runs utility function on the ImageData object.
                                    -
                                    -    Args:
                                    -        data (ImageData): ImageData object containing most of the data including the predictions.
                                    -
                                    -    Returns:
                                    -        ImageData: ImageData object containing the same data as input or modified object.
                                    -    """
                                    -
                                    -    return data
                                    -

                                    Inherited members

                                    @@ -1031,7 +621,6 @@

                                    Inherited members


                                    Index

                                      @@ -1083,7 +672,7 @@

                                      -

                                      Generated by pdoc 0.10.0.

                                      +

                                      Generated by pdoc 0.11.1.

- \ No newline at end of file
+
diff --git a/docs/facetorch/datastruct.html b/docs/facetorch/datastruct.html
index 197092a..0c5fa60 100644
--- a/docs/facetorch/datastruct.html
+++ b/docs/facetorch/datastruct.html
@@ -2,18 +2,21 @@
(head block updated: meta, link, and script tags around the title "facetorch.datastruct API documentation")
                                      @@ -22,268 +25,6 @@

                                      Module facetorch.datastruct

                                      -
-from dataclasses import dataclass, field
                                      -from typing import Dict, List, Optional
                                      -
                                      -import torch
                                      -from codetiming import Timer
                                      -
                                      -from facetorch.logger import LoggerJsonFile
                                      -
                                      -logger = LoggerJsonFile().logger
                                      -
                                      -
                                      -@dataclass
                                      -class Dimensions:
                                      -    """Data class for image dimensions.
                                      -
                                      -    Attributes:
                                      -        height (int): Image height.
                                      -        width (int): Image width.
                                      -    """
                                      -
                                      -    height: int = field(default=0)
                                      -    width: int = field(default=0)
                                      -
                                      -
                                      -@dataclass
                                      -class Location:
                                      -    """Data class for face location.
                                      -
                                      -    Attributes:
                                      -        x1 (int): x1 coordinate
                                      -        x2 (int): x2 coordinate
                                      -        y1 (int): y1 coordinate
                                      -        y2 (int): y2 coordinate
                                      -    """
                                      -
                                      -    x1: int = field(default=0)
                                      -    x2: int = field(default=0)
                                      -    y1: int = field(default=0)
                                      -    y2: int = field(default=0)
                                      -
                                      -    def form_square(self) -> None:
                                      -        """Form a square from the location.
                                      -
                                      -        Returns:
                                      -            None
                                      -        """
                                      -        height = self.y2 - self.y1
                                      -        width = self.x2 - self.x1
                                      -
                                      -        if height > width:
                                      -            diff = height - width
                                      -            self.x1 = self.x1 - int(diff / 2)
                                      -            self.x2 = self.x2 + int(diff / 2)
                                      -        elif height < width:
                                      -            diff = width - height
                                      -            self.y1 = self.y1 - int(diff / 2)
                                      -            self.y2 = self.y2 + int(diff / 2)
                                      -        else:
                                      -            pass
                                      -
                                      -    def expand(self, amount: float) -> None:
                                      -        """Expand the location while keeping the center.
                                      -
                                      -        Args:
                                      -            amount (float): Amount to expand the location by in multiples of the original size.
-
                                      -        Returns:
                                      -            None
                                      -        """
                                      -        assert amount >= 0, "Amount must be greater than or equal to 0."
-        # Capture width and height before mutating the coordinates so the
-        # expansion stays symmetric around the original center.
-        if amount != 0.0:
-            width = self.x2 - self.x1
-            height = self.y2 - self.y1
-            self.x1 = self.x1 - int(width / 2 * amount)
-            self.y1 = self.y1 - int(height / 2 * amount)
-            self.x2 = self.x2 + int(width / 2 * amount)
-            self.y2 = self.y2 + int(height / 2 * amount)
                                      -
                                      -
                                      -@dataclass
                                      -class Prediction:
                                      -    """Data class for face prediction results and derivatives.
                                      -
                                      -    Attributes:
                                      -        label (str): Label of the face given by predictor.
                                      -        logits (torch.Tensor): Output of the predictor model for the face.
                                      -        other (Dict): Any other predictions and derivatives for the face.
                                      -    """
                                      -
                                      -    label: str = field(default_factory=str)
                                      -    logits: torch.Tensor = field(default_factory=torch.Tensor)
                                      -    other: Dict = field(default_factory=dict)
                                      -
                                      -
                                      -@dataclass
                                      -class Detection:
                                      -    """Data class for detector output.
                                      -
                                      -    Attributes:
                                      -        loc (torch.Tensor): Locations of faces
                                      -        conf (torch.Tensor): Confidences of faces
                                      -        landmarks (torch.Tensor): Landmarks of faces
                                      -        boxes (torch.Tensor): Bounding boxes of faces
                                      -        dets (torch.Tensor): Detections of faces
                                      -
                                      -    """
                                      -
                                      -    loc: torch.Tensor = field(default_factory=torch.Tensor)
                                      -    conf: torch.Tensor = field(default_factory=torch.Tensor)
                                      -    landmarks: torch.Tensor = field(default_factory=torch.Tensor)
                                      -    boxes: torch.Tensor = field(default_factory=torch.Tensor)
                                      -    dets: torch.Tensor = field(default_factory=torch.Tensor)
                                      -
                                      -
                                      -@dataclass
                                      -class Face:
                                      -    """Data class for face attributes.
                                      -
                                      -    Attributes:
                                      -        indx (int): Index of the face.
                                      -        loc (Location): Location of the face in the image.
                                      -        dims (Dimensions): Dimensions of the face (height, width).
                                      -        tensor (torch.Tensor): Face tensor.
                                      -        ratio (float): Ratio of the face area to the image area.
                                      -        preds (Dict[str, Prediction]): Predictions of the face given by predictor set.
                                      -    """
                                      -
                                      -    indx: int = field(default_factory=int)
                                      -    loc: Location = field(default_factory=Location)
                                      -    dims: Dimensions = field(default_factory=Dimensions)
                                      -    tensor: torch.Tensor = field(default_factory=torch.Tensor)
                                      -    ratio: float = field(default_factory=float)
                                      -    preds: Dict[str, Prediction] = field(default_factory=dict)
                                      -
                                      -
                                      -@dataclass
                                      -class ImageData:
                                      -    """The main data class used for passing data between the different facetorch modules.
                                      -
                                      -    Attributes:
                                      -        path_input (str): Path to the input image.
                                      -        path_output (str): Path to the output image where the resulting image is saved.
                                      -        img (torch.Tensor): Original image tensor used for drawing purposes.
                                      -        tensor (torch.Tensor): Processed image tensor.
                                      -        dims (Dimensions): Dimensions of the image (height, width).
                                      -        det (Detection): Detection data given by the detector.
                                      -        faces (List[Face]): List of faces in the image.
                                      -        version (str): Version of the facetorch library.
                                      -
                                      -    """
                                      -
                                      -    path_input: str = field(default_factory=str)
                                      -    path_output: Optional[str] = field(default_factory=str)
                                      -    img: torch.Tensor = field(default_factory=torch.Tensor)
                                      -    tensor: torch.Tensor = field(default_factory=torch.Tensor)
                                      -    dims: Dimensions = field(default_factory=Dimensions)
                                      -    det: Detection = field(default_factory=Detection)
                                      -    faces: List[Face] = field(default_factory=list)
                                      -    version: str = field(default_factory=str)
                                      -
                                      -    def add_preds(
                                      -        self,
                                      -        preds_list: List[Prediction],
                                      -        predictor_name: str,
                                      -        face_offset: int = 0,
                                      -    ) -> None:
                                      -        """Adds a list of predictions to the data object.
                                      -
                                      -        Args:
                                      -            preds_list (List[Prediction]): List of predictions.
                                      -            predictor_name (str): Name of the predictor.
                                      -            face_offset (int): Offset of the face index where the predictions are added.
                                      -
                                      -        Returns:
                                      -            None
                                      -
                                      -        """
                                      -        j = 0
                                      -        for i in range(face_offset, face_offset + len(preds_list)):
                                      -            self.faces[i].preds[predictor_name] = preds_list[j]
                                      -            j += 1
                                      -
                                      -    def reset_img(self) -> None:
                                      -        """Reset the original image tensor to empty state."""
                                      -        self.img = torch.tensor([])
                                      -
                                      -    def reset_tensor(self) -> None:
                                      -        """Reset the processed image tensor to empty state."""
                                      -        self.tensor = torch.tensor([])
                                      -
                                      -    def reset_face_tensors(self) -> None:
                                      -        """Reset the face tensors to empty state."""
                                      -        for i in range(0, len(self.faces)):
                                      -            self.faces[i].tensor = torch.tensor([])
                                      -
                                      -    def reset_face_pred_tensors(self) -> None:
                                      -        """Reset the face prediction tensors to empty state."""
                                      -        for i in range(0, len(self.faces)):
                                      -            for key in self.faces[i].preds:
                                      -                self.faces[i].preds[key].logits = torch.tensor([])
                                      -                self.faces[i].preds[key].other = {}
                                      -
                                      -    def reset_det_tensors(self) -> None:
                                      -        """Reset the detection object to empty state."""
                                      -        self.det = Detection()
                                      -
                                      -    @Timer(
                                      -        "ImageData.reset_faces", "{name}: {milliseconds:.2f} ms", logger=logger.debug
                                      -    )
                                      -    def reset_tensors(self) -> None:
                                      -        """Reset the tensors to empty state."""
                                      -        self.reset_img()
                                      -        self.reset_tensor()
                                      -        self.reset_face_tensors()
                                      -        self.reset_face_pred_tensors()
                                      -        self.reset_det_tensors()
                                      -
                                      -    def set_dims(self) -> None:
                                      -        """Set the dimensions attribute from the tensor attribute."""
                                      -        self.dims.height = self.tensor.shape[2]
                                      -        self.dims.width = self.tensor.shape[3]
                                      -
                                      -    def aggregate_loc_tensor(self) -> torch.Tensor:
                                      -        """Aggregates the location tensor from all faces.
                                      -
                                      -        Returns:
                                      -            torch.Tensor: Aggregated location tensor for drawing purposes.
                                      -        """
                                      -        loc_tensor = torch.zeros((len(self.faces), 4), dtype=torch.float32)
                                      -        for i in range(0, len(self.faces)):
                                      -            loc_tensor[i] = torch.tensor(
                                      -                [
                                      -                    self.faces[i].loc.x1,
                                      -                    self.faces[i].loc.y1,
                                      -                    self.faces[i].loc.x2,
                                      -                    self.faces[i].loc.y2,
                                      -                ]
                                      -            )
                                      -        return loc_tensor
                                      -
                                      -
                                      -@dataclass
                                      -class Response:
                                      -    """Data class for response data, which is a subset of ImageData.
                                      -
                                      -    Attributes:
                                      -        faces (List[Face]): List of faces in the image.
                                      -        version (str): Version of the facetorch library.
                                      -
                                      -    """
                                      -
                                      -    faces: List[Face] = field(default_factory=list)
                                      -    version: str = field(default_factory=str)
                                      -
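For orientation, a minimal consumer sketch for the Response object follows; the predictor key "fer" and the Prediction.label attribute are illustrative assumptions, not guarantees of the API shown above.

def summarize(response: Response) -> None:
    # Walk the detected faces and print each predictor's output.
    # Assumes each Prediction exposes a `label` attribute (hypothetical).
    for i, face in enumerate(response.faces):
        for name, pred in face.preds.items():
            print(f"face {i}: {name} -> {pred.label}")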
                                      @@ -442,30 +183,6 @@

                                      Methods

                                      Form a square from the location.

                                      Returns

                                      None

                                      -
-Expand source code
                                      def form_square(self) -> None:
                                      -    """Form a square from the location.
                                      -
                                      -    Returns:
                                      -        None
                                      -    """
                                      -    height = self.y2 - self.y1
                                      -    width = self.x2 - self.x1
                                      -
                                      -    if height > width:
                                      -        diff = height - width
                                      -        self.x1 = self.x1 - int(diff / 2)
                                      -        self.x2 = self.x2 + int(diff / 2)
                                      -    elif height < width:
                                      -        diff = width - height
                                      -        self.y1 = self.y1 - int(diff / 2)
                                      -        self.y2 = self.y2 + int(diff / 2)
                                      -    else:
                                      -        pass
                                      -
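A quick worked example with hypothetical coordinates (the class name Location and keyword construction are assumptions): a 100x40 box is stretched along its shorter axis into a 100x100 square, which can push coordinates negative near image borders.

loc = Location(x1=10, y1=20, x2=110, y2=60)  # width 100, height 40
loc.form_square()
assert (loc.x1, loc.y1, loc.x2, loc.y2) == (10, -10, 110, 90)  # now 100x100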
def expand(self, amount: float) ‑> None
@@ -479,32 +196,6 @@

                                      Args

                                      Returns

                                      None

                                      -
-Expand source code
                                      def expand(self, amount: float) -> None:
                                      -    """Expand the location while keeping the center.
                                      -
                                      -    Args:
                                      -        amount (float): Amount to expand the location by in multiples of the original size.
-
-    Returns:
-        None
-    """
-    assert amount >= 0, "Amount must be greater than or equal to 0."
-    if amount != 0.0:
-        # Compute the size once so both sides grow symmetrically.
-        width = self.x2 - self.x1
-        height = self.y2 - self.y1
-        self.x1 = self.x1 - int(width / 2 * amount)
-        self.y1 = self.y1 - int(height / 2 * amount)
-        self.x2 = self.x2 + int(width / 2 * amount)
-        self.y2 = self.y2 + int(height / 2 * amount)
-
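A worked example under the symmetric width/height computation above (hypothetical values; Location construction as assumed earlier): amount=1.0 grows the box by its own size in each dimension while keeping the center fixed.

loc = Location(x1=0, y1=0, x2=100, y2=100)
loc.expand(1.0)
assert (loc.x1, loc.y1, loc.x2, loc.y2) == (-50, -50, 150, 150)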
                                      @@ -883,142 +574,48 @@

                                      Args

                                      Returns

                                      None

                                      -
-Expand source code
                                      def add_preds(
                                      -    self,
                                      -    preds_list: List[Prediction],
                                      -    predictor_name: str,
                                      -    face_offset: int = 0,
                                      -) -> None:
                                      -    """Adds a list of predictions to the data object.
                                      -
                                      -    Args:
                                      -        preds_list (List[Prediction]): List of predictions.
                                      -        predictor_name (str): Name of the predictor.
                                      -        face_offset (int): Offset of the face index where the predictions are added.
                                      -
                                      -    Returns:
                                      -        None
                                      -
                                      -    """
                                      -    j = 0
                                      -    for i in range(face_offset, face_offset + len(preds_list)):
                                      -        self.faces[i].preds[predictor_name] = preds_list[j]
                                      -        j += 1
                                      -
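A small sketch of attaching a batch of predictions to consecutive faces; here `data` is assumed to be an ImageData with detected faces, and the predictor name "fer" plus default-constructed Prediction objects are placeholders.

preds = [Prediction() for _ in data.faces]  # placeholder predictions
data.add_preds(preds, predictor_name="fer", face_offset=0)
assert "fer" in data.faces[0].preds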
                                      def reset_img(self) ‑> None

                                      Reset the original image tensor to empty state.

                                      -
-Expand source code
                                      def reset_img(self) -> None:
                                      -    """Reset the original image tensor to empty state."""
                                      -    self.img = torch.tensor([])
                                      -
                                      def reset_tensor(self) ‑> None

                                      Reset the processed image tensor to empty state.

                                      -
-Expand source code
                                      def reset_tensor(self) -> None:
                                      -    """Reset the processed image tensor to empty state."""
                                      -    self.tensor = torch.tensor([])
                                      -
                                      def reset_face_tensors(self) ‑> None

                                      Reset the face tensors to empty state.

                                      -
-Expand source code
                                      def reset_face_tensors(self) -> None:
                                      -    """Reset the face tensors to empty state."""
                                      -    for i in range(0, len(self.faces)):
                                      -        self.faces[i].tensor = torch.tensor([])
                                      -
                                      def reset_face_pred_tensors(self) ‑> None

                                      Reset the face prediction tensors to empty state.

                                      -
-Expand source code
                                      def reset_face_pred_tensors(self) -> None:
                                      -    """Reset the face prediction tensors to empty state."""
                                      -    for i in range(0, len(self.faces)):
                                      -        for key in self.faces[i].preds:
                                      -            self.faces[i].preds[key].logits = torch.tensor([])
                                      -            self.faces[i].preds[key].other = {}
                                      -
                                      def reset_det_tensors(self) ‑> None

                                      Reset the detection object to empty state.

                                      -
-Expand source code
                                      def reset_det_tensors(self) -> None:
                                      -    """Reset the detection object to empty state."""
                                      -    self.det = Detection()
                                      -
                                      def reset_tensors(self) ‑> None

                                      Reset the tensors to empty state.

                                      -
-Expand source code
                                      @Timer(
-    "ImageData.reset_tensors", "{name}: {milliseconds:.2f} ms", logger=logger.debug
                                      -)
                                      -def reset_tensors(self) -> None:
                                      -    """Reset the tensors to empty state."""
                                      -    self.reset_img()
                                      -    self.reset_tensor()
                                      -    self.reset_face_tensors()
                                      -    self.reset_face_pred_tensors()
                                      -    self.reset_det_tensors()
                                      -
                                      def set_dims(self) ‑> None

                                      Set the dimensions attribute from the tensor attribute.

                                      -
-Expand source code
                                      def set_dims(self) -> None:
                                      -    """Set the dimensions attribute from the tensor attribute."""
                                      -    self.dims.height = self.tensor.shape[2]
                                      -    self.dims.width = self.tensor.shape[3]
                                      -
def aggregate_loc_tensor(self) ‑> torch.Tensor
@@ -1030,28 +627,6 @@

                                      Returns

                                      torch.Tensor
                                      Aggregated location tensor for drawing purposes.
                                      -
-Expand source code
                                      def aggregate_loc_tensor(self) -> torch.Tensor:
                                      -    """Aggregates the location tensor from all faces.
                                      -
                                      -    Returns:
                                      -        torch.Tensor: Aggregated location tensor for drawing purposes.
                                      -    """
                                      -    loc_tensor = torch.zeros((len(self.faces), 4), dtype=torch.float32)
                                      -    for i in range(0, len(self.faces)):
                                      -        loc_tensor[i] = torch.tensor(
                                      -            [
                                      -                self.faces[i].loc.x1,
                                      -                self.faces[i].loc.y1,
                                      -                self.faces[i].loc.x2,
                                      -                self.faces[i].loc.y2,
                                      -            ]
                                      -        )
                                      -    return loc_tensor
                                      -
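The aggregated Nx4 tensor is in xyxy order, so it plugs straight into torchvision's box drawing utility; a minimal sketch, assuming `data` is an ImageData whose `img` attribute holds a uint8 CxHxW image tensor.

import torchvision

boxes = data.aggregate_loc_tensor()  # shape (n_faces, 4), xyxy
drawn = torchvision.utils.draw_bounding_boxes(data.img, boxes, width=2)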
                                      @@ -1147,7 +722,6 @@

                                      Class variables


                                      Index

                                        @@ -1241,7 +815,7 @@

                                        -

                                        Generated by pdoc 0.10.0.

                                        +

                                        Generated by pdoc 0.11.1.

\ No newline at end of file
diff --git a/docs/facetorch/downloader.html b/docs/facetorch/downloader.html
index 9e8452b..031f616 100644
--- a/docs/facetorch/downloader.html
+++ b/docs/facetorch/downloader.html
@@ -2,18 +2,21 @@
facetorch.downloader API documentation
                                        @@ -22,37 +25,6 @@

                                        Module facetorch.downloader

                                        -
-Expand source code
                                        import os
                                        -import gdown
                                        -from codetiming import Timer
                                        -
                                        -from facetorch import base
                                        -from facetorch.logger import LoggerJsonFile
                                        -
                                        -logger = LoggerJsonFile().logger
                                        -
                                        -
                                        -class DownloaderGDrive(base.BaseDownloader):
                                        -    def __init__(self, file_id: str, path_local: str):
                                        -        """Downloader for Google Drive files.
                                        -
                                        -        Args:
                                        -            file_id (str): ID of the file hosted on Google Drive.
                                        -            path_local (str): The file is downloaded to this local path.
                                        -        """
                                        -        super().__init__(file_id, path_local)
                                        -
                                        -    @Timer("DownloaderGDrive.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                                        -    def run(self):
                                        -        """Downloads a file from Google Drive."""
                                        -        os.makedirs(os.path.dirname(self.path_local), exist_ok=True)
                                        -        url = f"https://drive.google.com/uc?&id={self.file_id}&confirm=t"
                                        -        gdown.download(url, output=self.path_local, quiet=False)
                                        -
                                        @@ -108,17 +80,6 @@

                                        Methods

                                        Downloads a file from Google Drive.

                                        -
-Expand source code
                                        @Timer("DownloaderGDrive.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                                        -def run(self):
                                        -    """Downloads a file from Google Drive."""
                                        -    os.makedirs(os.path.dirname(self.path_local), exist_ok=True)
                                        -    url = f"https://drive.google.com/uc?&id={self.file_id}&confirm=t"
                                        -    gdown.download(url, output=self.path_local, quiet=False)
                                        -
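A minimal usage sketch; the Google Drive file id and local path below are placeholders.

downloader = DownloaderGDrive(
    file_id="<google-drive-file-id>",
    path_local="/opt/facetorch/models/model.pt",
)
downloader.run()  # creates the parent directory, then downloads the file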
                                        @@ -172,7 +133,6 @@

                                        Methods


                                        Index

                                          @@ -196,7 +156,7 @@

                                          -

                                          Generated by pdoc 0.10.0.

                                          +

                                          Generated by pdoc 0.11.1.

\ No newline at end of file
diff --git a/docs/facetorch/index.html b/docs/facetorch/index.html
index dadaf27..55b089d 100644
--- a/docs/facetorch/index.html
+++ b/docs/facetorch/index.html
@@ -1,625 +1,114 @@
facetorch API documentation
                                          -
                                          -
                                          -

                                          Package facetorch

                                          -
                                          -
                                          -
-Expand source code
                                          from .analyzer.core import FaceAnalyzer
                                          -
                                          -__all__ = ["FaceAnalyzer"]
                                          -
                                          -
                                          -
                                          -

                                          Sub-modules

-
facetorch.analyzer
facetorch.base
facetorch.datastruct
facetorch.downloader
facetorch.logger
facetorch.transforms
facetorch.utils
-

                                          Classes

                                          -
                                          -
                                          +
                                          +
                                          +
                                          +

                                          Package facetorch

                                          +
                                          +
                                          +
                                          +
                                          +

                                          Sub-modules

                                          +
                                          +
                                          facetorch.analyzer
                                          +
                                          +
                                          +
                                          +
                                          facetorch.base
                                          +
                                          +
                                          +
                                          +
                                          facetorch.datastruct
                                          +
                                          +
                                          +
                                          +
                                          facetorch.downloader
                                          +
                                          +
                                          +
                                          +
                                          facetorch.logger
                                          +
                                          +
                                          +
                                          +
                                          facetorch.transforms
                                          +
                                          +
                                          +
                                          +
                                          facetorch.utils
                                          +
                                          +
                                          +
                                          +
                                          +
                                          +
                                          +
                                          +
                                          +
                                          +
                                          +

                                          Classes

                                          +
                                          +
                                          class FaceAnalyzer (cfg: omegaconf.omegaconf.OmegaConf)
-
FaceAnalyzer is the main class that reads images, runs face detection, tensor unification and facial feature prediction. It also draws bounding boxes and facial landmarks over the image.

The following components are used:

1. Reader - reads the image and returns an ImageData object containing the image tensor.
2. Detector - wrapper around a neural network that detects faces.
3. Unifier - processor that unifies sizes of all faces and normalizes them between 0 and 1.
4. Predictor dict - dict of wrappers around neural networks trained to analyze facial features.
5. Utilizer dict - dict of utilizer processors that can for example extract 3D face landmarks or draw boxes over the image.

Args

cfg : OmegaConf
Config object with image reader, face detector, unifier and predictor configurations.

Attributes

cfg : OmegaConf
Config object with image reader, face detector, unifier and predictor configurations.
reader : BaseReader
Reader object that reads the image and returns an ImageData object containing the image tensor.
detector : FaceDetector
FaceDetector object that wraps a neural network that detects faces.
unifier : FaceUnifier
FaceUnifier object that unifies sizes of all faces and normalizes them between 0 and 1.
predictors : Dict[str, FacePredictor]
Dict of FacePredictor objects that predict facial features. Key is the name of the predictor.
utilizers : Dict[str, FaceUtilizer]
Dict of FaceUtilizer objects that can extract 3D face landmarks, draw boxes over the image, etc. Key is the name of the utilizer.
logger : logging.Logger
Logger object that logs messages to the console or to a file.
-
-Expand source code
                                          class FaceAnalyzer(object):
                                          +
                                          +

FaceAnalyzer is the main class that reads images, runs face detection, tensor unification and facial feature prediction. It also draws bounding boxes and facial landmarks over the image.

                                          +

                                          The following components are used:

                                          +
+
1. Reader - reads the image and returns an ImageData object containing the image tensor.
2. Detector - wrapper around a neural network that detects faces.
3. Unifier - processor that unifies sizes of all faces and normalizes them between 0 and 1.
4. Predictor dict - dict of wrappers around neural networks trained to analyze facial features.
5. Utilizer dict - dict of utilizer processors that can for example extract 3D face landmarks or draw boxes over the image.
                                          +

                                          Args

                                          +
                                          +
                                          cfg : OmegaConf
                                          +
                                          Config object with image reader, face detector, unifier and predictor configurations.
                                          +
                                          +

                                          Attributes

                                          +
                                          +
                                          cfg : OmegaConf
                                          +
                                          Config object with image reader, face detector, unifier and predictor configurations.
                                          +
                                          reader : BaseReader
                                          +
                                          Reader object that reads the image and returns an ImageData object containing the image tensor.
                                          +
                                          detector : FaceDetector
                                          +
                                          FaceDetector object that wraps a neural network that detects faces.
                                          +
                                          unifier : FaceUnifier
                                          +
                                          FaceUnifier object that unifies sizes of all faces and normalizes them between 0 and 1.
                                          +
                                          predictors : Dict[str, FacePredictor]
                                          +
                                          Dict of FacePredictor objects that predict facial features. Key is the name of the predictor.
                                          +
                                          utilizers : Dict[str, FaceUtilizer]
                                          +
                                          Dict of FaceUtilizer objects that can extract 3D face landmarks, draw boxes over the image, etc. Key is the name of the utilizer.
                                          +
                                          logger : logging.Logger
                                          +
                                          Logger object that logs messages to the console or to a file.
                                          +
                                          +
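Construction is driven entirely by the config object; a minimal sketch, assuming a merged OmegaConf/Hydra config file with an analyzer section at a placeholder path.

from omegaconf import OmegaConf
from facetorch import FaceAnalyzer

cfg = OmegaConf.load("conf/merged.config.yml")  # placeholder path
analyzer = FaceAnalyzer(cfg.analyzer)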
+Expand source code
                                          class FaceAnalyzer(object):
                                               @Timer(
                                                   "FaceAnalyzer.__init__", "{name}: {milliseconds:.2f} ms", logger=logger.debug
                                               )
                                          @@ -796,280 +285,122 @@ 

                                          Attributes

    else:
        self.logger.debug("Returning response with faces", extra=response.__dict__)
        return response
                                          -
                                          -

                                          Methods

                                          -
                                          -
                                          +
                                          +

                                          Methods

                                          +
                                          +
                                          def run(self, image_source: Union[str, torch.Tensor, numpy.ndarray, bytes, PIL.Image.Image, ForwardRef(None)] = None, path_image: Optional[str] = None, batch_size: int = 8, fix_img_size: bool = False, return_img_data: bool = False, include_tensors: bool = False, path_output: Optional[str] = None, tensor: Optional[torch.Tensor] = None) ‑> Union[Response, ImageData]
                                          -
                                          -
                                          -

Reads image, detects faces, unifies the detected faces, predicts facial features and returns analyzed data.

Args

image_source : Optional[Union[str, torch.Tensor, np.ndarray, bytes, Image.Image]]
Input to be analyzed. If None, path_image or tensor must be provided. Default: None.
path_image : Optional[str]
Path to the image to be analyzed. If None, tensor must be provided. Default: None.
batch_size : int
Batch size for making predictions on the faces. Default is 8.
fix_img_size : bool
If True, resizes the image to the size specified in reader. Default is False.
return_img_data : bool
If True, returns all image data including tensors, otherwise only returns the faces. Default is False.
include_tensors : bool
If True, keeps tensors in the returned data object; if False, they are removed. Default is False.
path_output : Optional[str]
Path where to save the image with detected faces. If None, the image is not saved. Default: None.
tensor : Optional[torch.Tensor]
Image tensor to be analyzed. If None, path_image must be provided. Default: None.

Returns

Union[Response, ImageData]
If return_img_data is False, returns a Response object containing the faces and their facial features. If return_img_data is True, returns the entire ImageData object.
-
-Expand source code
                                          @Timer("FaceAnalyzer.run", "{name}: {milliseconds:.2f} ms", logger=logger.debug)
                                          -def run(
                                          -    self,
                                          -    image_source: Optional[
                                          -        Union[str, torch.Tensor, np.ndarray, bytes, Image.Image]
                                          -    ] = None,
                                          -    path_image: Optional[str] = None,
                                          -    batch_size: int = 8,
                                          -    fix_img_size: bool = False,
                                          -    return_img_data: bool = False,
                                          -    include_tensors: bool = False,
                                          -    path_output: Optional[str] = None,
                                          -    tensor: Optional[torch.Tensor] = None,
                                          -) -> Union[Response, ImageData]:
                                          -    """Reads image, detects faces, unifies the detected faces, predicts facial features
                                          -     and returns analyzed data.
                                          -
                                          -    Args:
                                          -        image_source (Optional[Union[str, torch.Tensor, np.ndarray, bytes, Image.Image]]): Input to be analyzed. If None, path_image or tensor must be provided. Default: None.
                                          -        path_image (Optional[str]): Path to the image to be analyzed. If None, tensor must be provided. Default: None.
                                          -        batch_size (int): Batch size for making predictions on the faces. Default is 8.
                                          -        fix_img_size (bool): If True, resizes the image to the size specified in reader. Default is False.
                                          -        return_img_data (bool): If True, returns all image data including tensors, otherwise only returns the faces. Default is False.
-        include_tensors (bool): If True, keeps tensors in the returned data object; if False, they are removed. Default is False.
                                          -        path_output (Optional[str]): Path where to save the image with detected faces. If None, the image is not saved. Default: None.
                                          -        tensor (Optional[torch.Tensor]): Image tensor to be analyzed. If None, path_image must be provided. Default: None.
                                          -
                                          -    Returns:
                                          -        Union[Response, ImageData]: If return_img_data is False, returns a Response object containing the faces and their facial features. If return_img_data is True, returns the entire ImageData object.
                                          -
                                          -    """
                                          -
                                          -    def _predict_batch(
                                          -        data: ImageData, predictor: FacePredictor, predictor_name: str
                                          -    ) -> ImageData:
                                          -        n_faces = len(data.faces)
                                          -
                                          -        for face_indx_start in range(0, n_faces, batch_size):
                                          -            face_indx_end = min(face_indx_start + batch_size, n_faces)
                                          -
                                          -            face_batch_tensor = torch.stack(
                                          -                [face.tensor for face in data.faces[face_indx_start:face_indx_end]]
                                          -            )
                                          -            preds = predictor.run(face_batch_tensor)
                                          -            data.add_preds(preds, predictor_name, face_indx_start)
                                          -
                                          -        return data
                                          -
                                          -    self.logger.info("Running FaceAnalyzer")
                                          -
                                          -    if path_image is None and tensor is None and image_source is None:
-        raise ValueError("Either image_source, path_image or tensor must be provided.")
                                          -
                                          -    if image_source is not None:
                                          -        self.logger.debug("Using image_source as input")
                                          -        reader_input = image_source
                                          -    elif path_image is not None:
                                          -        self.logger.debug(
                                          -            "Using path_image as input", extra={"path_image": path_image}
                                          -        )
                                          -        reader_input = path_image
                                          -    else:
                                          -        self.logger.debug("Using tensor as input")
                                          -        reader_input = tensor
                                          -
                                          -    self.logger.info("Reading image", extra={"input": reader_input})
                                          -    data = self.reader.run(reader_input, fix_img_size=fix_img_size)
                                          -
                                          -    path_output = None if path_output == "None" else path_output
                                          -    data.path_output = path_output
                                          -
                                          -    try:
                                          -        data.version = version("facetorch")
                                          -    except Exception as e:
                                          -        self.logger.warning("Could not get version number", extra={"error": e})
                                          -
                                          -    self.logger.info("Detecting faces")
                                          -    data = self.detector.run(data)
                                          -    n_faces = len(data.faces)
                                          -    self.logger.info(f"Number of faces: {n_faces}")
                                          -
                                          -    if n_faces > 0 and self.unifier is not None:
                                          -        self.logger.info("Unifying faces")
                                          -        data = self.unifier.run(data)
                                          -
                                          -        self.logger.info("Predicting facial features")
                                          -        for predictor_name, predictor in self.predictors.items():
                                          -            self.logger.info(f"Running FacePredictor: {predictor_name}")
                                          -            data = _predict_batch(data, predictor, predictor_name)
                                          -
                                          -        self.logger.info("Utilizing facial features")
                                          -        for utilizer_name, utilizer in self.utilizers.items():
                                          -            self.logger.info(f"Running BaseUtilizer: {utilizer_name}")
                                          -            data = utilizer.run(data)
                                          -    else:
                                          -        if "save" in self.utilizers:
                                          -            self.utilizers["save"].run(data)
                                          -
                                          -    if not include_tensors:
                                          -        self.logger.debug(
                                          -            "Removing tensors from response as include_tensors is False"
                                          -        )
                                          -        data.reset_tensors()
                                          -
                                          -    response = Response(faces=data.faces, version=data.version)
                                          -
                                          -    if return_img_data:
                                          -        self.logger.debug("Returning image data object", extra=data.__dict__)
                                          -        return data
                                          -    else:
                                          -        self.logger.debug("Returning response with faces", extra=response.__dict__)
                                          -        return response
                                          -
                                          -
                                          -
                                          -
                                          -
                                          -
                                          -
                                          - -
                                          - +
                                          +

Reads image, detects faces, unifies the detected faces, predicts facial features and returns analyzed data.

                                          +

                                          Args

                                          +
                                          +
                                          image_source : Optional[Union[str, torch.Tensor, np.ndarray, bytes, Image.Image]]
                                          +
                                          Input to be analyzed. If None, path_image or tensor must be provided. Default: None.
                                          +
                                          path_image : Optional[str]
                                          +
                                          Path to the image to be analyzed. If None, tensor must be provided. Default: None.
                                          +
                                          batch_size : int
                                          +
                                          Batch size for making predictions on the faces. Default is 8.
                                          +
                                          fix_img_size : bool
                                          +
                                          If True, resizes the image to the size specified in reader. Default is False.
                                          +
                                          return_img_data : bool
                                          +
                                          If True, returns all image data including tensors, otherwise only returns the faces. Default is False.
                                          +
                                          include_tensors : bool
                                          +
If True, keeps tensors in the returned data object; if False, they are removed. Default is False.
                                          +
                                          path_output : Optional[str]
                                          +
                                          Path where to save the image with detected faces. If None, the image is not saved. Default: None.
                                          +
                                          tensor : Optional[torch.Tensor]
                                          +
                                          Image tensor to be analyzed. If None, path_image must be provided. Default: None.
                                          +
                                          +

                                          Returns

                                          +
                                          +
                                          Union[Response, ImageData]
                                          +
                                          If return_img_data is False, returns a Response object containing the faces and their facial features. If return_img_data is True, returns the entire ImageData object.
                                          +
                                          +
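A minimal end-to-end sketch of run(), assuming `analyzer` was constructed as above and that the input and output paths exist (both placeholders).

response = analyzer.run(
    path_image="test.jpg",     # placeholder input image
    batch_size=8,
    fix_img_size=True,
    include_tensors=True,      # keep logits on the returned faces
    path_output="output.jpg",  # placeholder; image with drawn boxes
)
print(f"faces detected: {len(response.faces)}")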
+
\ No newline at end of file
diff --git a/docs/facetorch/logger.html b/docs/facetorch/logger.html
index fd939e1..76fd34b 100644
--- a/docs/facetorch/logger.html
+++ b/docs/facetorch/logger.html
@@ -2,18 +2,21 @@
facetorch.logger API documentation
                                          @@ -22,64 +25,6 @@

                                          Module facetorch.logger

                                          -
                                          - -Expand source code - -
                                          import logging
                                          -import os
                                          -from typing import Optional
                                          -
                                          -from pythonjsonlogger import jsonlogger
                                          -
                                          -
                                          -class LoggerJsonFile:
                                          -    def __init__(
                                          -        self,
                                          -        name: str = "facetorch",
                                          -        level: int = logging.CRITICAL,
                                          -        path_file: Optional[str] = None,
                                          -        json_format: str = "%(asctime)s %(levelname)s %(message)s",
                                          -    ):
                                          -        """Logger in json format that writes to a file and console.
                                          -
                                          -        Args:
                                          -            name (str): Name of the logger.
                                          -            level (str): Level of the logger.
                                          -            path_file (str): Path to the log file.
                                          -            json_format (str): Format of the log record.
                                          -
                                          -        Attributes:
                                          -            logger (logging.Logger): Logger object.
                                          -
                                          -        """
                                          -        self.name = name
                                          -        self.level = level
                                          -        self.path_file = path_file
                                          -        self.json_format = json_format
                                          -
                                          -        self.logger = logging.getLogger(self.name)
                                          -        self.configure()
                                          -
                                          -    def configure(self):
                                          -        """Configures the logger."""
                                          -        if self.logger.level == 0 or self.level < self.logger.level:
                                          -            self.logger.setLevel(self.level)
                                          -
                                          -        if len(self.logger.handlers) == 0:
                                          -            json_handler = logging.StreamHandler()
                                          -            formatter = jsonlogger.JsonFormatter(fmt=self.json_format)
                                          -            json_handler.setFormatter(formatter)
                                          -            self.logger.addHandler(json_handler)
                                          -
                                          -            if self.path_file is not None:
                                          -                os.makedirs(os.path.dirname(self.path_file), exist_ok=True)
                                          -                path_file_handler = logging.FileHandler(self.path_file, mode="w")
                                          -                path_file_handler.setLevel(self.level)
                                          -                self.logger.addHandler(path_file_handler)
                                          -
                                          -        self.logger.propagate = False
                                          -
                                          @@ -170,29 +115,6 @@

                                          Methods

                                          Configures the logger.

                                          -
-Expand source code
                                          def configure(self):
                                          -    """Configures the logger."""
                                          -    if self.logger.level == 0 or self.level < self.logger.level:
                                          -        self.logger.setLevel(self.level)
                                          -
                                          -    if len(self.logger.handlers) == 0:
                                          -        json_handler = logging.StreamHandler()
                                          -        formatter = jsonlogger.JsonFormatter(fmt=self.json_format)
                                          -        json_handler.setFormatter(formatter)
                                          -        self.logger.addHandler(json_handler)
                                          -
                                          -        if self.path_file is not None:
                                          -            os.makedirs(os.path.dirname(self.path_file), exist_ok=True)
                                          -            path_file_handler = logging.FileHandler(self.path_file, mode="w")
                                          -            path_file_handler.setLevel(self.level)
                                          -            self.logger.addHandler(path_file_handler)
                                          -
                                          -    self.logger.propagate = False
                                          -
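A short usage sketch; the log file path below is a placeholder, and omitting path_file logs to the console only.

import logging

log = LoggerJsonFile(
    name="facetorch",
    level=logging.DEBUG,
    path_file="/tmp/facetorch.log",  # placeholder path
).logger
log.debug("analyzer started", extra={"stage": "demo"})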
                                          @@ -246,7 +168,6 @@

                                          Methods


                                          Index

                                            @@ -270,7 +191,7 @@

                                            -

                                            Generated by pdoc 0.10.0.

                                            +

                                            Generated by pdoc 0.11.1.

\ No newline at end of file
diff --git a/docs/facetorch/transforms.html b/docs/facetorch/transforms.html
index 8e553ca..930138a 100644
--- a/docs/facetorch/transforms.html
+++ b/docs/facetorch/transforms.html
@@ -2,18 +2,21 @@
facetorch.transforms API documentation
                                            @@ -22,76 +25,6 @@

                                            Module facetorch.transforms

                                            -
-Expand source code
-from typing import Union
                                            -
                                            -import torch
                                            -import torchvision
                                            -from torchvision import transforms
                                            -
                                            -
                                            -def script_transform(
                                            -    transform: transforms.Compose,
                                            -) -> Union[torch.jit.ScriptModule, torch.jit.ScriptFunction]:
                                            -    """Convert the composed transform to a TorchScript module.
                                            -
                                            -    Args:
                                            -        transform (transforms.Compose): Transform compose object to be scripted.
                                            -
                                            -    Returns:
                                            -        Union[torch.jit.ScriptModule, torch.jit.ScriptFunction]: Scripted transform.
                                            -    """
                                            -
                                            -    transform_seq = torch.nn.Sequential(*transform.transforms)
                                            -    transform_jit = torch.jit.script(transform_seq)
                                            -    return transform_jit
                                            -
                                            -
                                            -class SquarePad(torch.nn.Module):
                                            -    """SquarePad is a transform that pads the image to a square shape."""
                                            -
                                            -    def __init__(self) -> None:
                                            -        """It is initialized as a torch.nn.Module."""
                                            -        super().__init__()
                                            -
                                            -    def __call__(self, tensor: torch.Tensor) -> torch.Tensor:
                                            -        """Pads a tensor to a square.
                                            -
                                            -        Args:
                                            -            tensor (torch.Tensor): tensor to pad.
                                            -
                                            -        Returns:
                                            -            torch.Tensor: Padded tensor.
                                            -        """
                                            -        height, width = tensor.shape[-2:]
                                            -        img_size = [width, height]
                                            -
                                            -        max_wh = max(img_size)
                                            -        p_left, p_top = [(max_wh - s) // 2 for s in img_size]
                                            -        p_right, p_bottom = [
                                            -            max_wh - (s + pad) for s, pad in zip(img_size, [p_left, p_top])
                                            -        ]
                                            -        padding = (p_left, p_top, p_right, p_bottom)
                                            -        tensor_padded = torchvision.transforms.functional.pad(
                                            -            tensor, padding, 0, "constant"
                                            -        )
                                            -        return tensor_padded
                                            -
                                            -    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
                                            -        """Pads a tensor to a square.
                                            -
                                            -        Args:
                                            -            tensor (torch.Tensor): tensor to pad.
                                            -
                                            -        Returns:
                                            -            torch.Tensor: Padded tensor.
                                            -
                                            -        """
                                            -        return self.__call__(tensor)
                                            -
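A quick usage sketch for SquarePad; the input shape is arbitrary, and the expected output follows from the padding arithmetic above (the shorter side is zero-padded evenly on both ends up to the longer side):

import torch

from facetorch.transforms import SquarePad

square_pad = SquarePad()
img = torch.rand(3, 100, 160)  # C x H x W, wider than tall
out = square_pad(img)
print(out.shape)  # torch.Size([3, 160, 160]): 30 rows of zeros added top and bottom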
                                            @@ -115,26 +48,6 @@

                                            Returns

                                            Union[torch.jit.ScriptModule, torch.jit.ScriptFunction]
                                            Scripted transform.
                                            -
-Expand source code
-def script_transform(
                                            -    transform: transforms.Compose,
                                            -) -> Union[torch.jit.ScriptModule, torch.jit.ScriptFunction]:
                                            -    """Convert the composed transform to a TorchScript module.
                                            -
                                            -    Args:
                                            -        transform (transforms.Compose): Transform compose object to be scripted.
                                            -
                                            -    Returns:
                                            -        Union[torch.jit.ScriptModule, torch.jit.ScriptFunction]: Scripted transform.
                                            -    """
                                            -
                                            -    transform_seq = torch.nn.Sequential(*transform.transforms)
                                            -    transform_jit = torch.jit.script(transform_seq)
                                            -    return transform_jit
                                            -
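A usage sketch for script_transform, assuming a torchvision version whose Resize and Normalize modules are TorchScript-scriptable; note the size is given as a plain list, which is exactly the requirement that fix_transform_list_attr in facetorch.utils enforces:

import torch
from torchvision import transforms

from facetorch.transforms import script_transform

composed = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
scripted = script_transform(composed)  # a torch.jit.ScriptModule
out = scripted(torch.rand(1, 3, 120, 200))
print(out.shape)  # torch.Size([1, 3, 224, 224])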

                                            @@ -214,22 +127,6 @@

                                            Returns

                                            torch.Tensor
                                            Padded tensor.
                                            -
-Expand source code
-def forward(self, tensor: torch.Tensor) -> torch.Tensor:
                                            -    """Pads a tensor to a square.
                                            -
                                            -    Args:
                                            -        tensor (torch.Tensor): tensor to pad.
                                            -
                                            -    Returns:
                                            -        torch.Tensor: Padded tensor.
                                            -
                                            -    """
                                            -    return self.__call__(tensor)
                                            -
                                            @@ -283,7 +180,6 @@

                                            Returns


                                            Index

                                              @@ -312,7 +208,7 @@

-Generated by pdoc 0.10.0.
+Generated by pdoc 0.11.1.

- \ No newline at end of file
+
diff --git a/docs/facetorch/utils.html b/docs/facetorch/utils.html
index cd575b9..a58ed87 100644
--- a/docs/facetorch/utils.html
+++ b/docs/facetorch/utils.html
@@ -2,18 +2,21 @@
facetorch.utils API documentation
                                              @@ -22,47 +25,6 @@

                                              Module facetorch.utils

                                              -
-Expand source code
-import omegaconf
                                              -import torch
                                              -import torchvision
                                              -
                                              -
                                              -def rgb2bgr(tensor: torch.Tensor) -> torch.Tensor:
                                              -    """Converts a batch of RGB tensors to BGR tensors or vice versa.
                                              -
                                              -    Args:
                                              -        tensor (torch.Tensor): Batch of RGB (or BGR) channeled tensors
                                              -        with shape (dim0, channels, dim2, dim3)
                                              -
                                              -    Returns:
                                              -        torch.Tensor: Batch of BGR (or RGB) tensors with shape (dim0, channels, dim2, dim3).
                                              -    """
                                              -    assert tensor.shape[1] == 3, "Tensor must have 3 channels."
                                              -    return tensor[:, [2, 1, 0]]
                                              -
                                              -
                                              -def fix_transform_list_attr(
                                              -    transform: torchvision.transforms.Compose,
                                              -) -> torchvision.transforms.Compose:
                                              -    """Fix the transform attributes by converting the listconfig to a list.
-    This makes it possible to optimize the transform using TorchScript.
                                              -
                                              -    Args:
                                              -        transform (torchvision.transforms.Compose): Transform to be fixed.
                                              -
                                              -    Returns:
                                              -        torchvision.transforms.Compose: Fixed transform.
                                              -    """
                                              -    for transform_x in transform.transforms:
                                              -        for key, value in transform_x.__dict__.items():
                                              -            if isinstance(value, omegaconf.listconfig.ListConfig):
                                              -                transform_x.__dict__[key] = list(value)
                                              -    return transform
                                              -
                                              @@ -87,23 +49,6 @@

                                              Returns

                                              torch.Tensor
                                              Batch of BGR (or RGB) tensors with shape (dim0, channels, dim2, dim3).
                                              -
-Expand source code
-def rgb2bgr(tensor: torch.Tensor) -> torch.Tensor:
                                              -    """Converts a batch of RGB tensors to BGR tensors or vice versa.
                                              -
                                              -    Args:
                                              -        tensor (torch.Tensor): Batch of RGB (or BGR) channeled tensors
                                              -        with shape (dim0, channels, dim2, dim3)
                                              -
                                              -    Returns:
                                              -        torch.Tensor: Batch of BGR (or RGB) tensors with shape (dim0, channels, dim2, dim3).
                                              -    """
                                              -    assert tensor.shape[1] == 3, "Tensor must have 3 channels."
                                              -    return tensor[:, [2, 1, 0]]
                                              -
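A quick check of rgb2bgr: since it only reverses the channel index, applying it twice restores the input.

import torch

from facetorch.utils import rgb2bgr

batch = torch.arange(24).reshape(2, 3, 2, 2)  # (batch, channels, H, W)
bgr = rgb2bgr(batch)
assert torch.equal(bgr[:, 0], batch[:, 2])  # the old third channel is now first
assert torch.equal(rgb2bgr(bgr), batch)  # the round trip is the identity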
def fix_transform_list_attr(transform: torchvision.transforms.transforms.Compose) -> torchvision.transforms.transforms.Compose
@@ -121,28 +66,6 @@

                                              Returns

                                              torchvision.transforms.Compose
                                              Fixed transform.
                                              -
-Expand source code
-def fix_transform_list_attr(
                                              -    transform: torchvision.transforms.Compose,
                                              -) -> torchvision.transforms.Compose:
                                              -    """Fix the transform attributes by converting the listconfig to a list.
-    This makes it possible to optimize the transform using TorchScript.
                                              -
                                              -    Args:
                                              -        transform (torchvision.transforms.Compose): Transform to be fixed.
                                              -
                                              -    Returns:
                                              -        torchvision.transforms.Compose: Fixed transform.
                                              -    """
                                              -    for transform_x in transform.transforms:
                                              -        for key, value in transform_x.__dict__.items():
                                              -            if isinstance(value, omegaconf.listconfig.ListConfig):
                                              -                transform_x.__dict__[key] = list(value)
                                              -    return transform
                                              -
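A sketch of the situation fix_transform_list_attr handles: Hydra instantiates transforms with omegaconf ListConfig arguments, which torch.jit.script rejects, so the values are converted back to plain lists. The Resize size is an arbitrary example, and whether the ListConfig survives Resize.__init__ unchanged depends on the torchvision version:

import omegaconf
from torchvision import transforms

from facetorch.utils import fix_transform_list_attr

size = omegaconf.OmegaConf.create([224, 224])  # what a Hydra config would pass in
composed = transforms.Compose([transforms.Resize(size)])

fixed = fix_transform_list_attr(composed)
print(type(fixed.transforms[0].size))  # <class 'list'>, safe to pass to script_transform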

                                              @@ -196,7 +119,6 @@

                                              Returns


                                              Index

                                                @@ -216,7 +138,7 @@

                                                Index

- \ No newline at end of file
+
diff --git a/environment.yml b/environment.yml old mode 100644 new mode 100755
diff --git a/facetorch/__init__.py b/facetorch/__init__.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/__init__.py b/facetorch/analyzer/__init__.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/core.py b/facetorch/analyzer/core.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/detector/__init__.py b/facetorch/analyzer/detector/__init__.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/detector/core.py b/facetorch/analyzer/detector/core.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/detector/post.py b/facetorch/analyzer/detector/post.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/detector/pre.py b/facetorch/analyzer/detector/pre.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/predictor/__init__.py b/facetorch/analyzer/predictor/__init__.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/predictor/core.py b/facetorch/analyzer/predictor/core.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/predictor/post.py b/facetorch/analyzer/predictor/post.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/predictor/pre.py b/facetorch/analyzer/predictor/pre.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/reader/__init__.py b/facetorch/analyzer/reader/__init__.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/reader/core.py b/facetorch/analyzer/reader/core.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/unifier/__init__.py b/facetorch/analyzer/unifier/__init__.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/unifier/core.py b/facetorch/analyzer/unifier/core.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/utilizer/__init__.py b/facetorch/analyzer/utilizer/__init__.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/utilizer/align.py b/facetorch/analyzer/utilizer/align.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/utilizer/draw.py b/facetorch/analyzer/utilizer/draw.py old mode 100644 new mode 100755
diff --git a/facetorch/analyzer/utilizer/save.py b/facetorch/analyzer/utilizer/save.py old mode 100644 new mode 100755
diff --git a/facetorch/base.py b/facetorch/base.py old mode 100644 new mode 100755
diff --git a/facetorch/datastruct.py b/facetorch/datastruct.py old mode 100644 new mode 100755
diff --git a/facetorch/downloader.py b/facetorch/downloader.py old mode 100644 new mode 100755
diff --git a/facetorch/logger.py b/facetorch/logger.py old mode 100644 new mode 100755
diff --git a/facetorch/transforms.py b/facetorch/transforms.py old mode 100644 new mode 100755
diff --git a/facetorch/utils.py b/facetorch/utils.py old mode 100644 new mode 100755
diff --git a/gpu.conda-lock.yml b/gpu.conda-lock.yml old mode 100644 new mode 100755
diff --git a/gpu.environment.yml b/gpu.environment.yml old mode 100644 new mode 100755
diff --git a/pdoc/templates/_lunr_search.inc.mako b/pdoc/templates/_lunr_search.inc.mako old mode 100644 new mode 100755
diff --git a/pdoc/templates/config.mako b/pdoc/templates/config.mako old mode 100644 new mode 100755
diff --git a/pytest.ini b/pytest.ini old mode 100644 new mode 100755
diff --git a/requirements.dev.txt b/requirements.dev.txt
old mode 100644 new mode 100755
index c8e006e..e714a3e
--- a/requirements.dev.txt
+++ b/requirements.dev.txt
@@ -1,6 +1,6 @@
 black>=22.1.0
 flake8>=5.0.4
-pdoc3>=0.8
+pdoc3>=0.9.2
 pytest>=6.0.0
 pytest-cov>=2.10.0
 setuptools>=63.4.2
diff --git a/scripts/example.py b/scripts/example.py old mode 100644 new mode 100755
diff --git a/scripts/example_tensor.py b/scripts/example_tensor.py old mode 100644 new mode 100755
diff --git a/scripts/repeated_inference.py b/scripts/repeated_inference.py old mode 100644 new mode 100755
diff --git a/setup.py b/setup.py old mode 100644 new mode 100755
diff --git a/tests/conftest.py b/tests/conftest.py old mode 100644 new mode 100755
diff --git a/tests/test_align.py b/tests/test_align.py old mode 100644 new mode 100755
diff --git a/tests/test_analyzer.py b/tests/test_analyzer.py old mode 100644 new mode 100755
diff --git a/tests/test_au.py b/tests/test_au.py old mode 100644 new mode 100755
diff --git a/tests/test_deepfake.py b/tests/test_deepfake.py old mode 100644 new mode 100755
diff --git a/tests/test_detector.py b/tests/test_detector.py old mode 100644 new mode 100755
diff --git a/tests/test_embed.py b/tests/test_embed.py old mode 100644 new mode 100755
diff --git a/tests/test_fer.py b/tests/test_fer.py old mode 100644 new mode 100755
diff --git a/tests/test_predictors.py b/tests/test_predictors.py old mode 100644 new mode 100755
diff --git a/tests/test_reader.py b/tests/test_reader.py old mode 100644 new mode 100755
diff --git a/tests/test_response.py b/tests/test_response.py old mode 100644 new mode 100755
diff --git a/tests/test_save.py b/tests/test_save.py old mode 100644 new mode 100755
diff --git a/tests/test_transforms.py b/tests/test_transforms.py old mode 100644 new mode 100755
diff --git a/tests/test_unifier.py b/tests/test_unifier.py old mode 100644 new mode 100755
diff --git a/tests/test_utilizers.py b/tests/test_utilizers.py old mode 100644 new mode 100755
diff --git a/tests/test_va.py b/tests/test_va.py old mode 100644 new mode 100755
diff --git a/tests/test_verify.py b/tests/test_verify.py old mode 100644 new mode 100755
diff --git a/version b/version old mode 100644 new mode 100755