diff --git a/CITATION.md b/CITATION.md
deleted file mode 100644
index 910ed29..0000000
--- a/CITATION.md
+++ /dev/null
@@ -1,71 +0,0 @@
-## Cite DeepFace Papers
-
-Please cite deepface in your publications if it helps your research. Here are its BibTeX entries:
-
-### Facial Recognition
-
-If you use deepface in your research for facial recognition purposes, please cite these publications:
-
-```BibTeX
-@article{serengil2024lightface,
- title = {A Benchmark of Facial Recognition Pipelines and Co-Usability Performances of Modules},
- author = {Serengil, Sefik Ilkin and Ozpinar, Alper},
- journal = {Bilisim Teknolojileri Dergisi},
- volume = {17},
- number = {2},
- pages = {95-107},
- year = {2024},
- doi = {10.17671/gazibtd.1399077},
- url = {https://dergipark.org.tr/en/pub/gazibtd/issue/84331/1399077},
- publisher = {Gazi University}
-}
-```
-
-```BibTeX
-@inproceedings{serengil2020lightface,
- title = {LightFace: A Hybrid Deep Face Recognition Framework},
- author = {Serengil, Sefik Ilkin and Ozpinar, Alper},
- booktitle = {2020 Innovations in Intelligent Systems and Applications Conference (ASYU)},
- pages = {23-27},
- year = {2020},
- doi = {10.1109/ASYU50717.2020.9259802},
- url = {https://ieeexplore.ieee.org/document/9259802},
- organization = {IEEE}
-}
-```
-
-### Facial Attribute Analysis
-
-If you use deepface in your research for facial attribute analysis purposes such as age, gender, emotion or ethnicity prediction, please cite this publication:
-
-```BibTeX
-@inproceedings{serengil2021lightface,
- title = {HyperExtended LightFace: A Facial Attribute Analysis Framework},
- author = {Serengil, Sefik Ilkin and Ozpinar, Alper},
- booktitle = {2021 International Conference on Engineering and Emerging Technologies (ICEET)},
- pages = {1-4},
- year = {2021},
- doi = {10.1109/ICEET53442.2021.9659697},
- url = {https://ieeexplore.ieee.org/document/9659697/},
- organization = {IEEE}
-}
-```
-
-### Additional Papers
-
-We have additionally released these papers within the DeepFace project for a multitude of purposes.
-
-```BibTeX
-@misc{serengil2023db,
- title = {An evaluation of sql and nosql databases for facial recognition pipelines},
- author = {Serengil, Sefik Ilkin and Ozpinar, Alper},
- year = {2023},
- archivePrefix = {Cambridge Open Engage},
- doi = {10.33774/coe-2023-18rcn},
- url = {https://www.cambridge.org/engage/coe/article-details/63f3e5541d2d184063d4f569}
-}
-```
-
-### Repositories
-
-Also, if you use deepface in your GitHub projects, please add `deepface` in the `requirements.txt`. Thereafter, your project will be listed in its [dependency graph](https://github.com/serengil/deepface/network/dependents).
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index d85d5bc..0000000
--- a/Dockerfile
+++ /dev/null
@@ -1,58 +0,0 @@
-# base image
-FROM python:3.8.12
-LABEL org.opencontainers.image.source https://github.com/serengil/deepface
-
-# -----------------------------------
-# create required folder
-RUN mkdir /app
-RUN mkdir /app/deepface
-
-# -----------------------------------
-# switch to application directory
-WORKDIR /app
-
-# -----------------------------------
-# update image os
-RUN apt-get update
-RUN apt-get install ffmpeg libsm6 libxext6 -y
-
-# -----------------------------------
-# Copy required files from repo into image
-COPY ./deepface /app/deepface
-# even though we will use local requirements, this file is still required to install deepface from source code
-COPY ./requirements.txt /app/requirements.txt
-COPY ./requirements_local /app/requirements_local.txt
-COPY ./package_info.json /app/
-COPY ./setup.py /app/
-COPY ./README.md /app/
-
-# -----------------------------------
-# if you plan to use a GPU, you should install the 'tensorflow-gpu' package
-# RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org tensorflow-gpu
-
-# if you plan to use face anti-spoofing, then activate this line
-# RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org torch==2.1.2
-# -----------------------------------
-# install deepface from pypi release (might be out-of-date)
-# RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org deepface
-# -----------------------------------
-# install dependencies - deepface with these dependency versions is working
-RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org -r /app/requirements_local.txt
-# install deepface from source code (always up-to-date)
-RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org -e .
-
-# -----------------------------------
-# some packages are optional in deepface. activate if your task depends on one.
-# RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org cmake==3.24.1.1
-# RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org dlib==19.20.0
-# RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org lightgbm==2.3.1
-
-# -----------------------------------
-# environment variables
-ENV PYTHONUNBUFFERED=1
-
-# -----------------------------------
-# run the app (re-configure port if necessary)
-WORKDIR /app/deepface/api/src
-EXPOSE 5000
-CMD ["gunicorn", "--workers=1", "--timeout=3600", "--bind=0.0.0.0:5000", "app:create_app()"]
diff --git a/FaceImageArtView.json b/FaceImageArtView.json
new file mode 100644
index 0000000..58a59b2
--- /dev/null
+++ b/FaceImageArtView.json
@@ -0,0 +1,140 @@
+{
+ "3": {
+ "inputs": {
+ "seed": 479096427492872,
+ "steps": 20,
+ "cfg": 8,
+ "sampler_name": "dpmpp_2m",
+ "scheduler": "normal",
+ "denoise": 0.9,
+ "model": [
+ "19",
+ 0
+ ],
+ "positive": [
+ "6",
+ 0
+ ],
+ "negative": [
+ "7",
+ 0
+ ],
+ "latent_image": [
+ "12",
+ 0
+ ]
+ },
+ "class_type": "KSampler",
+ "_meta": {
+ "title": "K采样器"
+ }
+ },
+ "6": {
+ "inputs": {
+ "text": "1 girl",
+ "clip": [
+ "19",
+ 1
+ ]
+ },
+ "class_type": "CLIPTextEncode",
+ "_meta": {
+ "title": "CLIP文本编码"
+ }
+ },
+ "7": {
+ "inputs": {
+ "text": "",
+ "clip": [
+ "19",
+ 1
+ ]
+ },
+ "class_type": "CLIPTextEncode",
+ "_meta": {
+ "title": "CLIP文本编码"
+ }
+ },
+ "8": {
+ "inputs": {
+ "samples": [
+ "3",
+ 0
+ ],
+ "vae": [
+ "14",
+ 2
+ ]
+ },
+ "class_type": "VAEDecode",
+ "_meta": {
+ "title": "VAE解码"
+ }
+ },
+ "10": {
+ "inputs": {
+ "image": "WechatIMG422.jpg",
+ "upload": "image"
+ },
+ "class_type": "LoadImage",
+ "_meta": {
+ "title": "加载图像"
+ }
+ },
+ "12": {
+ "inputs": {
+ "pixels": [
+ "10",
+ 0
+ ],
+ "vae": [
+ "14",
+ 2
+ ]
+ },
+ "class_type": "VAEEncode",
+ "_meta": {
+ "title": "VAE编码"
+ }
+ },
+ "14": {
+ "inputs": {
+ "ckpt_name": "majicMIX realistic 麦橘写实_v7.safetensors"
+ },
+ "class_type": "CheckpointLoaderSimple",
+ "_meta": {
+ "title": "Checkpoint加载器(简易)"
+ }
+ },
+ "19": {
+ "inputs": {
+ "lora_name": "instantid_diffusion_pytorch_model.safetensors",
+ "strength_model": 1,
+ "strength_clip": 1,
+ "model": [
+ "14",
+ 0
+ ],
+ "clip": [
+ "14",
+ 1
+ ]
+ },
+ "class_type": "LoraLoader",
+ "_meta": {
+ "title": "加载LoRA"
+ }
+ },
+ "26": {
+ "inputs": {
+ "images": [
+ "8",
+ 0
+ ]
+ },
+ "class_type": "PreviewImage",
+ "_meta": {
+ "title": "预览图像"
+ }
+ }
+}
\ No newline at end of file
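The JSON added above is a ComfyUI workflow: each top-level key is a node id, `class_type` names the node, and an input given as a two-element list is a link to another node's output (`[source node id, output index]`). A small sketch follows, assuming only the standard library and the filename introduced in this diff, that walks the graph and prints which inputs are literal values and which are links.

```python
# Hedged sketch: inspect the ComfyUI workflow graph added in this diff.
import json

with open("FaceImageArtView.json", encoding="utf-8") as fh:
    workflow = json.load(fh)

for node_id, node in sorted(workflow.items(), key=lambda kv: int(kv[0])):
    print(f"node {node_id}: {node['class_type']}")
    for name, value in node["inputs"].items():
        if isinstance(value, list) and len(value) == 2:
            src, idx = value
            print(f"  {name} <- node {src} (output {idx})")  # link to another node
        else:
            print(f"  {name} = {value!r}")                   # literal input value
```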
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 2b0f9fb..0000000
--- a/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2019 Sefik Ilkin Serengil
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/Makefile b/Makefile
deleted file mode 100644
index cb8e9ae..0000000
--- a/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-test:
- cd tests && python -m pytest . -s --disable-warnings
-
-lint:
- python -m pylint deepface/ --fail-under=10
-
-coverage:
- pip install pytest-cov && cd tests && python -m pytest --cov=deepface
\ No newline at end of file
diff --git a/benchmarks/Evaluate-Results.ipynb b/benchmarks/Evaluate-Results.ipynb
deleted file mode 100644
index e2a7172..0000000
--- a/benchmarks/Evaluate-Results.ipynb
+++ /dev/null
@@ -1,1844 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "id": "59b076ef",
- "metadata": {},
- "source": [
- "# Evaluate DeepFace's Results In The Big Picture"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "id": "79200f8c",
- "metadata": {},
- "outputs": [],
- "source": [
- "import pandas as pd\n",
- "from IPython.display import display, HTML\n",
- "from sklearn import metrics\n",
- "import matplotlib.pyplot as plt"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "bbc11592",
- "metadata": {},
- "outputs": [],
- "source": [
- "alignment = [False, True]\n",
- "models = [\"Facenet512\", \"Facenet\", \"VGG-Face\", \"ArcFace\", \"Dlib\", \"GhostFaceNet\", \"SFace\", \"OpenFace\", \"DeepFace\", \"DeepID\"]\n",
- "detectors = [\"retinaface\", \"mtcnn\", \"fastmtcnn\", \"dlib\", \"yolov8\", \"yunet\", \"centerface\", \"mediapipe\", \"ssd\", \"opencv\", \"skip\"]\n",
- "distance_metrics = [\"euclidean\", \"euclidean_l2\", \"cosine\"]"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "e0dabf1b",
- "metadata": {},
- "source": [
- "# Main results"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "03b09fa3",
- "metadata": {
- "scrolled": false
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "euclidean for alignment False\n"
- ]
- },
- {
- "data": {
- "text/html": [
- "
\n",
- " \n",
- " \n",
- " | \n",
- " Facenet512 | \n",
- " Facenet | \n",
- " VGG-Face | \n",
- " ArcFace | \n",
- " Dlib | \n",
- " GhostFaceNet | \n",
- " SFace | \n",
- " OpenFace | \n",
- " DeepFace | \n",
- " DeepID | \n",
- "
\n",
- " \n",
- " | detector | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- "
\n",
- " \n",
- " \n",
- " \n",
- " | retinaface | \n",
- " 96.1 | \n",
- " 92.8 | \n",
- " 95.7 | \n",
- " 84.1 | \n",
- " 88.3 | \n",
- " 83.2 | \n",
- " 78.6 | \n",
- " 70.8 | \n",
- " 67.4 | \n",
- " 64.3 | \n",
- "
\n",
- " \n",
- " | mtcnn | \n",
- " 95.9 | \n",
- " 92.5 | \n",
- " 95.5 | \n",
- " 81.8 | \n",
- " 89.3 | \n",
- " 83.2 | \n",
- " 76.3 | \n",
- " 70.9 | \n",
- " 65.9 | \n",
- " 63.2 | \n",
- "
\n",
- " \n",
- " | fastmtcnn | \n",
- " 96.3 | \n",
- " 93.0 | \n",
- " 96.0 | \n",
- " 82.2 | \n",
- " 90.0 | \n",
- " 82.7 | \n",
- " 76.8 | \n",
- " 71.2 | \n",
- " 66.5 | \n",
- " 64.3 | \n",
- "
\n",
- " \n",
- " | dlib | \n",
- " 96.0 | \n",
- " 89.0 | \n",
- " 94.1 | \n",
- " 82.6 | \n",
- " 96.3 | \n",
- " 65.6 | \n",
- " 73.1 | \n",
- " 75.9 | \n",
- " 61.8 | \n",
- " 61.9 | \n",
- "
\n",
- " \n",
- " | yolov8 | \n",
- " 94.8 | \n",
- " 90.8 | \n",
- " 95.2 | \n",
- " 83.2 | \n",
- " 88.4 | \n",
- " 77.6 | \n",
- " 71.6 | \n",
- " 68.9 | \n",
- " 68.2 | \n",
- " 66.3 | \n",
- "
\n",
- " \n",
- " | yunet | \n",
- " 97.9 | \n",
- " 96.5 | \n",
- " 96.3 | \n",
- " 84.1 | \n",
- " 91.4 | \n",
- " 82.7 | \n",
- " 78.2 | \n",
- " 71.7 | \n",
- " 65.5 | \n",
- " 65.2 | \n",
- "
\n",
- " \n",
- " | centerface | \n",
- " 97.4 | \n",
- " 95.4 | \n",
- " 95.8 | \n",
- " 83.2 | \n",
- " 90.3 | \n",
- " 82.0 | \n",
- " 76.5 | \n",
- " 69.9 | \n",
- " 65.7 | \n",
- " 62.9 | \n",
- "
\n",
- " \n",
- " | mediapipe | \n",
- " 94.9 | \n",
- " 87.1 | \n",
- " 93.1 | \n",
- " 71.1 | \n",
- " 91.9 | \n",
- " 61.9 | \n",
- " 73.2 | \n",
- " 77.6 | \n",
- " 61.7 | \n",
- " 62.4 | \n",
- "
\n",
- " \n",
- " | ssd | \n",
- " 97.2 | \n",
- " 94.9 | \n",
- " 96.7 | \n",
- " 83.9 | \n",
- " 88.6 | \n",
- " 84.9 | \n",
- " 82.0 | \n",
- " 69.9 | \n",
- " 66.7 | \n",
- " 64.0 | \n",
- "
\n",
- " \n",
- " | opencv | \n",
- " 94.1 | \n",
- " 90.2 | \n",
- " 95.8 | \n",
- " 89.8 | \n",
- " 91.2 | \n",
- " 91.0 | \n",
- " 86.9 | \n",
- " 71.1 | \n",
- " 68.4 | \n",
- " 61.1 | \n",
- "
\n",
- " \n",
- " | skip | \n",
- " 92.0 | \n",
- " 64.1 | \n",
- " 90.6 | \n",
- " 56.6 | \n",
- " 69.0 | \n",
- " 75.1 | \n",
- " 81.4 | \n",
- " 57.4 | \n",
- " 60.8 | \n",
- " 60.7 | \n",
- "
\n",
- " \n",
- "
"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- "
"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "euclidean_l2 for alignment False\n"
- ]
- },
- {
- "data": {
- "text/html": [
- "\n",
- " \n",
- " \n",
- " | \n",
- " Facenet512 | \n",
- " Facenet | \n",
- " VGG-Face | \n",
- " ArcFace | \n",
- " Dlib | \n",
- " GhostFaceNet | \n",
- " SFace | \n",
- " OpenFace | \n",
- " DeepFace | \n",
- " DeepID | \n",
- "
\n",
- " \n",
- " | detector | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- "
\n",
- " \n",
- " \n",
- " \n",
- " | retinaface | \n",
- " 98.0 | \n",
- " 95.9 | \n",
- " 95.7 | \n",
- " 95.7 | \n",
- " 88.4 | \n",
- " 89.5 | \n",
- " 90.6 | \n",
- " 70.8 | \n",
- " 67.7 | \n",
- " 64.6 | \n",
- "
\n",
- " \n",
- " | mtcnn | \n",
- " 97.8 | \n",
- " 96.2 | \n",
- " 95.5 | \n",
- " 95.9 | \n",
- " 89.2 | \n",
- " 88.0 | \n",
- " 91.1 | \n",
- " 70.9 | \n",
- " 67.0 | \n",
- " 64.0 | \n",
- "
\n",
- " \n",
- " | fastmtcnn | \n",
- " 97.7 | \n",
- " 96.6 | \n",
- " 96.0 | \n",
- " 95.9 | \n",
- " 89.6 | \n",
- " 87.8 | \n",
- " 89.7 | \n",
- " 71.2 | \n",
- " 67.8 | \n",
- " 64.2 | \n",
- "
\n",
- " \n",
- " | dlib | \n",
- " 96.5 | \n",
- " 89.9 | \n",
- " 94.1 | \n",
- " 93.8 | \n",
- " 95.6 | \n",
- " 63.0 | \n",
- " 75.0 | \n",
- " 75.9 | \n",
- " 62.6 | \n",
- " 61.8 | \n",
- "
\n",
- " \n",
- " | yolov8 | \n",
- " 97.7 | \n",
- " 95.8 | \n",
- " 95.2 | \n",
- " 95.0 | \n",
- " 88.1 | \n",
- " 88.7 | \n",
- " 89.8 | \n",
- " 68.9 | \n",
- " 68.9 | \n",
- " 65.3 | \n",
- "
\n",
- " \n",
- " | yunet | \n",
- " 98.3 | \n",
- " 96.8 | \n",
- " 96.3 | \n",
- " 96.1 | \n",
- " 91.7 | \n",
- " 88.0 | \n",
- " 90.5 | \n",
- " 71.7 | \n",
- " 67.6 | \n",
- " 63.2 | \n",
- "
\n",
- " \n",
- " | centerface | \n",
- " 97.4 | \n",
- " 96.3 | \n",
- " 95.8 | \n",
- " 95.8 | \n",
- " 90.2 | \n",
- " 86.8 | \n",
- " 89.3 | \n",
- " 69.9 | \n",
- " 68.4 | \n",
- " 63.1 | \n",
- "
\n",
- " \n",
- " | mediapipe | \n",
- " 96.3 | \n",
- " 90.0 | \n",
- " 93.1 | \n",
- " 89.3 | \n",
- " 91.8 | \n",
- " 65.6 | \n",
- " 74.6 | \n",
- " 77.6 | \n",
- " 64.9 | \n",
- " 61.6 | \n",
- "
\n",
- " \n",
- " | ssd | \n",
- " 97.9 | \n",
- " 97.0 | \n",
- " 96.7 | \n",
- " 96.6 | \n",
- " 89.4 | \n",
- " 91.5 | \n",
- " 93.0 | \n",
- " 69.9 | \n",
- " 68.7 | \n",
- " 64.9 | \n",
- "
\n",
- " \n",
- " | opencv | \n",
- " 96.2 | \n",
- " 92.9 | \n",
- " 95.8 | \n",
- " 93.2 | \n",
- " 91.5 | \n",
- " 93.3 | \n",
- " 91.7 | \n",
- " 71.1 | \n",
- " 68.3 | \n",
- " 61.6 | \n",
- "
\n",
- " \n",
- " | skip | \n",
- " 91.4 | \n",
- " 67.6 | \n",
- " 90.6 | \n",
- " 57.2 | \n",
- " 69.3 | \n",
- " 78.4 | \n",
- " 83.4 | \n",
- " 57.4 | \n",
- " 62.6 | \n",
- " 61.6 | \n",
- "
\n",
- " \n",
- "
"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- "
"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "cosine for alignment False\n"
- ]
- },
- {
- "data": {
- "text/html": [
- "\n",
- " \n",
- " \n",
- " | \n",
- " Facenet512 | \n",
- " Facenet | \n",
- " VGG-Face | \n",
- " ArcFace | \n",
- " Dlib | \n",
- " GhostFaceNet | \n",
- " SFace | \n",
- " OpenFace | \n",
- " DeepFace | \n",
- " DeepID | \n",
- "
\n",
- " \n",
- " | detector | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- "
\n",
- " \n",
- " \n",
- " \n",
- " | retinaface | \n",
- " 98.0 | \n",
- " 95.9 | \n",
- " 95.7 | \n",
- " 95.7 | \n",
- " 88.4 | \n",
- " 89.5 | \n",
- " 90.6 | \n",
- " 70.8 | \n",
- " 67.7 | \n",
- " 63.7 | \n",
- "
\n",
- " \n",
- " | mtcnn | \n",
- " 97.8 | \n",
- " 96.2 | \n",
- " 95.5 | \n",
- " 95.9 | \n",
- " 89.2 | \n",
- " 88.0 | \n",
- " 91.1 | \n",
- " 70.9 | \n",
- " 67.0 | \n",
- " 64.0 | \n",
- "
\n",
- " \n",
- " | fastmtcnn | \n",
- " 97.7 | \n",
- " 96.6 | \n",
- " 96.0 | \n",
- " 95.9 | \n",
- " 89.6 | \n",
- " 87.8 | \n",
- " 89.7 | \n",
- " 71.2 | \n",
- " 67.8 | \n",
- " 62.7 | \n",
- "
\n",
- " \n",
- " | dlib | \n",
- " 96.5 | \n",
- " 89.9 | \n",
- " 94.1 | \n",
- " 93.8 | \n",
- " 95.6 | \n",
- " 63.0 | \n",
- " 75.0 | \n",
- " 75.9 | \n",
- " 62.6 | \n",
- " 61.7 | \n",
- "
\n",
- " \n",
- " | yolov8 | \n",
- " 97.7 | \n",
- " 95.8 | \n",
- " 95.2 | \n",
- " 95.0 | \n",
- " 88.1 | \n",
- " 88.7 | \n",
- " 89.8 | \n",
- " 68.9 | \n",
- " 68.9 | \n",
- " 65.3 | \n",
- "
\n",
- " \n",
- " | yunet | \n",
- " 98.3 | \n",
- " 96.8 | \n",
- " 96.3 | \n",
- " 96.1 | \n",
- " 91.7 | \n",
- " 88.0 | \n",
- " 90.5 | \n",
- " 71.7 | \n",
- " 67.6 | \n",
- " 63.2 | \n",
- "
\n",
- " \n",
- " | centerface | \n",
- " 97.4 | \n",
- " 96.3 | \n",
- " 95.8 | \n",
- " 95.8 | \n",
- " 90.2 | \n",
- " 86.8 | \n",
- " 89.3 | \n",
- " 69.9 | \n",
- " 68.4 | \n",
- " 62.6 | \n",
- "
\n",
- " \n",
- " | mediapipe | \n",
- " 96.3 | \n",
- " 90.0 | \n",
- " 93.1 | \n",
- " 89.3 | \n",
- " 91.8 | \n",
- " 64.8 | \n",
- " 74.6 | \n",
- " 77.6 | \n",
- " 64.9 | \n",
- " 61.6 | \n",
- "
\n",
- " \n",
- " | ssd | \n",
- " 97.9 | \n",
- " 97.0 | \n",
- " 96.7 | \n",
- " 96.6 | \n",
- " 89.4 | \n",
- " 91.5 | \n",
- " 93.0 | \n",
- " 69.9 | \n",
- " 68.7 | \n",
- " 63.8 | \n",
- "
\n",
- " \n",
- " | opencv | \n",
- " 96.2 | \n",
- " 92.9 | \n",
- " 95.8 | \n",
- " 93.2 | \n",
- " 91.5 | \n",
- " 93.3 | \n",
- " 91.7 | \n",
- " 71.1 | \n",
- " 68.1 | \n",
- " 61.1 | \n",
- "
\n",
- " \n",
- " | skip | \n",
- " 91.4 | \n",
- " 67.6 | \n",
- " 90.6 | \n",
- " 54.8 | \n",
- " 69.3 | \n",
- " 78.4 | \n",
- " 83.4 | \n",
- " 57.4 | \n",
- " 62.6 | \n",
- " 61.1 | \n",
- "
\n",
- " \n",
- "
"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- "
"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "euclidean for alignment True\n"
- ]
- },
- {
- "data": {
- "text/html": [
- "\n",
- " \n",
- " \n",
- " | \n",
- " Facenet512 | \n",
- " Facenet | \n",
- " VGG-Face | \n",
- " ArcFace | \n",
- " Dlib | \n",
- " GhostFaceNet | \n",
- " SFace | \n",
- " OpenFace | \n",
- " DeepFace | \n",
- " DeepID | \n",
- "
\n",
- " \n",
- " | detector | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- "
\n",
- " \n",
- " \n",
- " \n",
- " | retinaface | \n",
- " 95.9 | \n",
- " 93.5 | \n",
- " 95.8 | \n",
- " 85.2 | \n",
- " 88.9 | \n",
- " 85.9 | \n",
- " 80.2 | \n",
- " 69.4 | \n",
- " 67.0 | \n",
- " 65.6 | \n",
- "
\n",
- " \n",
- " | mtcnn | \n",
- " 95.2 | \n",
- " 93.8 | \n",
- " 95.9 | \n",
- " 83.7 | \n",
- " 89.4 | \n",
- " 83.0 | \n",
- " 77.4 | \n",
- " 70.2 | \n",
- " 66.5 | \n",
- " 63.3 | \n",
- "
\n",
- " \n",
- " | fastmtcnn | \n",
- " 96.0 | \n",
- " 93.4 | \n",
- " 95.8 | \n",
- " 83.5 | \n",
- " 91.1 | \n",
- " 82.8 | \n",
- " 77.7 | \n",
- " 69.4 | \n",
- " 66.7 | \n",
- " 64.0 | \n",
- "
\n",
- " \n",
- " | dlib | \n",
- " 96.0 | \n",
- " 90.8 | \n",
- " 94.5 | \n",
- " 88.6 | \n",
- " 96.8 | \n",
- " 65.7 | \n",
- " 66.3 | \n",
- " 75.8 | \n",
- " 63.4 | \n",
- " 60.4 | \n",
- "
\n",
- " \n",
- " | yolov8 | \n",
- " 94.4 | \n",
- " 91.9 | \n",
- " 95.0 | \n",
- " 84.1 | \n",
- " 89.2 | \n",
- " 77.6 | \n",
- " 73.4 | \n",
- " 68.7 | \n",
- " 69.0 | \n",
- " 66.5 | \n",
- "
\n",
- " \n",
- " | yunet | \n",
- " 97.3 | \n",
- " 96.1 | \n",
- " 96.0 | \n",
- " 84.9 | \n",
- " 92.2 | \n",
- " 84.0 | \n",
- " 79.4 | \n",
- " 70.9 | \n",
- " 65.8 | \n",
- " 65.2 | \n",
- "
\n",
- " \n",
- " | centerface | \n",
- " 97.6 | \n",
- " 95.8 | \n",
- " 95.7 | \n",
- " 83.6 | \n",
- " 90.4 | \n",
- " 82.8 | \n",
- " 77.4 | \n",
- " 68.9 | \n",
- " 65.5 | \n",
- " 62.8 | \n",
- "
\n",
- " \n",
- " | mediapipe | \n",
- " 95.1 | \n",
- " 88.6 | \n",
- " 92.9 | \n",
- " 73.2 | \n",
- " 93.1 | \n",
- " 63.2 | \n",
- " 72.5 | \n",
- " 78.7 | \n",
- " 61.8 | \n",
- " 62.2 | \n",
- "
\n",
- " \n",
- " | ssd | \n",
- " 88.9 | \n",
- " 85.6 | \n",
- " 87.0 | \n",
- " 75.8 | \n",
- " 83.1 | \n",
- " 79.1 | \n",
- " 76.9 | \n",
- " 66.8 | \n",
- " 63.4 | \n",
- " 62.5 | \n",
- "
\n",
- " \n",
- " | opencv | \n",
- " 88.2 | \n",
- " 84.2 | \n",
- " 87.3 | \n",
- " 73.0 | \n",
- " 84.4 | \n",
- " 83.8 | \n",
- " 81.1 | \n",
- " 66.4 | \n",
- " 65.5 | \n",
- " 59.6 | \n",
- "
\n",
- " \n",
- " | skip | \n",
- " 92.0 | \n",
- " 64.1 | \n",
- " 90.6 | \n",
- " 56.6 | \n",
- " 69.0 | \n",
- " 75.1 | \n",
- " 81.4 | \n",
- " 57.4 | \n",
- " 60.8 | \n",
- " 60.7 | \n",
- "
\n",
- " \n",
- "
"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- "
"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "euclidean_l2 for alignment True\n"
- ]
- },
- {
- "data": {
- "text/html": [
- "\n",
- " \n",
- " \n",
- " | \n",
- " Facenet512 | \n",
- " Facenet | \n",
- " VGG-Face | \n",
- " ArcFace | \n",
- " Dlib | \n",
- " GhostFaceNet | \n",
- " SFace | \n",
- " OpenFace | \n",
- " DeepFace | \n",
- " DeepID | \n",
- "
\n",
- " \n",
- " | detector | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- "
\n",
- " \n",
- " \n",
- " \n",
- " | retinaface | \n",
- " 98.4 | \n",
- " 96.4 | \n",
- " 95.8 | \n",
- " 96.6 | \n",
- " 89.1 | \n",
- " 90.5 | \n",
- " 92.4 | \n",
- " 69.4 | \n",
- " 67.7 | \n",
- " 64.4 | \n",
- "
\n",
- " \n",
- " | mtcnn | \n",
- " 97.6 | \n",
- " 96.8 | \n",
- " 95.9 | \n",
- " 96.0 | \n",
- " 90.0 | \n",
- " 89.8 | \n",
- " 90.5 | \n",
- " 70.2 | \n",
- " 66.4 | \n",
- " 64.0 | \n",
- "
\n",
- " \n",
- " | fastmtcnn | \n",
- " 98.1 | \n",
- " 97.2 | \n",
- " 95.8 | \n",
- " 96.4 | \n",
- " 91.0 | \n",
- " 89.5 | \n",
- " 90.0 | \n",
- " 69.4 | \n",
- " 67.4 | \n",
- " 64.1 | \n",
- "
\n",
- " \n",
- " | dlib | \n",
- " 97.0 | \n",
- " 92.6 | \n",
- " 94.5 | \n",
- " 95.1 | \n",
- " 96.4 | \n",
- " 63.3 | \n",
- " 69.8 | \n",
- " 75.8 | \n",
- " 66.5 | \n",
- " 59.5 | \n",
- "
\n",
- " \n",
- " | yolov8 | \n",
- " 97.3 | \n",
- " 95.7 | \n",
- " 95.0 | \n",
- " 95.5 | \n",
- " 88.8 | \n",
- " 88.9 | \n",
- " 91.9 | \n",
- " 68.7 | \n",
- " 67.5 | \n",
- " 66.0 | \n",
- "
\n",
- " \n",
- " | yunet | \n",
- " 97.9 | \n",
- " 97.4 | \n",
- " 96.0 | \n",
- " 96.7 | \n",
- " 91.6 | \n",
- " 89.1 | \n",
- " 91.0 | \n",
- " 70.9 | \n",
- " 66.5 | \n",
- " 63.6 | \n",
- "
\n",
- " \n",
- " | centerface | \n",
- " 97.7 | \n",
- " 96.8 | \n",
- " 95.7 | \n",
- " 96.5 | \n",
- " 90.9 | \n",
- " 87.5 | \n",
- " 89.3 | \n",
- " 68.9 | \n",
- " 67.8 | \n",
- " 64.0 | \n",
- "
\n",
- " \n",
- " | mediapipe | \n",
- " 96.1 | \n",
- " 90.6 | \n",
- " 92.9 | \n",
- " 90.3 | \n",
- " 92.6 | \n",
- " 64.4 | \n",
- " 75.4 | \n",
- " 78.7 | \n",
- " 64.7 | \n",
- " 63.0 | \n",
- "
\n",
- " \n",
- " | ssd | \n",
- " 88.7 | \n",
- " 87.5 | \n",
- " 87.0 | \n",
- " 86.2 | \n",
- " 83.3 | \n",
- " 82.2 | \n",
- " 84.6 | \n",
- " 66.8 | \n",
- " 64.1 | \n",
- " 62.6 | \n",
- "
\n",
- " \n",
- " | opencv | \n",
- " 87.6 | \n",
- " 84.8 | \n",
- " 87.3 | \n",
- " 84.6 | \n",
- " 84.0 | \n",
- " 85.0 | \n",
- " 83.6 | \n",
- " 66.4 | \n",
- " 63.8 | \n",
- " 60.9 | \n",
- "
\n",
- " \n",
- " | skip | \n",
- " 91.4 | \n",
- " 67.6 | \n",
- " 90.6 | \n",
- " 57.2 | \n",
- " 69.3 | \n",
- " 78.4 | \n",
- " 83.4 | \n",
- " 57.4 | \n",
- " 62.6 | \n",
- " 61.6 | \n",
- "
\n",
- " \n",
- "
"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- "
"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "cosine for alignment True\n"
- ]
- },
- {
- "data": {
- "text/html": [
- "\n",
- " \n",
- " \n",
- " | \n",
- " Facenet512 | \n",
- " Facenet | \n",
- " VGG-Face | \n",
- " ArcFace | \n",
- " Dlib | \n",
- " GhostFaceNet | \n",
- " SFace | \n",
- " OpenFace | \n",
- " DeepFace | \n",
- " DeepID | \n",
- "
\n",
- " \n",
- " | detector | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- " | \n",
- "
\n",
- " \n",
- " \n",
- " \n",
- " | retinaface | \n",
- " 98.4 | \n",
- " 96.4 | \n",
- " 95.8 | \n",
- " 96.6 | \n",
- " 89.1 | \n",
- " 90.5 | \n",
- " 92.4 | \n",
- " 69.4 | \n",
- " 67.7 | \n",
- " 64.4 | \n",
- "
\n",
- " \n",
- " | mtcnn | \n",
- " 97.6 | \n",
- " 96.8 | \n",
- " 95.9 | \n",
- " 96.0 | \n",
- " 90.0 | \n",
- " 89.8 | \n",
- " 90.5 | \n",
- " 70.2 | \n",
- " 66.3 | \n",
- " 63.0 | \n",
- "
\n",
- " \n",
- " | fastmtcnn | \n",
- " 98.1 | \n",
- " 97.2 | \n",
- " 95.8 | \n",
- " 96.4 | \n",
- " 91.0 | \n",
- " 89.5 | \n",
- " 90.0 | \n",
- " 69.4 | \n",
- " 67.4 | \n",
- " 63.6 | \n",
- "
\n",
- " \n",
- " | dlib | \n",
- " 97.0 | \n",
- " 92.6 | \n",
- " 94.5 | \n",
- " 95.1 | \n",
- " 96.4 | \n",
- " 63.3 | \n",
- " 69.8 | \n",
- " 75.8 | \n",
- " 66.5 | \n",
- " 58.7 | \n",
- "
\n",
- " \n",
- " | yolov8 | \n",
- " 97.3 | \n",
- " 95.7 | \n",
- " 95.0 | \n",
- " 95.5 | \n",
- " 88.8 | \n",
- " 88.9 | \n",
- " 91.9 | \n",
- " 68.7 | \n",
- " 67.5 | \n",
- " 65.9 | \n",
- "
\n",
- " \n",
- " | yunet | \n",
- " 97.9 | \n",
- " 97.4 | \n",
- " 96.0 | \n",
- " 96.7 | \n",
- " 91.6 | \n",
- " 89.1 | \n",
- " 91.0 | \n",
- " 70.9 | \n",
- " 66.5 | \n",
- " 63.5 | \n",
- "
\n",
- " \n",
- " | centerface | \n",
- " 97.7 | \n",
- " 96.8 | \n",
- " 95.7 | \n",
- " 96.5 | \n",
- " 90.9 | \n",
- " 87.5 | \n",
- " 89.3 | \n",
- " 68.9 | \n",
- " 67.8 | \n",
- " 63.6 | \n",
- "
\n",
- " \n",
- " | mediapipe | \n",
- " 96.1 | \n",
- " 90.6 | \n",
- " 92.9 | \n",
- " 90.3 | \n",
- " 92.6 | \n",
- " 64.3 | \n",
- " 75.4 | \n",
- " 78.7 | \n",
- " 64.8 | \n",
- " 63.0 | \n",
- "
\n",
- " \n",
- " | ssd | \n",
- " 88.7 | \n",
- " 87.5 | \n",
- " 87.0 | \n",
- " 86.2 | \n",
- " 83.3 | \n",
- " 82.2 | \n",
- " 84.5 | \n",
- " 66.8 | \n",
- " 63.8 | \n",
- " 62.6 | \n",
- "
\n",
- " \n",
- " | opencv | \n",
- " 87.6 | \n",
- " 84.9 | \n",
- " 87.2 | \n",
- " 84.6 | \n",
- " 84.0 | \n",
- " 85.0 | \n",
- " 83.6 | \n",
- " 66.2 | \n",
- " 63.7 | \n",
- " 60.1 | \n",
- "
\n",
- " \n",
- " | skip | \n",
- " 91.4 | \n",
- " 67.6 | \n",
- " 90.6 | \n",
- " 54.8 | \n",
- " 69.3 | \n",
- " 78.4 | \n",
- " 83.4 | \n",
- " 57.4 | \n",
- " 62.6 | \n",
- " 61.1 | \n",
- "
\n",
- " \n",
- "
"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- "
"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "source": [
- "for align in alignment:\n",
- " for metric in distance_metrics:\n",
- " df = pd.read_csv(f\"results/pivot_{metric}_with_alignment_{align}.csv\")\n",
- " df = df.rename(columns = {'Unnamed: 0': 'detector'})\n",
- " df = df.set_index('detector')\n",
- "\n",
- " print(f\"{metric} for alignment {align}\")\n",
- " display(HTML(df.to_html()))\n",
- " display(HTML(\"
\"))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "aef6dc64",
- "metadata": {},
- "outputs": [],
- "source": [
- "def create_github_table():\n",
- " for metric in distance_metrics:\n",
- " for align in [True, False]:\n",
- " df = pd.read_csv(f\"results/pivot_{metric}_with_alignment_{align}.csv\")\n",
- " df = df.rename(columns = {'Unnamed: 0': 'detector'})\n",
- " df = df.set_index('detector')\n",
- " \n",
- " print(f\"Performance Matrix for {metric} while alignment is {align} \\n\")\n",
- " header = \"| | \"\n",
- " for col_name in df.columns.tolist():\n",
- " header += f\"{col_name} |\"\n",
- " print(header)\n",
- " # -------------------------------\n",
- " seperator = \"| --- | \"\n",
- " for col_name in df.columns.tolist():\n",
- " seperator += \" --- |\"\n",
- " print(seperator)\n",
- " # -------------------------------\n",
- " for index, instance in df.iterrows():\n",
- " line = f\"| {instance.name} |\"\n",
- " for i in instance.values:\n",
- " if i < 97.5:\n",
- " line += f\"{i} |\"\n",
- " else:\n",
- " line += f\"**{i}** |\"\n",
- " print(line)\n",
- " \n",
- " print(\"\\n---------------------------\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "5004caaa",
- "metadata": {},
- "outputs": [],
- "source": [
- "# create_github_table()"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "965c655f",
- "metadata": {},
- "source": [
- "# Alignment impact"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "id": "6ce20a58",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/html": [
- "\n",
- " \n",
- " \n",
- " | \n",
- " max_alignment_impact | \n",
- "
\n",
- " \n",
- " \n",
- " \n",
- " | ArcFace | \n",
- " 6.0 | \n",
- "
\n",
- " \n",
- " | DeepFace | \n",
- " 3.9 | \n",
- "
\n",
- " \n",
- " | GhostFaceNet | \n",
- " 2.7 | \n",
- "
\n",
- " \n",
- " | Facenet | \n",
- " 2.7 | \n",
- "
\n",
- " \n",
- " | SFace | \n",
- " 2.1 | \n",
- "
\n",
- " \n",
- " | Dlib | \n",
- " 1.4 | \n",
- "
\n",
- " \n",
- " | DeepID | \n",
- " 1.4 | \n",
- "
\n",
- " \n",
- " | OpenFace | \n",
- " 1.1 | \n",
- "
\n",
- " \n",
- " | Facenet512 | \n",
- " 0.5 | \n",
- "
\n",
- " \n",
- " | VGG-Face | \n",
- " 0.4 | \n",
- "
\n",
- " \n",
- "
"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "source": [
- "align_df = None\n",
- "\n",
- "for distance_metric in distance_metrics:\n",
- " df1 = (\n",
- " pd.read_csv(f\"results/pivot_{distance_metric}_with_alignment_True.csv\")\n",
- " .rename(columns = {'Unnamed: 0': 'detector'})\n",
- " .set_index('detector')\n",
- " )\n",
- " df2 = (\n",
- " pd.read_csv(f\"results/pivot_{distance_metric}_with_alignment_False.csv\")\n",
- " .rename(columns = {'Unnamed: 0': 'detector'})\n",
- " .set_index('detector')\n",
- " )\n",
- " df1 = df1[df1.index != \"skip\"]\n",
- " df2 = df2[df2.index != \"skip\"]\n",
- " pivot_df = df1.subtract(df2)\n",
- " \n",
- " pivot_df = pivot_df.max()\n",
- " pivot_df = pd.DataFrame(pivot_df, columns=[f'alignment_impact_of_{distance_metric}'])\n",
- " # display(HTML(pivot_df.to_html()))\n",
- "\n",
- " if align_df is None:\n",
- " align_df = pivot_df.copy()\n",
- " else:\n",
- " align_df = align_df.merge(pivot_df, left_index=True, right_index=True)\n",
- "\n",
- "# display(HTML(align_df.to_html()))\n",
- "align_df = pd.DataFrame(align_df.max(axis=1), columns = [\"max_alignment_impact\"])\n",
- "align_df = align_df.sort_values(by=[\"max_alignment_impact\"], ascending=False)\n",
- "display(HTML(align_df.to_html()))"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "f66e349f",
- "metadata": {},
- "source": [
- "## Detection impact"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "id": "34eca61b",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/html": [
- "\n",
- " \n",
- " \n",
- " | \n",
- " max_detection_impact | \n",
- "
\n",
- " \n",
- " \n",
- " \n",
- " | ArcFace | \n",
- " 41.8 | \n",
- "
\n",
- " \n",
- " | Facenet | \n",
- " 32.4 | \n",
- "
\n",
- " \n",
- " | Dlib | \n",
- " 27.3 | \n",
- "
\n",
- " \n",
- " | OpenFace | \n",
- " 20.2 | \n",
- "
\n",
- " \n",
- " | GhostFaceNet | \n",
- " 15.9 | \n",
- "
\n",
- " \n",
- " | SFace | \n",
- " 9.6 | \n",
- "
\n",
- " \n",
- " | DeepFace | \n",
- " 7.6 | \n",
- "
\n",
- " \n",
- " | Facenet512 | \n",
- " 6.9 | \n",
- "
\n",
- " \n",
- " | VGG-Face | \n",
- " 6.1 | \n",
- "
\n",
- " \n",
- " | DeepID | \n",
- " 5.6 | \n",
- "
\n",
- " \n",
- "
"
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "source": [
- "detect_df = None\n",
- "for distance_metric in distance_metrics:\n",
- " tmp_df = (\n",
- " pd.read_csv(f\"results/pivot_{distance_metric}_with_alignment_False.csv\")\n",
- " .rename(columns = {'Unnamed: 0': 'detector'})\n",
- " .set_index('detector')\n",
- " )\n",
- " ref_df = tmp_df[tmp_df.index == \"skip\"]\n",
- " \n",
- " j = []\n",
- " for i in range(0, len(detectors) - 1):\n",
- " j.append(ref_df)\n",
- " minus_df = pd.concat(j)\n",
- " \n",
- " tmp_df = tmp_df[tmp_df.index != \"skip\"]\n",
- " minus_df.index = tmp_df.index\n",
- " \n",
- " # print(\"performance with no detection\")\n",
- " # display(HTML(ref_df.to_html()))\n",
- " \n",
- " # print(\"pivot\")\n",
- " tmp_df = tmp_df.subtract(minus_df)\n",
- " # display(HTML(tmp_df.to_html()))\n",
- " \n",
- " # print(\"avg of detector impact for models\")\n",
- " # avg_df = pd.DataFrame(tmp_df.mean()).T\n",
- " avg_df = pd.DataFrame(tmp_df.max(), columns=[f\"detection_impact_of_{distance_metric}\"])\n",
- " # display(HTML(avg_df.to_html()))\n",
- "\n",
- " if detect_df is None:\n",
- " detect_df = avg_df.copy()\n",
- " else:\n",
- " detect_df = detect_df.merge(avg_df, left_index=True, right_index=True)\n",
- "\n",
- "# display(HTML(detect_df.to_html()))\n",
- "detect_df = pd.DataFrame(detect_df.max(axis=1), columns = [\"max_detection_impact\"])\n",
- "detect_df = detect_df.sort_values(by=[\"max_detection_impact\"], ascending=False)\n",
- "display(HTML(detect_df.to_html()))\n"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "1bdf64a3",
- "metadata": {},
- "source": [
- "# facial recognition model's best scores"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "id": "0cb1f232",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/html": [
- "\n",
- "\n",
- "
\n",
- " \n",
- " \n",
- " | \n",
- " best_accuracy_score | \n",
- "
\n",
- " \n",
- " \n",
- " \n",
- " | Facenet512 | \n",
- " 98.4 | \n",
- "
\n",
- " \n",
- " | Human-beings | \n",
- " 97.5 | \n",
- "
\n",
- " \n",
- " | Facenet | \n",
- " 97.4 | \n",
- "
\n",
- " \n",
- " | Dlib | \n",
- " 96.8 | \n",
- "
\n",
- " \n",
- " | VGG-Face | \n",
- " 96.7 | \n",
- "
\n",
- " \n",
- " | ArcFace | \n",
- " 96.7 | \n",
- "
\n",
- " \n",
- " | GhostFaceNet | \n",
- " 93.3 | \n",
- "
\n",
- " \n",
- " | SFace | \n",
- " 93.0 | \n",
- "
\n",
- " \n",
- " | OpenFace | \n",
- " 78.7 | \n",
- "
\n",
- " \n",
- " | DeepFace | \n",
- " 69.0 | \n",
- "
\n",
- " \n",
- " | DeepID | \n",
- " 66.5 | \n",
- "
\n",
- " \n",
- "
\n",
- "
"
- ],
- "text/plain": [
- " best_accuracy_score\n",
- "Facenet512 98.4\n",
- "Human-beings 97.5\n",
- "Facenet 97.4\n",
- "Dlib 96.8\n",
- "VGG-Face 96.7\n",
- "ArcFace 96.7\n",
- "GhostFaceNet 93.3\n",
- "SFace 93.0\n",
- "OpenFace 78.7\n",
- "DeepFace 69.0\n",
- "DeepID 66.5"
- ]
- },
- "execution_count": 9,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "df = pd.DataFrame()\n",
- "for align in alignment:\n",
- " for distance_metric in distance_metrics:\n",
- " tmp_df = (\n",
- " pd.read_csv(f\"results/pivot_{distance_metric}_with_alignment_{align}.csv\")\n",
- " .rename(columns = {'Unnamed: 0': 'detector'})\n",
- " .set_index('detector')\n",
- " )\n",
- " df = pd.concat([df, tmp_df])\n",
- "\n",
- "pivot_df = pd.DataFrame(df.max(), columns = [\"best_accuracy_score\"])\n",
- "\n",
- "# add human comparison\n",
- "pivot_df.loc[\"Human-beings\"] = 97.5\n",
- "\n",
- "pivot_df = pivot_df.sort_values(by = [\"best_accuracy_score\"], ascending = False)\n",
- "pivot_df"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "b81ebe92",
- "metadata": {},
- "source": [
- "# ROC Curves"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "id": "bcb4db0a",
- "metadata": {},
- "outputs": [],
- "source": [
- "def plot_roc(model_name, detector_backend, distance_metric, align):\n",
- " alignment_text = \"aligned\" if align == True else \"unaligned\"\n",
- "\n",
- " df = pd.read_csv(f\"outputs/test/{model_name}_{detector_backend}_{distance_metric}_{alignment_text}.csv\")\n",
- " \n",
- " #normalize\n",
- " df[\"distances_normalized\"] = df[\"distances\"] / df[\"distances\"].max()\n",
- " df[\"actuals_normalized\"] = 0\n",
- " idx = df[df[\"actuals\"] == False].index\n",
- " df.loc[idx, \"actuals_normalized\"] = 1\n",
- " \n",
- " y_actual = df[\"actuals_normalized\"].values.tolist()\n",
- " y_pred_proba = df[\"distances_normalized\"].values.tolist()\n",
- " \n",
- " fpr, tpr, _ = metrics.roc_curve(y_actual, y_pred_proba)\n",
- " auc = metrics.roc_auc_score(y_actual, y_pred_proba)\n",
- " auc = round(auc, 4)\n",
- "\n",
- " # best accuracy score\n",
- " result_path = f\"results/pivot_{distance_metric}_with_alignment_{align}.csv\"\n",
- " result_df = pd.read_csv(result_path)\n",
- " acc = result_df[result_df[\"Unnamed: 0\"] == detector_backend][model_name].values[0]\n",
- "\n",
- " label = f\"{model_name}_{detector_backend}_{distance_metric}_{alignment_text} (acc: {acc}, auc: {auc})\"\n",
- "\n",
- " return acc, auc, fpr, tpr, label"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "id": "84b3d5b5",
- "metadata": {
- "scrolled": false
- },
- "outputs": [
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAABXwAAAKnCAYAAAA1L4U5AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdeXhU9dn/8c/M5MxkJoGoYFgkgopFERfAQt21UnF5XH5txV2aujxabW1pXHhE0LpXq9hqpS7BrZaqtWqt1SotbqWILAqKWlwwlaVQKyFMmJnMfH9/UCIhs5zJnDmzvV/Xlesi8z1zzp2FZOaTe+6vxxhjBAAAAAAAAAAoed5CFwAAAAAAAAAAcAaBLwAAAAAAAACUCQJfAAAAAAAAACgTBL4AAAAAAAAAUCYIfAEAAAAAAACgTBD4AgAAAAAAAECZIPAFAAAAAAAAgDJB4AsAAAAAAAAAZaKq0AW4LZFIaOXKlerVq5c8Hk+hywEAAAAAAABQAYwx2rBhgwYOHCivN399uBUX+K5cuVINDQ2FLgMAAAAAAABABWppadGgQYPydv6KC3x79eolafMntnfv3gWuBgAAAAAAAEAlaG1tVUNDQ2c+mS8VF/huGePQu3dvAl8AAAAAAAAArsr3mFk2bQMAAAAAAACAMkHgCwAAAAAAAABlgsAXAAAAAAAAAMoEgS8AAAAAAAAAlAkCXwAAAAAAAAAoEwS+AAAAAAAAAFAmCHwBAAAAAAAAoEwQ+AIAAAAAAABAmSDwBQAAAAAAAIAyQeALAAAAAAAAAGWCwBcAAAAAAAAAygSBLwAAAAAAAACUCQJfAAAAAAAAACgTBL4AAAAAAAAAUCYIfAEAAAAAAACgTBD4AgAAAAAAAECZIPAFAAAAAAAAgDJB4AsAAAAAAAAAZYLAFwAAAAAAAADKBIEvAAAAAAAAAJSJgga+r7zyio4//ngNHDhQHo9HTz31VMb7zJkzR6NGjVIgENDQoUP1wAMP5L1OAAAAAAAAACgFBQ18N27cqH333Vd33XWXreM//vhjHXfccTriiCO0ePFi/fCHP9S5556rF154Ic+VAgAAAAAAAEDxqyrkxY855hgdc8wxto+fMWOGdtllF/3sZz+TJO2555567bXXdPvtt2v8+PH5KhMAAAAAAAAASkJBA99szZ07V+PGjety2/jx4/XDH/6wMAUBAFBGjDHqiCYKXQYAVBZjpFi40FW4zhij9nik0GUAQEUxxshENhW6jIrWumGDK9cpqcB39erV6tevX5fb+vXrp9bWVrW3tysYDHa7TyQSUSTy5QOJ1tbWvNcJAICbnAhqjTH6/a0Lte6fbQ5VBQAAYI8xRlJHocsAgJz5Mqy3Rze6UkdJBb49ceONN+qaa64pdBkAkBVjjNpj8UKXgRJgjNFz09/Svz7mD5oAuiNEAVD8jKIbfisTX1voQgCUKJ/HKnQJ8kj6+oAztH2gX9rjNkSMfuxCPSUV+Pbv319r1qzpctuaNWvUu3fvpN29kjR58mRNmjSp8/3W1lY1NDTktU4A7iq3cNQY6eQZc/XuKgI8/FcioZpE8pf7Wsaj8zb2dexS/w7+U88Nm+7Y+VDepsyKa8i/Cl1FeTKS4h5PbifxSG/sWq8NQb8jNQEAUGmKIUhEanZD1kpUUoHvAQccoOeee67LbS+++KIOOOCAlPcJBAIKBAL5Lg1AEm4EsYSj5cQoKJdm+Rm3fgEayRPLeEggnvoYj4zOX+tRNDgo49UOfv1y+eLRbIvswpuI6uQ/5XQKoKwZSXFvjkGsDX8fupNagzyGBYoJwU9+7TCoQf9zyf9tTnCAQjPSpsc+VWIts8aRHW+/oEJn7Z5yPbGhVZqe/zoKGvi2tbVp+fLlne9//PHHWrx4sXbYYQftvPPOmjx5sj777DM99NBDkqQLLrhAd955py677DJ997vf1V/+8hc99thj+uMf/1ioDwFwDF2q+eZimOiQPfr31iPnjFGuDV65MsZoUzz3wf7GGGlTqvMYWY+dqcTaj3K+TmYePf/Ftfq8Y1cXruWMaPIXsXRRt/5DWbE2niPBdYHtYhpy5Lq8PkE3Ruow3vxdwG4dkh775z5aG6ktdClZ6dMwWCc2TSNEQVExm/9jF7oMewh+XBO+6x+FLgFACbIG1GjHC/Ytisc6HssrT5on8V6/O0UWNPB98803dcQRR3S+v2X0wsSJE/XAAw9o1apV+vTTTzvXd9llF/3xj3/Uj370I91xxx0aNGiQ7rvvPo0fP9712oFsZApz7YejpRda2sipHJE+HDWqfvh/5F2zxKVqHPKFpJ+lXjaS2tP8Itn8PCr37rDz+9VrhTf3jpZ0L/02Ho8W7vcjtfVi5E4qgfYWfWvQFZIn+ZPjqr4ReYbmfp2gMcXwOAklxOMzOf1hKlOYayTN+mTfkgtZnbDj4CE6dco16f9PWiFl+gJUBQJpn3igPBhjZGK5beDpGiOtnfGWYqvc2bgGAHqqmIJEpJYpZK1EHrN5J4eK0draqrq6Oq1fv169e/cudDmoAMYYfXvGXC1Y8Z9cz6Qn/Ndof+8HjtSF0mYknT2gnxZXpwh0jXTSO5eo/4bS6WJ1W+2GFo1afLs8RfBrsOoru6nfL29LG9r03m57+XyZ9nwFSosxRrOunaqV/yit321OBbGZENTCLmOM1s54W9EVxfLKqvJE8ANUHoJEOM2tXLKkZvgChZDrqIVwNG477B0+oLcev+CA5M8PoxsVurW0nhAXnf57S43P5/wE3A3GGLWnGaPQ3tGuxb8/NuV6VcJfkmHvDgOqdfSFQ115IlVl7SOP53/yfp1gVXXGB4meYJAHknCVMUYdkcK/YiQW2WQ77N1xyK469Zqb5SmCpIUgFsXGxBIlGfaWWoBK8AMAKBUEvuhUbjNkneD0HNo3p4xTyJ+6Qy9o+dI8iNzqv2vTcskfcqSmSmGMUYepluTZ3B5bxIwxOu/P5+ntdW+nPa5Km3ddf/6bf1J1VdfhGR3RhGa9sUCSNOGq4aryJ3+59Jrzv6eO9+3NSttt9ovyVuc2pCNTsFnl54kUik+xhKNOMTKaNe1yrf3EjZnZ9l14zyOyAtUp1wlZYVdJjTZwiIl++Rh+wJSx8qR5vFlMCFABAMgPAl9IcnLsAFLZf/D26lPjd+ZBrT8k+WtyP08OjDHqiJbGkyljjH7/s4Va19JW6FJsG6OzNMbmsU+8sTTt+ppvjJcvEU25bucpYXDUKFX335EnZSg5uYa1xRqOlpuBw4Yr2LuOnzEFUlYBKbNh5fH75C2RwBcAAOQHgW+FyNS9m83YgUqUdtSCTem7d7W5nTgWTr0eTbPmsExhbikGqJWqbv2H8qYJeyUpsOeeGvLIw2lHXTByAKXIGKNZUy/Tyg+WFbqUosSIhOLmWghLQFpW/IN7y2Ol3gQRAABUBgLfCpBt926msQOVKGNYmytjpObxUss8B06VW+dtOYe5xbRJl1uCw3bTLgveJMxFyXFijEIsssmxsLeYwlGnELIWLzbgyl2pzYZ1CiMSAACAROB
bEdpj9rt3HR07APtiYVthrzFSx06HSImAFOnesW2M0e9vXah1/3QnrC21ANWbiBbN8z5rj2Hq/+B9SvdM1M5mX5kQ5qIU5aMzN9N82EwIRwurrEYO2GCicdfD3nILSAk+AQBAJSPwrTC5bRpWoTKNWnBCNLw5zDUB6ZIlkr/7xlhG0u+nv6t1C8PSwlfyW4/shbl2AlQ74wJyZYxRe8emvJ1/a+e/dL4++Pz9jMfNmfCyQlbqDc4IYoHUOiIRR8Ne5sMWt4xhboWPHHBrAy4CUgAAgPJB4FthQn6fQn6+7LY5OGoh02We/PwGrY7tKV35Ts7nc6Lz1k6Ya6dL1ROs1qY8P4Gc+Px39N7n7+X1Gl340388I+tHqqb3DjxxBlLINK4hFvnyDzi5duZKdOcWM0YXpOcf3FveGovvXwAAAGSF5A9Ix+aohVx1mMDmsNcGu523X3n9NXmDSTqFjdF5fz5Pb697O8OVMncTRazl0lNHZDyu3Oyxwx568OgHU64Hq+jeBVLJdlyDFaiWVZ1b4IviZWIJ22FvuY0csIOuWwAAAPQEgS9gV9NyyR/q0V2NMWqPpx45EIskpMlLJUkTrhquKn/33ZVN+yatPPp4e2MURu6rSK/qpFMU2js2aX7rkoxdqqUmUwjrJAJdVLJcN1PLZiO1gcOGqyoQ6PG1kH+5ztY10S/n0WcaXUD4CQAAANhD4AvY5Q9J/pqs72aM0dnPna2la95NeUxV3NJ3dIMkac03xsuXiCY9bsvT4HN/4FPESn3NiLVU+s3XMtY2Z8IcBatSz5ktJYSwQHq5BrWSZGQ0a9rlWvvJR47UlGlcA6MYipvT4xg8fp+8LsyqBQAAAModgS+QZ+FYWEP+epi+1tboyPneGyS1hpTzJmgj60dqh2rmzAKVINsxCm4olo3Ucu1QrWQmGncs7PUP7i2P1f3VLQAAAACyR+CL8mXM5hm8uYjau78xRqa9PfkpNm5U/7ZdbZ2nbv2H2nn2s/KGUo+O2DlYrfEOBCR0xAKVoyMScTTs3XHIrjr1mpvlyWGYqlvdu2kDXSOtnfGWYqs25r2OcpdpHEMmjGsAAAAAnEPgi/JkjNQ8PuOGa8ZIJp7pCeZ/18PtUkeSY43RJ2eepciy5GFK3OuXDr1dknTw65fLF08+rkGSavYboV79z+VJL4C8yTRGwY5SGbXg9MgBJOcf3FveGqskvicAAACASkDgWwaMMWqPxVOuh6Op18pWLGwr7F0xu6/a1/ntnfOJQ3Iua+e//FF1NbUp1z1Bum4BZC/TfN5Y5MtNI61Atazq3ALfUmFiCVthrzWgRjtesK9yaFiuaHTnAgAAAMWFwLfEGWP07RlztWDFfwpdSvFqWr55w7VtmHC72n+be4grSUZS1Z57a/D993Wbrdsa3ihdu3nDNm8wmHZcAwBkqxjn87ol0/xds9UfPNONHCCwBAAAAFBOCHxLXHssbjvs3X/w9gpaJbL7da7zd7eevesPSf6a7sdsNZ5h99dfkzcY7NGljDF66hfvaPUnbdL/vdmjcwBAT2Uzn3fgsOGqCgTyXJE7sh3X4PH75M1hxiwAAAAAlAoC3zLy5pRxCqV5Mhu0fKXRwWRz/q4jl5KU8PoV9/plfD0LQWKR+OawN4NVvT5SlX9sj64BAHZkms9bLLN3M3Xm2jpHNG477PUP7i2P5c3pegAAAABQKgh8y0jI71PIXwZfUhvzd21r+JpkJR+hYIzRwpGTtL5uN708eb4jl3tg/yvV4U2+KVuHN6r/85zuyHUAIJlSmM+bj43U0o1rkBjZAAAAAKCylEE6iJKTaVzD1uMYUszftXcZI9Phkdrbk1+mdaPW1+3Wo3Mns6rXR9pU1ZZy05+R9SMVrOrZ2AgAKBa5dudm05lrh39wb3lrLAJdAAAAAPgvAl+4K9txDSnm7xpjZFIEuVuu8/GZZ6v9/Q9THhL3+aWDbpYkTbhquPx1Seb8Sjr/xfP1wX8+yFhqhzeqOafMSRnqBquCBBIAeiRdyGqicfk8Vue/E9540uOcKURaO+MtxVZtdOR0mTpz7aB7FwAAAAC6IvCFu7IZ15BiHIMxRp+cfoY2vvVOyrsaj0cL9/uR2g5tsHWpo54/Vh1VsdQH2MgjRtaP1A7VOxA8AHCUnREI3x4ySZL0rxsWulVWzujMBQAAAID8IPCFsxwa12CMUYeplqLdO9oS4bBe8Y5X26Hn5lqtJGlVrw/V4UsT9kraY4c99ODRD6Y9hg5eAD2RaUSC0yMQnGANqNGOF+ybcoSNHXTmAgAAAEB+EPgWOWOM2mOpX54bjubxpbvZcnBcw5O3LNTqj9anvm8ve52760L/1FMj7kh7zNC+u2reMelrJswFkA/ZbmCWbARCx6ZN+uX5Z0qSvnfPI6pyYdM2wloAAAAAKF4EvnmUKazNfH/p5Blz9e4qlzq7MnXnZhLNfVyDJHVEE+nD3v+q3dCiU+/8pryh7ucJx8I6/LHDM87VlQhzAeSPk927VkOt4r64PImu5+swMcXN5lcpePw+eXOciQsAAAAAKG0EvnlijNG3Z8zVghX/ceV6+w/eXkErhyf52XbnpjlNhwlIlyyR/KlDVlmhpOMaJCkW+TIkb/zpwbICXT+uRDisfxx0sLyJqKzAyfIGun/cltenDl9U0uZAN5QiXAaAnsoU5ma7wdlTK36hDpN6vEz845j0SrZVAgAAAAAqDYFvnrTH4o6FvcMH9NbjFxygdE2oQcuXW5eqjc3UOsPcVOvy6PefX691HbtKV6beUC0bVsDXPfCN++RLRB05P4DykzGIdeQi2YW5mazd9E9FEjm8wkLSwGHDVRVI/TMaAAAAAFAZCHxd8OaUcQrl8BLbnMPcbCXZTM0Yoyenv6vVH7e5VsaA3epU5fe6dj0ApS/bmbhu+E9kjf6y6tcyaY6Jm5guvOcRWYGez9+tCgQYTwMAAAAAIPB1Q8jvU8hfQp/qJJupdUTitsPevg21+n8/HpVz8FDlZ1MgAF05ORPXCdaAGu14wb5Skh9VWzZTi6cZ07DFwGHDFexdx888AAAAAEDOSiiFRLFINld3a04EtcYYtXe0J11LxL68PRxrlzdJlpLqvgCKl9MzcQdMGStPHjcwM8YonvjvhmlJ2ne33kwtU/cu3bkAAAAAAKcQ+KJT54zeSFwy8S5rW2+klmyurrN1GJ393FlatnJx0vVATLrvv/8+/LHDFPETkgClIG2g6/BMXP/g3vLWWHkLUY0x+u3Uy7Tyg2W2jrcC1bKqez6uAQAAAAAAuwh8Iem/M3o/v0GrY3tKl76Z92uZ9tQduOFYWKf9dIF2WZP7tUbWj1SwKpj7iYAy5coGZ5KjgW66MQpbeKz8joTpiERsh71spgYAAAAAcBOBLyRJHdHE5rA3g1w3UjPGaMXpZ6h90aK0x+1i41yBkfvq5Yn3pw11glVBXiYNpFCMG5wVQ5ibLcY1AAAAAACKCYEvumm8fpSs2l5J13Kdz2va2zOGvVt83E869JlXFbJCSdc9QcJclCbXumoz1eHyBmdS5kC3mMJcY4w6Ip
Gka7HIps5/M64BAAAAAFBMCHzRjeX35nVG7xa7v/6avMHu4xbCsfbNs3ktaV4oJG+KwBcoRk5vPOaWfG9wtkUxBbrpGGM0K4sZvQAAAAAAFAsCXxSMNxiUN9Q9zPXGxEZsKEnFOCLBjnxvcFaK7M7oZT4vAAAAAKDYEPgCgENMLGE77LUzq9YtpdJ1WyjpZvQynxcAAAAAUGwIfMuAMUYd0dzmgcZyvD+ArjKNSCBkLax083klZvQCAAAAAEoXgW+JM8boyVsWavVH6wtdStbCsXZ5Y91vb+9od78YwGEev09eF2biInvM5wUAAAAAlDMC3xLXEU04GvYOsJapyj/GsfNtyxjT+e/DHzuMWb0oOek2ZTPRuMvVIBk73bt2w15m9AIAAAAASg2Bbxlp/OnBsgI97CiMbpRuGaoqT0Qez3ccrWtr7R2bMh/0XyPrRypYFcxbLUC2SnVTtkqSbfduuvm8EjN6AQAAAAClh8C3jFgBX88DX49P8qbuiMuH57/1vEK9dki5HqwKErSgqNjdlM0/uLc8lteFiiqP0927wd51/JwBAAAAAJQVAl/YZoyRac9tvu7W96/2BRWyQrmWBRREuk3Z2JAtP+jeBQAAAAAgMwJf2GKM0YrTz1D7okWFLgXokXSzd22fY6sZvWzK5r6OSITuXQAAAAAAMiDwhS2mvd3RsPe9QdLOwdSdd4CTmL1bfujeBQAAAAAgOQLfHjLGqD0WT7kejqZeK3W7v/6avMHum6mFY+06/LHDbJ1j+E4jdRLjHOCQTN27Jhp3NOxlRm/hWYFqWdX80QgAAAAAgG0R+PaAMUbfnjFXC1b8p9ClFIQ3GJQ31D2s9cakiH9zR92cCXMUrOoeCm/BhmxwSrbdu+lm79rFjF4AAAAAAFCsCHx7oD0Wtx327j94ewWt0pjzaYxk4h4p3C51dA2zEllu1hasYkM2uMPEErbDXv/g3vLWWIS1AAAAAACgbBH45ujNKeMUStMtGLR8JREuGWO0YnZfta/zS08ckvbYcKxd3lj329s7sguFAadl6t6lMxcAAAAAAJQ7At8chfw+hfzF/2k0xqQNZBMbPt8c9mbw3iBpwu8PkwjNUIQ8fp+8OY5rAAAAAAAAKGXFn1QiZ8YYnf2ns7V47eKUxwSiRg//99/n/sCniJX8uIiljGHvyPqRaef3AgAAAAAAAMgPAt8K0N7Rnjbs3dYLE56Tt65vj6/HhmxwkjFGJpZIvR6Nu1gNcmGMUUck0uP7xyKbHKwGAAAAAIDyROBbYeZMmJO0+zbR+m+1/OwoSVKoKiQvG66hCBhjtHbG27Y3ZUPxMsZo1tTLtPKDZYUuBQAAAACAskbgW+SMMeqIpu5ujEWy624MVgUVShLmJnzhrGsD8s3EErbDXv/g3vJY3jxXhJ7qiEQcC3sHDhuuqkDAkXMBAAAAAFBuCHyLmDFGT96yUKs/Wl/oUoCCGzBlrDxpNmTzWF5GiZSIC+95RFagusf3rwoE+FoDAAAAAJACgW8R64gmbIe9A3arU5Wf7kaUL4/fJ2+awBelwwpUy6rueeALAAAAAABSI/AtEY0/PVhWIHXYVeWnuxGlKd2mbGzIBgAAAAAAkB0C3wLKZj6vFfClDXxtXEyBmJQItythdV9OtLf3/NxAD7EpGwAAAAAAgLMIfPMoXaBrjNHvf7ZQ61raXKnjJw/HtcdnUsvPDs779YAt0nXvSps7eO2EvWzIBgAAAAAAYA+Bb544ueFarvN5Tfsm7fGZvWODfSPyBJmtidxl272bblM2NmQrfsYYdUQiKddjkU0uVgMAAAAAQOUi8M0Tuxuu9W2o1f/78ai0YZaT83kHzXlRNb136L4QDUu3DpXHZwjW4AgTS9gOe/2De8tbY/G9VwCZglpb55DRrGmXa+0nHzlUFQAAAAAA6CkCXxek23DN7c3WPMGgvKFQkkLM5jcgD9J170p08PZUrmFtIYLagcOGqyoQcO16AAAAAABUGgJfF+S84RpQhOzM593C4/fJmybwRXeZwtxi7KrdcciuOvWam+VRmlcsBAKE+wAAAAAA5BGBb08ZyZIUi8QVM93Di1gk3v0++SrFGJn29tTradaAZDKFuTLS2hlvKbZqo3tFVRBjjGZNvUwrP1jmyvXsBLV2EOYCAAAAAFB4BL49YIzR6W1+7RT36ZFLXy94LStOP0PtixYVtA6Uj2w3W8vEP7i3PFbPNx2sRB2RiO2w14mwlqAWAAAAAIDyQeDbAx3RhHaK23t5+oDd6lTlz1/YZdrbbYe97w2Sdg5W560WlI50HbwmGrcd9loDarTjBfsqXdbIfN7uMo1riEU2df77wnsekRVI/f+WsBYAAAAAAGyNwDdHp17/NfWuTb0BkZubsu3++mvyBoPdbg/H2nX4Y4cpYknjCYZKWsZRC7ZOYn8cA5utZc/p2btWoFpWNX+oAQAAAAAA9hD45qjKXzwbsm2qkrxW99sjHiniJ5QrJLeDWif4B/eWt8Yi0M2C07N3Bw4brqpA6j8oAQAAAAAAbIvAt8QZYzr/ffhjhxHsFiGnZ+I6JdM4Brp3s+f07F3GNQAAAAAAgGwR+Ja49o5NmQ/6r5H1IxWs6j7yAfllYglHw147c3PtINDNL2bvAgAAAACAQiDwLSPPf+t5hXrtkHI9WBUkYCqwTDNx7SCoLax0M3q33myN2bsAAAAAAKAQCHzLSLUvqJAVKnQZSMPj98mbY+CLwnF6Ri8AAAAAAIDTvIUuAABKhd0ZvWy2BgAAAAAACoUOXwDogXQzepnPCwAAAAAACoXAFwB6gBm9AAAAAACgGBH4Ag4wxsjEEsnXonGXq0FPpduQTeq6KRsAAAAAAEAxIvAtcsYYmfb21Otp1uAOY4zWznhb0RWthS4FOWBDNgAAAAAAUA4IfIuYMUYrTj9D7YsWFboUpGFiCVthr39wb3ks9kksVnY3ZJPYlA0AAAAAABQvAt8CytS9Gw+HbYe97w2Sdg4yT7TQBkwZK4/fl3TNY3nZyKtEpNuQTWJTNgAAAAAAULwIfAsk2+7dc3/gU8RKvT58p5E6yQo5VB16yuP3yZsi8EXpYEM2AAAAAABQqgh8C8S0t2fVvfv0d+YolCbQDVYF6TgE0mBDNgAAAAAAkC/GGMVisbTHRKNRV2oh8M0jY4zaO5KPbEjEvrx90JwX5QkGux2zKd6uo393tCKWNM8KpQ18AaTGhmwAAAAAAJQfOyGrW3XMnDlTq1evTntcJE0jmpMIfPPEGKOz/3S2Fq9dnHQ9EDV6+L//PuIPRyviT9Gdm+p2AJ3sdO+yIRsAAAAAAOXDGKPm5ma1tLQUupSiQ+CbJ+0d7SnD3myNrB+pYFX3DmAA2XfvsiEbAAAAAAClLxaLFV3Y279/fzU2NqbMFVpbW3XTTTflvQ4CXxfMmTCnW2CbCLer5WcH/3f9ZXlDqQNd5vOikjndvRvsXcf/JwAAAAAAykhTU5P8fn+hy5BlWWkzB7dqJPB1QbAq2G3+bsL68t8hKygv83mBbujeBQAAAACg/Dgxe3frDdD8fn9RBL7Fg
sAXQNHqiETo3gUAAAAAoIjkGtba3eAMPUfgC6Ak0L0LAAAAAEDPOdFVW4xhbUNDgyzLynxgBSHwBVASrEC1rOrUgS8AAAAAAEjOGKPm5uai2uQs0wZndmWam1uJCHwBAAAAAACAMhaLxRwNe50Iawlq84fAFwAAAAAAAChiuY5j2HqDs6amppw3OCOsLW4EvgAAAAAAAECRcnocg9/vzznwRXHzFroAAAAAAAAAAMk5OY6BDc4qAx2+SRhj1B6Lp1xvj365tqmjXVaSY9s72vNSGwAAAAAAAHKT64gENzk5joFRDJWBwHcbxhh9e8ZcLVjxn5THWMbohwpJko7+/ZHq8EVTHgsAAAAAAIDi4fSIBDcxjgF2MNJhG+2xeNqwV5Lksf8XoJH1IxWsCuZYFQAAAAAAAJzg5IgENzGOAXbR4ZvGm1PGKeT3dbt9w8Y2PTF5oSTp+W/+Sb1qalOeI1gVLI5WeWOkWDj1ejTNGpBHxhh1RCJJ12KRTS5XAwAAAAAoZk6MYnByRIKbGMcAuwh80wj5fQr5u3+KYtEvQ+DqqqBCVsjNsrJnjNQ8XmqZV+hKgC6MMZo19TKt/GBZoUsBAAAAABS5fIxiYEQCyhGBbyWIhe2HvQ1fk4o9wEbZ6IhEbIW9A4cNV1Ug4EJFAAAAAIBCydS9G41GHQ17GZGAckXgW2malkv+NIGuFZJ4eQAK4MJ7HpEVqE66VhUI8LIVAAAAAChx6QJdY4xmzpyp1atX2zqXE6MYGJGAckXgW2n8IclfU+gqUCHSzeeVus7otQLVsqqTB74AAAAAgPxwYiau3etkE+im09DQoJqaGsJaIIWCB7533XWXbrnlFq1evVr77ruvfvGLX2jMmDEpj58+fbruvvtuffrpp+rbt6++/e1v68Ybb1Q1QRFQVJjPCwAAAADFLR8zcXPVv39/NTY2pg1z6cwF0ito4Pvb3/5WkyZN0owZMzR27FhNnz5d48eP1/vvv6/6+vpuxz/66KO64oor1NzcrAMPPFAffPCBvvOd78jj8ei2224rwEcAIBW783klZvQCAAAAQCHEYjHXw95MgS5hLpC7gga+t912m8477zw1NjZKkmbMmKE//vGPam5u1hVXXNHt+L/97W866KCDdPrpp0uShgwZotNOO03z5tnckMw2I3liau9olzy+bqubOtodvh5Q3tLN55WY0QsAAAAAhebETFw7CHSB/CtY4BuNRrVgwQJNnjy58zav16tx48Zp7ty5Se9z4IEH6pFHHtEbb7yhMWPG6KOPPtJzzz2ns846y7G6jDEKDZ4hX2iFDn98atJjquJ+natbHLsmUO6YzwsAAACgHLg179Yt0Wi0899+v9+VwBdA/hUs8F23bp3i8bj69evX5fZ+/frpvffeS3qf008/XevWrdPBBx+8eTOojg5dcMEF+r//+7+U14lEIopstWlUa2tr2ro2xTfJF1ph++MIVgVtHwsAAAAAAIpXukDXyU3HACCfCr5pWzbmzJmjG264Qb/85S81duxYLV++XJdccomuvfZaXXXVVUnvc+ONN+qaa67p0fX+9P9ma4dgbbfbY5G4HnljviTxMgQAAAAAAArMic7bSg50GxoaZFlWocsA4JCCBb59+/aVz+fTmjVruty+Zs0a9e/fP+l9rrrqKp111lk699xzJUl77723Nm7cqPPPP19XXnmlvF5vt/tMnjxZkyZN6ny/tbVVDQ0NtmoMVgUVskLdbo8l4rbu7xpjpFg49Xo0zRqQhDFGHVt1xvdELLLJoWoAAAAAlLNcw1q3g9pMm46VIubqAuWlYIGv3+/X6NGjNXv2bJ100kmSpEQiodmzZ+viiy9Oep9wONwt1PX5Nm+qZoxJep9AIKBAIOBc4fkQDUtVyevPyBhp5tHS6iXO1oSKZYzRrKmXaeUHywpdCgAAAIASlynMLcau2kyBLuEogGJX0JEOkyZN0sSJE7X//vtrzJgxmj59ujZu3KjGxkZJ0tlnn62ddtpJN954oyTp+OOP12233aaRI0d2jnS46qqrdPzxx3cGvyVj64D61qE9D3yz0fA1KUnHMrC1jkjE0bB34LDhqir2P7oAAAAA6KIURyQ41XlLoAug1BU08D3llFO0du1aTZ06VatXr9Z+++2n559/vnMjt08//bRLR++UKVPk8Xg0ZcoUffbZZ9pxxx11/PHH6/rrry/Uh5CWMUamvT3pWqL1c2cv1n9vqfF5Kd0vJSuUfh0VI93Ihq1HMVx4zyOyAtU5XasqEODBEgAAAOAQJ4JYO9dwu+vWibCWoBYANiv4pm0XX3xxyhEOc+bM6fJ+VVWVpk2bpmnTprlQWW6MMVpx+hlqX7Qo88GXvC3V9cntgoS5sCmbkQ1WoFpWdW6BLwAAAABnGGPU3NyslpaWQpeSFTthLmEtADin4IFvuTLt7bbC3mDfiDy9d5D8NS5UBdgf2cAoBgAAAMBdmbp3o9Goq2EvIxIAoDQR+Lpg99dfkzcY7HpjNCzdOlQen+EXX5EzxsjEEqnXo3EXq3FWupENjGIAAAAA3JNt925TU5P8fn9eayKoBYDSRODrAm8wKG9om83Sqow7G7UhrUxhroy0dsZbiq3a6F5RLmJkAwAAAFAcYrGY7bC3oaFBNTU1hLEAgKQIfFGxjDFaO+NtRVe0OnI+/+De8ljezAcCAAAAKClubJQWjUY7/52pe5fOWwBAOgS+qFgmlrAd9loDarTjBftKaR5TeSwvD7oAAACAMlOIjdL8fn/exzUAAMoXgS8gacCUsfL4fSnXCXMBAACAypTNqAUnNDQ0yLIs164HACg/BL6AJI/fJ2+awBcAAAAA2CgNAFAKCHwBAAAAALCBUQsAgFLADlMAAAAAAAAAUCbo8AUAAAAAlBxjjGKxWN6vE41G834NAACcROALAAAAAHBVrmGtMUYzZ87U6tWrHawKAIDyQOCbo0Q4rES8+2Zfifb2AlQDAAAAAIWVKcwt1bC2oaFBlmUVugwAADIi8O0BY0znv/9x0MHyJXiJDwAAAIDyV2xhbv/+/dXY2CiPx5P3a1mW5cp1AADIFYFvD5gsuneDo0bJEwzmsRoAAAAAlc6NebZOh7lOhLWEsAAAdEfgm6PdZr+oQF1tynVPMMgDELjKGKOOSCTleiyyycVqAAAAkG/GGDU3N6ulpaXQpXSyE+YS1gIAkB8EvjnyVgflDYUKXQYgafOD/VlTL9PKD5YVuhQAAAC4JBaLuRr2EuYCAFDcCHyBMtIRidgOewcOG66qQCDPFQEAAMBNTU1N8vv9eb0GYS4AAMWNwBcoUxfe84isQHXK9apAgAfqAAAAZcbv9+c98AUAAMWNwBcoU1agWlZ16sAXAAAAAAAA5cdb6AIAAAAAAAAAAM6gwxcAAAAACsQYo1gsltM5otGoQ9UAAIByQOALAAAAAAVgjFFzc7NaWloKXQoAACgjjHQAAAAAgAKIxWKOhr0NDQ2yLMux8wEAgNJEhy8AAAAA9ECu4xi2HsXQ1NQkv9+fUz2WZcnj8eR0DgAAUPoIfFG2jDEysUTq9Wjc
xWoAAABQTpwex+D3+3MOfAEAACQCX5QpY4zWznhb0RWthS4FAAAAZcjJcQyMYgAAAE4i8EVZMrGE7bDXP7i3PBbjrAEAANAzuY5jYBQDAABwEoEvyt6AKWPl8ftSrnssLw+wAQAA0GOMYwAAAMWEwBdlz+P3yZsm8AUAAAC2lWlDtq03XAMAACgmBL4AAAAAsBWnN2QDAABwE4EvAAAAgIpip3vXbtjLhmsAAKDYEPiiJBljZGKJ1OvRuIvVAAAAoFhkCnONMZo5c6ZWr15t63yZNmRjwzUAAFBsCHxRcowxWjvjbUVXtBa6FAAAABQRp0cxNDQ0qKamhkAXAACUFAJflBwTS9gOe/2De8tjefNckXuMMeqIRFKuxyKbXKwGAADAWZm6czPJZhRD//791djYmDbMpXsXAACUIgJflLQBU8bK4/elXPdY3rJ5kG6M0aypl2nlB8sKXQoAAEDWnB61kAmjGAAAQKUi8EVJ8/h98qYJfMtJRyRiO+wdOGy4qgKBPFcEAABgj9OjFjJhFAMAAKhkBL5ACbrwnkdkBapTrlcFAjzBAQAARSMWizk6aiETuncBAEAlI/AFSpAVqJZVnTrwBQAAcFOmcQ3RaLTz34xaAAAAyC8CXwAAAAA9lu24Br/fnzbwBQAAQG68hS4AAAAAQOnKZlxDQ0ODLMvKc0UAAACVjQ5fAAAAAI5gXAMAAEDhEfgCAAAAcATjGgAAAAqPkQ4AAAAAAAAAUCYIfAEAAAAAAACgTFTsSIdwtENV0Y4kt8cLUA0AAAAAAAAA5K5iA98x18+WNxDqvuCJqtce7tcDAAAAFCtjjGKxWNK1aDTqcjUAAABIp2IDXzuqq5h4AQAAgMpmjFFzc7NaWloKXQoAAABsqNjA9+VLD1e/vtt3u729o12HPz5VkuTxeNwuCwAAAHBVuu5daXMHr52wt6GhQZZlOVkaAAAAeqBiA9+g36eQP8mH7/G5XwwAAACwlUwhrJPXmTlzplavXm3r+KamJvn9/qRrlmXRMAEAAFAEKjbwRXEzxsjEEsnX2FgPAACUuHSBbrYhrFsaGhpUU1NDqAsAAFDkCHxRdIwxWjvjbUVXtBa6FFcZY9QRiaRcj0U2uVgNAADIl2Kcidu/f381NjamDXPp4AUAACgNBL4oOiaWsBX2+gf3lscqjY31MoW5Rkazpl2utZ985GJVAAAgW06MWrA7E9dOCOsUwlwAAIDyQeAL16Ub1yB1HdkwYMpYefzJ5yp7LG/RPDFJF+g6HeYOHDZcVYGAI+cCAAD25aMzl5m4AAAAcBqBL1yV7bgGj98nb4rAt1gYYzRr6mVa+cGynM+145Bddeo1N8uj1E/uqgIBnvwBAJAHmbp37Xbm2sVMXAAAAOQDgS9cZXdcg1Q6Ixs6IhFbYS9hLgAAhZMpzM12o7R0nbl20cELAACAfCDwRcGkG9cgFdfIBrsuvOcRWYHqpGuEuQAAFIbToxjozAUAAEAxI/BFwZTCuIZsWYFqWdXJA18AAFAYsVjMdthrZ6M0OnMBAABQzAh8AQAAUNQyjWPIJBqNdv470ygGwlwAAACUOgLfZIxRICYlwu1KWN2XE5va3a8JAACgAjk9jsHv9+c8excAAAAoZgS+2zDG6CcPx7XHZ1LLzw5Oekzc65cOvd3lykqDMUYmlki9Ho27WI0zjDHqiERSrscim1ysBgCAypLNOIZMGhoaZFlJ/poPAAAAlBEC322Y9k3a4zP7x3uCwfwVU2Qyhbky0toZbym2aqN7ReWZMUazpl6mlR8sK3QpAABUvEzjGDJhXAMAAAAqAYFvGoPmvKia3jt0uz0WievlyfMlqWKeNBhjtHbG24quaHXkfP7BveWxvI6cK586IhHbYe/AYcNVFQjkuSIAAMpLpvm8W8/fZRwDAAAAkBmBbxqeYFDeUKjb7V5f6Y0lyJWJJWyHvdaAGu14wb5SmizcY3lLLiy/8J5HZAWqU65XBQIl9zEBAJBPmcJcY4xmzpyp1atXu1gVAAAAUN4IfJG1AVPGyuP3pVwvpTA3m/m8VqBaVnXqwBcAAHzJ6c3WmL8LAAAA2EPgi6x5/D550wS+pYL5vAAA5E82m631799fjY2Naf9gzPxdAAAAwB4CX1Qs5vMCAOCOTJutEeYCAAAAziHwRdnKZlwD83kBAMheuhm9bLYGAAAAFAaBL8pStuMamM8LAEB2nJ7RCwAAAMAZBL4oS4xrAAAgN+m6d6XNHbx2wl42WwMAAADcReCLsse4BgAAusoU5hpjNHPmTK1evdrW+dLN6GU+LwAAAOAuAl+UPcY1AADwJadHMTQ0NKimpoZQFwAAACgSBL4AAAAlIlNnrh12RzFIUv/+/dXY2Jg2zKWDFwAAACguBL4AAAAlIB+bpKUbxSAR5gIAAACliMAXAACgBMRiMUfDXkYxAAAAAOWJwBcAAKDEZOrMtYPuXQAAAKA8EfgCAACUGL/fn3PgCwAAAKA8eQtdAAAAAAAAAADAGQS+AAAAAAAAAFAmGOkAAABQJIwxisViSdei0ajL1QAAAAAoRQS+AAAARcAYo+bmZrW0tBS6FAAAAAAljMAXAAAgR+k6c+2KRqO2wt6GhgZZlpXTtQAAAACULwJfAACAHOSjM7epqUl+vz/pmmVZ8ng8jl0LAAAAQHkh8AUAAMhBLBZzNOxtaGhQTU0NoS4AAACAHiHwBQAAcEi6zly76OAFAAAAkAsCXwAAAIf4/f6cA18AAAAAyIW30AUAAAAAAAAAAJxB4AsAAAAAAAAAZYKRDgAAAGkYYxSLxVKuR6NRF6sBAAAAgPQIfAEAAFIwxqi5uVktLS2FLgUAAABAiTPGuHIdAl8AAIAUYrGY7bC3oaFBlmXluSIAAAAAbjPGKJFoz/kcCxac4lBF6RH4AgAA2NDU1CS/359y3bIseTweFysCAAAAypcTIatTdSxYeKra2t7N+VwbNyYcqCgzAl8AAAAb/H5/2sAXAAAAgDM2h6wTtH79wkKXUpIIfAEAAAAAAAAUjUSivejC3tra4Ro9alZOr+prbW2VNMC5olIg8AUAAEXHGKNYLFboMhSNRgtdAgAAAFDRDjl4nny+UKHLkNcbzHmEm8/X4VA16RH4AgCAomKMUXNzs+3N0gAAAACUL58vVBSBbykh8EUnY4xMLPnwaBONu1wNAKBSxWKxogt7GxoaZFlWocsAAAAAgIwIfCFpc9i7dsbbiq5oLXQpAIAi5saoha3HKDQ1NRXFRmmWZeX88i0AAAAAcAOBLyRJJpawFfb6B/eWx/K6UBEAoNgUYtSC3+8visAXAAAAAEoFgS+6GTBlrDx+X9I1j+WlwwkAKpTboxYYowAAAAAA2SPwRTcev0/eFIEvAKA0OTGKwe1RC4xRAAAAAFBOjDGuXIfAFwCAMpePUQyMWgAAAACALxljFE4k0hwgnbhwuSu15DSMddOmTTkXcNddd2nIkCGqrq7W2LFj9cYbb6Q9/osvvtBFF12kAQMGKBAI6Ctf+Yqee+65nOsAAKBcOT2KgVELAAAAAHJhjFE8Hk77Vkq
/0klUayl/mFCxfw/Plz9O7dGwCwZs0a/Pzzz0y6x48fw8vLC4cOHZKyAFNGZmYmioqKqmVF+KEIBIJqT/R/bIyNjeHh4YENGzZg8uTJrJf3p0+fYt++ffD392eN9VWf+atXr6Jp06YAKsaip0+fQk1NDTY2NjUuL5fnQjLGz58/X+p8Lv1cTVNbfauyPlRePpXbSx5Xr16VGssnTJgAgJu+IQ8udcFlXFWkxymDi76lrIw1MW6/DwcOHEBgYCAOHjzITA5ygdcz6l7POHLkCIqLizFkyBBWeGlpKUpLS6WsSVVVVTl9GJCXLxe4vDOZmJjg7t27rLDExESmPuzt7SESiRAVFYWRI0dWWwagwoL7zZs3SEhIQNu2bQFU6APl5eVyx3qJFW116y0xMREqKipS1rmqqqrMR4gDBw7A1dWVNXHL5dm7e/cuLC0tGZ2Gh+dzhnfa9h6UlpZynuy1srL64IHlY1BQXq54srcauOhrQ0vG0gmeTwcdHR34+PggODgYT548kdqA39XVFT/++CN+/PFHTJkyBVeuXMHff/+Nq1evYvv27RAIBMzAPHnyZLi6uqJr165YvXo1bt68iQcPHuDs2bOIiIjg7CisKvb29nj48CEOHjyIzMxMrFmzRspxWEhICA4cOICQkBCkpKQwztUAwMHBAX5+fvD398exY8fw4MEDXLt2DYsXL0Z4eLjS60+aNAlnz57FgwcPcPPmTVy8eJF5sZo3bx5+//13ZGRk4N69ezh16hQTN3jwYAgEAowaNQrJyck4ffp0tSx/x48fj5ycHPj6+uL69evIzMzE2bNnMXz4cMZRTZcuXbB3717ExMTgzp07CAgIYNWzu7s7vv76a/Tv3x+RkZF48OABIiIicObMGU4yBAUFYcmSJThx4gRSU1Mxbtw4vHnzhonX1dXF1KlTMXnyZOzevRuZmZm4efMm1q5dy1jq2dvb48aNGzh79izS09Mxd+7car9QVIfc3Fx4enoiPz8f27dvR25uLp4+fYqnT58y9aaI9u3bQ0tLC7NmzUJmZib2799fbedxhoaGMDY2xpYtW5CRkYELFy6wlqBxYeLEiTh9+jR++eUX3L9/H5s3b0ZERES1nB9yaZ8xY8bg/v37mDZtGtLS0jiX18vLS+rl0t7eHseOHUNiYiKSkpIwePBglpJuY2ODgIAABAYG4sSJE3jw4AGio6OZyZ0JEyYgNzcXgwYNwo0bN3D//n3s3buXmZy9du0amjRpguzsbLlyxcfH49ixY/jrr78QExOD7t27o7y8nOUAavLkybh69SoWLVqEjIwM7N+/H1u2bMH48eOZNMHBwfD395fKf/v27Wjfvj2n5bJVsbOzQ2lpKdauXYu//voLe/fulbJY6dSpE168eIFly5YhMzMT69evR0REBCtNaGgowsLCsGbNGty/f59pU2WyS9DX18eIESMwZcoUXLx4EQkJCRg+fDhcXV3x5ZdfMumaNGnC6muPHDmC6Oho/PXXX/j999/h4eGBvn37spzQ7Ny5E1evXkVmZiZ+/fVXDBgwAJMnT4ajoyMAoGHDhmjevDlzSD5E2drawtLSksknOTkZiYmJyMnJwdu3b5GYmCg1QRQTE4PGjRvD1tZWWdX/51i3bh2Ki4vh5eWFy5cv49GjRzhz5gw8PDzQoEEDKcdMsbGxWLZsGdLT07F+/XocOXKE+cjbrVs3uLq6om/fvjh37hyysrLw559/Yvbs2bhx48YHy8rluQgODsb169cxbtw43L59G6mpqdi4cSNevnzJqZ+raWqrb1XWh0pQ1F7yWL9+PY4fP47U1FSMHz8er1+/RmBgIABu+saH1AWXcVWRHqcMLvqWsjLWxLhdXfbv3w9/f3+EhYWhffv2jK7y9u1bpefyekbd6RkStm/fjr59+0ptd6Onpwd3d3dMmzYN0dHRePDgAXbt2oU9e/awVl34+/vL3N5IXr5c4PLO1KVLF9y4cQN79uzB/fv3ERISwpoA1tTUxIwZMzB9+nTs2bMHmZmZzHufMtklNG3aFN27d8eoUaNw7do1xMbGYsKECRg0aBAsLCwAVDjQa9KkCbNVTpMmTWBnZ4fvv/8e165dQ2ZmJsLCwhAZGclsoRUXF4dVq1YhKSkJf/31F/bt28c4CTQ0NAQAvHz5Eps2bUJqaioSExMRFBSEI0eOsLa94frsxcTEVMvRHg/PJ02t7hD8CVITTtsqO+949+4dFRcXyz0+F+dkeWVljMO158UllFdWxjpyi9/RqaimdCqqKeUWv5OKr3x8LmX+EP4Nm7X/+eefBIC++eYbuWkOHTpEnTp1In19fVJXVydLS0saPHgwXb3Kfn6KiopoyZIl1LJlSxKJRKShoUFNmjShyZMn08OHDxXK4e7uTkFBQTLjpk2bRsbGxqSjo0M+Pj60cuVKKccXR48epVatWpFQKKR69erRt99+y8SVlJTQvHnzyMbGhtTV1cnc3Jz69etHt2/fVlw5RDRhwgSytbUlDQ0NMjExoaFDh9LLly+JqMIJSNOmTUkkEpGRkRH16dOH/vrrL+bcuLg4atmyJQmFQmrVqhUdPXqUs9M2IqL09HTq168fGRgYkEgkoiZNmtCkSZOYZ+vt27fk4+NDenp6ZGVlRbt27WI5bSMievXqFQ0fPpyMjY1JU1OTmjdvTqdOnSIi5U7bSktLKSgoiPT09MjAwICmTJlC/v7+LGcP5eXltGrVKnJ0dCR1dXUyMTEhLy8vunTpEhFV3BPDhg0jfX19MjAwoLFjx9LMmTNZ11HmQEIZlZ25SJySyDoePHjAKb/jx4+TnZ0diUQi6tWrF23ZskXKaZsyx1qRkZHUtGlT0tDQIGdnZ4qOjpbpSEeeMxUioi1btlCDBg1IJBJR37596eeffyYzM7NqyaGsfYiI/vjjD7KzsyMNDQ3q2LEj7dixQ6kzlVevXpGmpialpqYyYQ8ePKDOnTuTSCQiKysrWrdundRzXVhYSJMnTyZzc3MSCoVkZ2dHO3bsYOKTkpLI09OTtLS0SFdXlzp27Mg4TpLUkaJ2jI6OZurd2NiYhg4dKuWwUVLm5s2bM33Uli1bWPEBAQFS99+bN29IJBJJpZWwc+dOUqZO/fLLL2Rubk4ikYi8vLxoz549UnW9ceNGsrKyIm1tbfL396eFCxey2pSIaNOmTUybmpub08SJExXKXpXCwkIaN24cGRoakpaWFvXr14+ePHnCSgOAdu7cyfxevXo1WVpakrq6OjVs2JDmzJkj5UhsxowZVL9+fVJXVyd7e3sKCwtTqAvIc9pmbW0t8xmujKenJy1evFhhOf/LZGVlUUBAANMeVlZWNHHiRGb8kmBtbU3z58+nAQMGkJaWFpmZmdHq1atZaXJzc2nixIlkYWHB5OXn58eM61z6IkUO+Lg8F9HR0eTm5kYaGhpkYGBAXl5eTDyXfk4esvpeWc61qpaxJvrWqtfh0odyaS9ZY83+/fvJxcWFhEIhOTk50YULF1jnKNM3FMGlLpSNq0SK9ThFTtuIuOlbyspY3XGbqMLJFKo4QJZH1fZ2d3eX
2c8FBAQozet95OX1jAo+VM8gIkpNTSUAdO7cOZnxT548oWHDhpGFhQVpamqSo6Oj1Hjo7u4u1dbK8g0JCZHSB6rC5Z1p3rx5VL9+fdLX16fJkyfThAkTWHqDWCymn3/+maytrZkxf9GiRQplr8qrV6/I19eXdHR0SE9Pj4YPH07v3r1j4iX3Z+VnJz09nb799lsyNTUlLS0tcnZ2ZjnuS0hIoPbt25O+vj5pampS06ZNadGiRSynoi9evKAvv/yStLW1SUtLi7p27Sr1vsrl2SssLCR9fX2Ki4uTW8Z/wzwAT93zsZy2CYiq4d3hX0Bubi709fWRHH8VTV2qt4xQQklJCeP0ZdasWZ/8Mj8u5IvFsL1c4Sgp8+sW0K5ilSkWFyD6UsWm6Z3c78jcw/e/RFFRER48eIBGjRpVa9k4Dw8PT3UZNWoUUlNTERMTU9eiAACmTZuG3NxcbN68ua5F+SQICQnBpUuXZO51ylOz3Lt3D126dEF6ejqzxQTP+2FjY4NJkyZh0qRJdS0KDweq215ZWVlo1KgRbt26JeUslYenKrye8WkTEBAAgUBQ7ZVnPNVn48aNOH78OM6dOyc3DT8PwFMTSOYl3759Wy0Hi9WFX3fPI0W5uBBicYHUwcPDw8NT+6xYsQJJSUnIyMhglkcGBATUtVgMs2fPhrW19Qc5q/k3ERERgWXLltW1GP8Jnjx5gj179vCTvTw8PDwfAK9nfD4QEaKjo2Xum89T86irq7O2y+Lh+dzhnbbJgIhkemeXUNtec2sDIkKBgkGzoOx/e3VdvuICTRR/DLF4eOqMHj16yLVkmDVrVq15ZB8zZgx+/fVXmXFDhgyR6432v0ZMTAx69OghN17iIZorn1O9X7t2DcuWLcO7d+/QuHFjrFmz5r0daNQGBgYGtfZ8fI5I9qHjqX2q48me57/N59Tnf4o8fPgQTk5OcuOTk5PRsGHDjyjRp0tN6pMfq955PePzQSAQSDlg46k9PqXngIenJuC3dKgCEWHHjh2cnbJ9Dls6EBF638zg7JRtOw2WO+Grr98WbdscqtbG/v9G+KUcnz/Z2dkoLCyUGWdkZAQjI6Naue7z58+Rm5srM05PT0/K2+x/lcLCQoXOM+zs7KqVH1/vPDw8PP8d+D7/wygrK0NWVpbceBsbG6ip8XZDQM3qk3y98/B8+vDzADw1wcfa0oEfMapQWlrKebLXysoK6urqtSzRh1NQXs55steBUtCtwyWoqWnLjFdREf3nJ3t5/h00aNCgTq5ramrKv2hyQCQSVXtSVxF8vfPw8PD8d+D7/A9DTU2tRsfgfzM1qU/y9c7Dw8PDU5PwE74KmDp1qkLrXXV19U9i8lPpdg3i/8Xd6dAMWqrSWzeXiwtx+YoLNFAMNbU+/3mnbDw8PDw8PDw8PDw8PDw8PDw8PJ8j/ISvAoRC4b9uuwZNFENThq8+MYr4fXt5eHh4eHh4eHh4eHh4eHh4eHg+c/gJ38+c6m7XEH+lP+reJpmHh4eHh4eHh4eHh4eHh4eHh4enNuAnfP9FcNmuQdlkr75+W6ioiGpHQB4eHh4eHh4eHh4eHh4eHh4eHp5ahZ/w/RehpaoCbVVVqXAxVJjtGjp+Fa9wf17eKRsPDw8PDw8PDw8PDw8PDw8PD8/ni7Q5KM9nS7m4EGJxgcxDgqqqlsKDn+zl4fnvIRAIcOLECQBAVlYWBAIBEhMT5aaPjo6GQCDAmzdvPop8XAgNDUWrVq0+2vWq1sGuXbtgYGDw0a6vCK6ybN++HZ6enrUvEA9PFQYNGoSwsLC6FoOH54OwsbHBqlWr6lqMj0rVsXbYsGHo27evwnM6deqESZMm1apcNU3VcfRj6xiK4CrL3LlzMXr06NoXiIenEiUlJbCxscGNGzfqWhQeHh7wE76fP0TMv5evuCD6UgupI+ZK+zoUkOdTJS4uDqqqqujZs2eN5SkQCKSOr776qsby/9z5lCYF5WFlZYUnT56gefPmdS3KB7F161Z07NgRhoaGMDQ0RLdu3XDt2rVau56Pjw/S09NrLf+apqioCHPnzkVISEhdi8Li3bt3mDRpEqytrSESieDm5obr169LpUtJSUHv3r2hr68PbW1tfPHFF3j48KHcfDt16iSzf6rJ/u9jkpOTAz8/P+jp6cHAwAAjRoxAXl6ewnMyMzPRr18/mJiYQE9PDwMHDsSzZ89YaW7evAkPDw8YGBjA2NgYo0ePZuX76tUrdO/eHRYWFtDQ0ICVlRUmL+RrSAAAbpJJREFUTJiA3NxcJo3kY0jV4+nTp0yaOXPmYOHChXj79m0N1QgPUDH5JqlvdXV11K9fHx4eHtixYwfKy8vrRCZeL6ge169fR9euXWFgYABDQ0N4eXkhKSmprsWSy+rVq7Fr1666FqPWmTp1KqKioupaDM48ffoUq1evxuzZs+taFCmUjd9cxipZrF+/HjY2NtDU1ET79u1rVeerbR4+fIiePXtCS0sLpqammDZtGsrKyuSmlzfuCgQCmTpURkYGdHV1pd5J7t27h/79+8PGxgYCgUDpB60lS5ZAIBCwPugIhUJMnToVM2bMqE6ReXh4agl+wvcTh4iQLxbLPfJKC5Rn8v/w+/PyVGb79u2YOHEiLl++jMePHytMS0QKFY3K7Ny5E0+ePGGOkydP1oS4PB8JVVVVmJmZQU3t897xJzo6Gr6+vrh48SLi4uJgZWUFT09PZGdn18r1RCIRTE1NayXv2uC3336Dnp4eOnToUNeisBg5ciQiIyOxd+9e3LlzB56enujWrRur3TIzM/HVV1+hSZMmiI6Oxu3btzF37lxoamrKzffYsWOsfunu3btQVVXFgAEDPkaxahw/Pz/cu3cPkZGROHXqFC5fvqzQkis/Px+enp4QCAS4cOECYmNjUVJSAm9vb2Yi8PHjx+jWrRvs7OwQHx+PM2fO4N69exg2bBiTj4qKCvr06YOTJ08iPT0du3btwvnz5zFmzBipa6alpbHqvPLz0bx5c9ja2uLXX3+tuUrhAQB0794dT548QVZWFiIiItC5c2cEBQWhV69enMfxmobXC7iRl5eH7t27o2HDhoiPj8eVK1egq6sLLy8vlJaW1rV4MtHX1//kP2TXBDo6OjA2Nq5rMTizbds2uLm5wdrauq5FYaFs/OYyVsni0KFDmDJlCkJCQnDz5k20bNkSXl5eeP78+ccqWo0hFovRs2dPlJSU4M8//8Tu3buxa9cuzJs3T+45bm5urD72yZMnGDlyJBo1aoR27dqx0paWlsLX1xcdO3aUyqegoACNGzfGkiVLYGZmplDO69evY/PmzXB2dpaK8/Pzw5UrV3Dv3j2Opebh4ak16D/G27dvCQAlx1+VGV9cXEwhISEUEhJCxcXFH1k6NuXl5dTrRjrVv3CL0/G64BmVleXLPcrLy+u0PP8mCgsLKTk5mQoLC+talPfi3bt3pKOjQ6mpqeTj40MLFy5kxV+8eJEA0OnTp6lNmzakrq5OFy9eJLFYTEuXLiVbW1sSCoVkZWVFP//8M3MeADp+/LjU9V6+fEmDBg0iCwsLEol
E1Lx5c9q/fz8rjbK8Hz58SAMGDCB9fX0yNDSk3r1704MHD5SW9dKlS6SmpkZPnjxhhQcFBdFXX31FREQhISHUsmVLVvzKlSvJ2tqa+R0QEEB9+vSh5cuXk5mZGRkZGdG4ceOopKSESVNUVEQ//vgjWVhYkJaWFrm4uNDFixdZdVr5CAkJUSq/ojy5yk5EtH37dnJyciKhUEhmZmY0fvx4Jq5yuz148IAA0K1bt5j48PBwsre3J01NTerUqRPt3LmTANDr16+ZNDExMfTVV1+RpqYmWVpa0sSJEykvL4+J37NnD7Vt25Z0dHSofv365OvrS8+ePWPiJfVz/vx5atu2LYlEInJ1daXU1FSldSSvHipTVlZGurq6tHv3bk75cZVXUgc7d+4kfX19Vh4//fQTmZiYkI6ODo0YMYJmzJjBkvFD7ykJO3fuJCsrKxKJRNS3b19asWKFlCxV6dmzJ02dOpUVdu3aNerWrRsZGxuTnp4eff3115SQkMBK8/r1axo9ejSZmpqShoYGNWvWjP744w8m/sqVK+Tu7k4ikYgMDAzI09OTcnJyFMoioaCggFRVVenUqVOs8DZt2tDs2bOZ3z4+PjRkyBBOecpj5cqVpKury7pHuTB9+nSyt7cnkUhEjRo1ojlz5rDaS9KmlQkKCiJ3d3fmt7K+ThnJyckEgK5fv86ERUREkEAgoOzsbJnnnD17llRUVOjt27dM2Js3b0ggEFBkZCQREW3evJlMTU1JLBYzaW7fvk0A6P79+3LlWb16NVlaWjK/qz4b8pg/fz7TB/PUDLLuPyKiqKgoAkBbt25lwl6/fk0jRoygevXqka6uLnXu3JkSExNZ5504cYJat25NGhoa1KhRIwoNDaXS0lImHgBt2LCBunfvTpqamtSoUSM6cuQIK4/PVS8gIvrtt9+YcdPa2ppWrFjBSm9tbU0rV65kfv/999/Uu3dv0tbWJl1dXRowYAA9ffqUiIjS0tIIAKWkpLDy+OWXX6hx48ZERHT9+nUCQA8fPmTiuTyDlVE2FstqD319fdq5cyfz+9GjRzRo0CAyNDQkLS0tatu2LV29WvHeVHWsrXrP5eXl0dChQ0lbW5vMzMxoxYoV5O7uTkFBQUwaZeMal3vD3d2dJk6cSNOmTSNDQ0OqX78+J51KQlhYGDVv3py0tLTI0tKSxo4dS+/evWPiq47pVctdWlpKEydOJH19fTIyMqLp06eTv78/qy64yMjlOVy8eDGZmpqSjo4OBQYGSukSsmjWrBmtW7eOFRYREUEdOnRgZO7ZsydlZGSw0ihqeyKikydPUrt27UhDQ4OMjY2pb9++CuWoirLxm8tYJQsXFxeWXisWi8nCwoIWL17MWbaysjIKDAwkGxsb0tTUJAcHB1q1ahUrTdV7mYioT58+FBAQwPwuKiqi6dOnk6WlJQmFQrK1taVt27ZxluP06dOkoqLC9B1ERBs3biQ9PT3OcxMlJSVkYmJCCxYskIqbPn06DRkyRKbeWpmq/Vtl3r17R/b29hQZGSmzToiIOnfuTHPmzOEk7+fG5z4PwPNpIJmXrNzf1Qa8hW8tQkqsc5UdL0vLcD03n9O1HCgFOura/P68dQgRobygoE4OqrS1BxcOHz6MJk2awNHREUOGDMGOHTtk5jFz5kwsWbIEKSkpcHZ2RnBwMJYsWYK5c+ciOTkZ+/fvR/369ZVer6ioCG3btkV4eDju3r2L0aNHY+jQoazlVoryLi0thZeXF3R1dRETE4PY2Fjo6Oige/fuKCkpUXjtr7/+Go0bN8bevXuZsNLSUuzbtw+BgYFcqwwAcPHiRWRmZuLixYvMF/fKSxknTJiAuLg4HDx4ELdv38aAAQPQvXt33L9/H25ubli1ahX09PSYr+9Tp05Vek1FeXJl48aNGD9+PEaPHo07d+7g5MmTsLOz43Tuo0eP8O2338Lb2xuJiYkYOXIkZs6cyUqTmZmJ7t27o3///rh9+zYOHTqEK1euYMKECUya0tJS/PTTT0hKSsKJEyeQlZXFshyUMHv2bISFheHGjRtQU1OrdhvJo6CgAKWlpTAyMuKUnqu88ti3bx8WLlyIpUuXIiEhAQ0bNsTGjRul0n3IPQUA8fHxGDFiBCZMmIDExER07twZP//8s1L5rly5ImX18e7dOwQEBODKlSu4evUq7O3t8c033+Ddu3cAgPLycvTo0QOxsbH49ddfkZycjCVLlkD1/52FJiYmomvXrnByckJcXByuXLkCb29viMViABVbmigah8rKyiAWi6UsdUUiEa5cucLIEB4eDgcHB3h5ecHU1BTt27dn9qDmyvbt2zFo0CBoa2tX6zxdXV3s2rULycnJWL16NbZu3YqVK1dWKw9l/WinTp0U3mtxcXEwMDBgtV+3bt2goqKC+Ph4mecUFxdDIBBAQ0ODCdPU1ISKigpTt8XFxRAKhVBR+Z9qKBJVrAqSpKnK48ePcezYMbi7u0vFtWrVCubm5vDw8EBsbKxUvIuLC65du4bi4mK5Zf1UICK5PhJq+6ju+C6LLl26oGXLljh27BgTNmDAADx//hwRERFISEhAmzZt0LVrV+Tk5AAAYmJi4O/vj6CgICQnJ2Pz5s3YtWsXFi5cyMp77ty56N+/P5KSkuDn54dBgwYhJSVFqUyful6QkJCAgQMHYtCgQbhz5w5CQ0Mxd+5cudsXlJeXo0+fPsjJycGlS5cQGRmJv/76Cz4+PgAABwcHtGvXDvv27WOdt2/fPgwePBgA4OjoCGNjY2zfvh0lJSUoLCzE9u3b0bRpU9jY2CitUy5jsTLy8vLg7u6O7OxsnDx5EklJSZg+fTrnLUGmTZuGS5cu4ffff8e5c+cQHR2NmzdvstIoG9e43BsAsHv3bmhrayM+Ph7Lli3DggULEBkZyUlOFRUVrFmzBvfu3cPu3btx4cIFTJ8+ndO5ALB06VLs27cPO3fuRGxsLHJzc2WOQ8pkVPYcHj58GKGhoVi0aBFu3LgBc3NzbNiwQaFsOTk5SE5Olhrj8/PzMWXKFNy4cQNRUVFQUVFBv379mLZV1vbh4eHo168fvvnmG9y6dQtRUVFwcXFh8g8NDVV4n3IZv7mMVVUpKSlBQkICunXrxoSpqKigW7duiIuLU1hXVeWztLTEkSNHkJycjHnz5mHWrFk4fPgw5zwAwN/fHwcOHMCaNWuQkpKCzZs3Q0dHh4m3sbFBaGio3PPj4uLQokULll7g5eWF3NxczhazJ0+exKtXrzB8+HBW+IULF3DkyBGsX7++WmWqyvjx49GzZ09WnVfFxcUFMTExH3QdHh6eGqBWp5M/QWrKwre8vJzyysrkH6Vl1PVaKmfrXGXH8+ISmdfJLX5Hp6KaUmRUYyory6+tauOpgqwve+L8fEp2bFInhzi/em3v5ubGfLUuLS2levXqsawrJBZaJ06cYMJyc3NJQ0ODZSFUFQCkqalJ2trazCHLsoeowsLwxx9/5JT33r17ydHRkWWlXlxcTCKRiM6ePau0vEuXLqWmTZsyv48ePUo6OjqM1QtXC19ra2sqKytjwgYMGEA+Pj
5EVGHZo6qqKmVh17VrVwoODiYi2VagiuCSJxfZLSwsWBaSVYECC9/g4GBycnJipZ8xYwbLgm/EiBE0evRoVpqYmBhSUVGR+/VbYskksaipbOErITw8nABw+oKuzMJ37Nix1Lhx4/f+Gi9PXnkWvu3bt2dZmxARdejQQcoy6kPvKV9fX/rmm29Y8T4+Pgrvs9evXxMAunz5ssIyi8Vi0tXVZSx4JZY3aWlpMtP7+vpShw4d5OZ37NgxcnR0VHhNV1dXcnd3p+zsbCorK6O9e/eSiooKOTg4EBHRkydPCABpaWnRL7/8Qrdu3aLFixeTQCCg6OhohXlLiI+PJwAUHx/PKb0ili9fTm3btmV+K7Pw5dKPDh06lGbOnCk3fuHChUx9VMbExIQ2bNgg85znz5+Tnp4eBQUFUX5+PuXl5dGECRMIAPPs3r17l9TU1GjZsmVUXFxMOTk51L9/fwJAixYtYuU3aNAgEolEBIC8vb1Zz1Vqaipt2rSJbty4QbGxsTR8+HBSU1OTshZPSkoiAJSVlSW3rJ8KZWX5dD6qcZ0c1dHt5Fn4ElX0C5JxMCYmhvT09KioqIiVxtbWljZv3kxEFf1M1Xbfu3cvmZubM78B0JgxY1hp2rdvT2PHjmWl+Rz1gsGDB5OHhwfrnGnTprHGw8oWcOfOnSNVVVWWde69e/cIAF27do2IKsZmW1tbJl6W1e+dO3fI1taWVFRUSEVFhRwdHTk/I1zGYiix8N28eTPp6urSq1evZF5DkYXvu3fvSCgU0uHDh5n4V69ekUgkYiwAuYxrsqh8bxBVWFpWXSHwxRdf0IwZM+TmoYgjR46QsbEx81uZhW/9+vVp+fLlzO+ysjJq2LChlIWvIhm5PIeurq40btw4Vnz79u0V6ju3bt2SshSXxYsXLwgA3blzh4iUt72rqyv5+fnJzW/t2rXUpUsXufFcxm8uY1VVsrOzCQD9+eefrPBp06aRi4uLwjpQxvjx46l///7Mb2UWvpJnWpE1cpcuXWjt2rVy40eNGkWenp6ssPz8fGblJRd69OhBPXr0YIW9fPmSrKys6NKlS0Sk/J1EnoXvgQMHqHnz5kyfIs/Cd/Xq1WRjY8NJ3s8N3sKXpybgLXw/YYgIvW9mwPbyHflHzB3czSuskeu56GujnroatFVVpQ4tVRVoohi87S4PV9LS0nDt2jX4+voCANTU1ODj44Pt27dLpa1sHZCSkoLi4mJ07dpVYf4rV65EYmIic3h4eEAsFuOnn35CixYtYGRkBB0dHZw9e5Zx0qAs76SkJMbBgI6ODnR0dGBkZISioiJkZmYqLfOwYcOQkZGBq1evAqiwNBw4cGC1rfuaNWvGWDMCgLm5ObM/2J07dyAWi+Hg4MDIqKOjg0uXLnGSURY1kefz58/x+PFjpe0mj5SUFLRvz3b86OrqyvqdlJSEXbt2sWT08vJCeXk5Hjx4AKDCWsrb2xsNGzaErq4uYxFY1dFW5b3AzM3NmTJ8CEuWLMHBgwdx/Phxhfu8VoarvPJIS0tjWb4AkPoNfPg9xaV9qlJYWDE2Va2LZ8+eYdSoUbC3t4e+vj709PSQl5fHlDkxMRGWlpZwcHCQma/Ewlce/fr1Q2pqqkLZ9u7dCyJCgwYNoKGhgTVr1sDX15exOpVYGvXp0weTJ09Gq1atMHPmTPTq1QubNm1SmLeE7du3o0WLFjLbQxmHDh1Chw4dYGZmBh0dHcyZM4fzPQFw60f37NmDxYsXV1s2RZiYmODIkSP4448/oKOjA319fbx58wZt2rRh6rZZs2bYvXs3wsLCoKWlBTMzMzRq1Aj169dnWf0CFf38zZs38fvvvyMzMxNTpkxh4hwdHfH999+jbdu2cHNzw44dO+Dm5iZlCS2xHi4o4O6LgOf9ISLGwj4pKQl5eXkwNjZm9S0PHjxg+pb/a+++46I4/v+Bv452HL0IAoKgiIgFFQ2KxK4Bo4ktsZGAhtjbRxN7QWOwE3uP2FFjEqPRWDEYRERFURCkCbaARqwo/d6/P/yxX5c74FCK4Pv5ePDQ252dnd292Zmdm525du0afvjhB9H64cOHIy0tTXTNit5v3NzcFHr4Vsd6QVxcnMIY5+7u7khMTBTeWnhTXFwcbGxsYGNjIyxr3LgxjIyMhPMxaNAgpKamCvvcs2cPXFxc0KhRIwCv782+vr5wd3fHhQsXEBYWhqZNm6Jnz57CfbskqpTFpYmKikLLli1VfhvmTcnJycjNzRWVSSYmJnB0dBQ+q1KulfbdKFR07NA3y8/SnD59Gl27dkWdOnWgr6+Pr7/+GhkZGSrdj549e4YHDx6IyhB1dXW0atVKIWxJaVQlH5ZnGZ+YmIjBgwejfv36MDAwEHrjvlnGl3TtSyvjx40bV+LEdqqU36qUVRVp3bp1aNWqFczMzKCnp4fNmzeXqYyPioqCurq60rdeCgUHB5ep131Z3bt3DydOnICvr69o+fDhwzFkyBB06NDhreO+e/cuJk6ciD179pRan5bJZFy+M/YeqN6z8lSRV3K5ykMtNNWT4VDLBlDaIksEuTy71DhkahLI5coregUFfCN9X0hkMjheiayyfatq69atyM/Ph5WVlbCMiCCVSrF27VoYGhoKy99sEJWpuA8LCwuF4QIWL16MVatWYeXKlWjWrBl0dXXxv//9T3jtsrS4MzMz0apVK4VXIYHXlcPSmJub47PPPsO2bdtQr149HDt2DCEhIcJ6NTU1hddmlU2QoqmpKfoskUhEr8Kpq6sjMjJS1IAHQPQqV1moEmdpaVf1ur2LzMxMjBw5EhMmTFBYV7duXbx8+RIeHh7w8PDAnj17YGZmhjt37sDDw0Ph1ds3z3Fh48S7zC6/fPlyLF68GKdPn1Y6sYQyZUnvu6rs7xQAmJqaQiKR4MmTJ6LlPj4+yMjIwKpVq2BrawupVAo3NzeV82l5fNfs7e1x9uxZvHz5Es+fP4elpSUGDhyI+vXrAwBq1aoFDQ0NNG7cWLSdk5NTsa97vunly5fYt28ffvjhhzKnLTw8HF5eXpg/fz48PDxgaGiIffv2ISAgQAhTGfnRwsJCoVEjPz8fjx8/LnGSlU8++QTJycl49OgRNDQ0YGRkBAsLC+HcAsCQIUMwZMgQPHjwALq6upBIJPjpp59EYQrTYGFhgUaNGsHExATt27fHnDlzhB9pinJ1dVW4PoWvLKtyD69qamoydOoYXWX7Lg9xcXGoV68egNf3FktLS1E5WKhwAq7MzEzMnz8f/fr1Uwij6g9nhapjvaAiWFhYoEuXLggKCkLbtm0RFBSE0aNHC+uDgoKQmpqK8PBwoXErKCgIxsbGOHToEAYNGlRi/KWVxcDrMqYq6wyqlGvLli0r8btRqKTysySpqano1asXRo8eDX9/f5iYmODcuXPw9fVFbm4udHR03vEoVUujKvnwbdSqVQsA8OTJE1Fe+Oyzz2Bra4stW7bAysoKcrkcTZs2rbQyXtXyW5Wyqmi86urqePDggWj5gwcPSp147E379u3D999/j4CAALi5uUFfXx/Lli0TDZVUWWV80eFLC
o9NlePZtm0bTE1N8fnnn4uWnzlzBocPH8by5csB/P+hCOVyaGhoYPPmzSoNoRYZGYmHDx/CxcVFWFZQUIB//vkHa9euRU5OjpCvHz9+XC3Kd8ZqOm7wfUfR7k2go178L446ampKxywkIkReGYBnz64o2YpVRxKJBJJyrCRWhPz8fOzcuRMBAQH45JNPROv69OmDvXv3Kp1tHQAcHBwgk8kQHByMb7/9tkz7DQsLQ+/evfHVV18BeN2Al5CQIFT6SovbxcUF+/fvh7m5OQwMDMq070LffvstBg8eDGtra9jb24t67piZmSE9PV3UAyoqKqpM8bds2RIFBQV4+PCh0plvAUBLS0tpz6B3ibO0tOvr68POzg7BwcHo3Lmz6gf0/zk5OSnMqF7YO6mQi4sLYmNjix0XODo6GhkZGVi8eLHQ++ny5ctlTktZLV26FP7+/jhx4oTCWHYluXnz5jun19HREZcuXYK3t7ew7NKlS2WKQ5Xr7+TkpDBua9HrU5SWlhYaN26M2NhY0X0gLCwM69evx6effgrgdU+OR48eCeudnZ1x7949JCQkKO3l6+zsjODgYMyfP1/lYyyOrq4udHV18eTJE5w4cQJLly4V0v7RRx8hPj5eFD4hIUGl2cgPHDiAnJwc4V5UFufPn4etrS1mzZolLLt9+7YojJmZGWJiYkTLoqKihIf+d7mPFnJzc8PTp08RGRkp9Cg7c+YM5HK5Qk8wZQobA86cOYOHDx8qPBQCEMYODAwMhLa2Nrp3715sfIWNFyWNxRsVFaXQGBwTEwNra2shPe8ziUQCdfX3u3wvyZkzZxAdHY1JkyYBeH3PTk9Ph4aGRrFjbrq4uCA+Pr7U8d4vXLggus9duHABLVu2LDVN73u9wMnJSWHs6bCwMDRs2FChobIw/N27d3H37l2h3IiNjcXTp09FDVxeXl6YOnUqBg8ejFu3bokacV+9egW1Is8NhZ9VacgsrSwGXt+j0tLShM+JiYmiXnjOzs74+eef8fjx4zL38rW3t4empiYiIiKEBuYnT54gISFB6PGoSrlW2nfjXUVGRkIulyMgIEBoWC/LOK2GhoaoXbs2Ll26JPSWLCgowJUrV9CiRQuV41ElHxaW8UXzWEns7e1hYGCA2NhYoazOyMhAfHw8tmzZIpz3oj/ClXbtC8v4ouPCqqqs5bcqZVVhvK1atUJwcDD69OkD4PV3pqw9acPCwtCuXTuMGTNGWFb0bYGi+aegoAAxMTFC/bpZs2aQy+U4e/ZsiePblsTNzQ3+/v54+PAhzM3NAQCnTp2CgYFBqXmAiLBt2zZ4e3sr/NgQHh4uegY5dOgQlixZgvPnz6NOnToqpa1r166Ijhb/+Dls2DA0atQI06ZNE90bY2JiVCoLGGMVi4d0eEc66mpKh1oo/Ctughq5PKtcG3sNDVuVWy8QVnMdOXIET548ga+vL5o2bSr669+/v9JhHQppa2tj2rRpmDp1Knbu3Ink5GRcuHChxG0KOTg44NSpUzh//jzi4uIwcuRI0S/xpcXt5eWFWrVqoXfv3ggNDUVKSgpCQkIwYcIE3Lt3T6Vj9/DwgIGBAX788UeFymqnTp3w33//YenSpUhOTsa6detw7NgxleIt1LBhQ3h5ecHb2xu///47UlJScPHiRSxatAhHjx4F8HqihszMTAQHB+PRo0elvuqkSpyqpH3evHkICAjA6tWrkZiYiCtXrmDNmjUqHdeoUaOQmJiIKVOmID4+HkFBQQqT1kybNg3nz58XJg5LTEzEoUOHhIp23bp1oaWlhTVr1uDWrVs4fPgwFixYoNL+39aSJUswZ84cBAYGws7ODunp6UhPT0dmZmap25ZHesePH4+tW7dix44dSExMxI8//ojr16+XafJMVa7/hAkTcPz4cSxfvhyJiYlYu3Ytjh8/XmrcHh4eCg97Dg4O2LVrF+Li4hAREQEvLy9Rb5WOHTuiQ4cO6N+/P06dOoWUlBQcO3ZM2N+MGTNw6dIljBkzBtevX8fNmzexYcMGodH44MGDwqvLxTlx4gSOHz+OlJQUnDp1Cp07d0ajRo1EeXbKlCnYv38/tmzZgqSkJKxduxZ//vmn6CHN29sbM2bMUIh/69at6NOnD0xNTUs9R0U5ODjgzp072LdvH5KTk7F69WocPHhQFKZLly64fPkydu7cicTERPj5+YkagFW5jxaX9kJOTk7w9PTE8OHDcfHiRYSFhWHcuHEYNGiQ8ObG/fv30ahRI1EvoW3btuHChQtITk7G7t278eWXX2LSpEmi163Xrl2LK1euICEhAevWrcO4ceOwaNEiobfZX3/9hW3btiEmJgapqak4evQoRo0aBXd3d6HBYuXKlTh06BCSkpIQExOD//3vfzhz5gzGjh0rOo7Q0FCFHx7Zu8vJyUF6ejru37+PK1euYOHChejduzd69eolNBp169YNbm5u6NOnD06ePInU1FScP38es2bNEn7cmjt3Lnbu3In58+fjxo0biIuLw759+zB79mzR/g4cOIDAwEAkJCTAz88PFy9eVKmR5X2vF3z33XcIDg7GggULkJCQgB07dmDt2rXFTrbarVs3NGvWDF5eXrhy5QouXrwIb29vdOzYUfSDY79+/fDixQuMHj0anTt3Fr1t1b17dzx58gRjx45FXFwcbty4gWHDhkFDQ0OlH2xLK4uB1/eotWvX4urVq7h8+TJGjRolahgaPHgwLCws0KdPH4SFheHWrVv47bffVJr8Sk9PD76+vpgyZQrOnDmDmJgYDB06VPQqvirlWmnfjXfVoEED5OXlCWX8rl27VB4SqND48eOxaNEiHDp0CPHx8Zg4cSKePHlSpjJelXw4ceJEBAYGYtu2bUIeK23irsIJy94s442NjWFqaorNmzcjKSkJZ86cEQ3FA5R+7f38/LB37174+fkhLi4O0dHRWLJkibD92rVrSx0+TJXyW5WyqmvXrli7dq3wefLkydiyZQt27NiBuLg4jB49Gi9fvixT47SDgwMuX76MEydOICEhAXPmzFH4ob5Lly44evQojh49ips3b2L06NF4+vSpsN7Ozg4+Pj745ptv8Mcffwj3pTd/UCia9qI++eQTNG7cGF9//TWuXbuGEydOYPbs2Rg7dqwwmd3FixfRqFEj3L9/X7TtmTNnkJKSovRHMicnJ9FzX506daCmpoamTZvC2NgYwOsJ8AqH3snNzcX9+/cRFRWFpKQkAK87kRR9ftTV1YWpqSmaNm0q2h+X8Yy9Jyp0hOD3UOHgyNfCzlFOTo7C34sXL0qdtC0zP1+YTC3zjcl2ipLL5ZSf/1LpX07Of8KEHDk5/xUbTtW/NyetYBWvug7W3qtXL4UJngoVTmR07do1hQmpChUUFNCPP/5Itra2pKmpSXXr1hVN6gIlk4EQvZ60o3fv3qSnp0fm5uY0e/Zs8vb2Fk1uUVrcaWlp5O3tTbVq1SKpVEr169en4cOHl2mg8zlz5pC6ujr9+++/Cus2bNhANjY2pKurS97e3uTv768waVtJkzEREeXm5tLcuXPJzs6ONDU1ydLSkvr2
7UvXr18XwowaNYpMTU0JAPn5+ZWaZlXiLC3tREQbN24kR0dHIY7x48cL6968bkUnbSMi+vPPP6lBgwYklUqpffv2FBgYqPD9uHjxInXv3p309PRIV1eXnJ2dyd/fX1gfFBREdnZ2JJVKyc3NjQ4fPizaj7LvXOHEIykpKaWep6ITqtja2hIAhT9VzvnbpFfZ5Bc//PAD1apVi/T09Oibb76hCRMmUNu2bYX15fWd2rp1K1lbW5NMJqPPPvuMli9fXurkgDdu3CCZTEZPnz4Vll25coVat25N2tra5ODgQAcOHFCYtCMjI4OGDRtGpqampK2tTU2bNqUjR44I60NCQqhdu3YklUrJyMiIPDw8ROeotGrH/v37qX79+qSlpUUWFhY0duxYURrfPOYGDRqQtrY2NW/eXDTBJNHrSUQKJ1EpdPPmTQJAJ0+eVLpvPz8/hXxT1JQpU8jU1JT09PRo4MCBtGLFCoVzPXfuXKpduzYZGhrSpEmTaNy4caJrWtq9Tlnai8rIyKDBgweTnp4eGRgY0LBhw4QJBYn+Lx+/ORnntGnTqHbt2qSpqUkODg4UEBCgUHf4+uuvycTEhLS0tMjZ2Zl27twpWn/mzBlyc3MjQ0ND4Xsybdo0Ub5dsmQJ2dvbk7a2NpmYmFCnTp3ozJkzoniysrLI0NCQwsPDSzxOVjY+Pj7CvU5DQ4PMzMyoW7duFBgYSAUFBaKwz58/p/Hjx5OVlRVpamqSjY0NeXl5iSZ6On78OLVr145kMhkZGBiQq6srbd68WVgPgNatW0fdu3cnqVRKdnZ2tH//ftF+qnO94Ndff6XGjRsL+35zki4ixUmNbt++TZ9//jnp6uqSvr4+ffnll5Senq4Q74ABAwgABQYGKqw7efIkubu7k6GhIRkbG1OXLl3KlE9KK4vv379Pn3zyCenq6pKDgwP99ddfoknbiIhSU1Opf//+ZGBgQDo6OtS6dWthksuSJm0jej1x21dffUU6OjpUu3ZtWrp0qcKkTqWVa6p8N0qbPKs0P/30E1laWpJMJiMPDw/auXNniWV60ePOy8ujcePGkYGBARkbG9O0adPoyy+/pEGDBpUpjarkQ39/f6Eu4ePjQ1OnTi1x0jYior/++ovq1KkjyvenTp0iJycnkkql5OzsTCEhIQr5s6RrT/R6csMWLVqQlpYW1apVi/r16yc6R6WVoUSll9+qlFW2trYK9bk1a9ZQ3bp1SUtLi1xdXenCBfEE7T4+PqKyuKjs7GwaOnQoGRoakpGREY0ePZqmT58uOte5ubk0evRoMjExIXNzc1q0aJHCNc3KyqJJkyaRpaUlaWlpUYMGDUR5XVnai0pNTaUePXqQTCajWrVq0XfffUd5eXnC+sI6aNH68eDBg6ldu3Ylxl1IWb21sO5Q9K+k86bse37+/HkyMjKiV69eqZSW6qa6tgOw90tlTdomISoyEE0N9/z5cxgaGmL69OnCr2TFmTlzJrS0tBSWvywogP0/r19nSHRvoHRIByJC5JVByMyMLTVNnTpGV+tXBT9E2dnZSElJQb169co8lh2rOr6+vvjvv/8UhihgrDJ0794dFhYW2LVrV1UnBQDw5ZdfwsXFpcTepB8SHx8fSCQShR7srPxt2LABBw8exMmTJ6s6KewdSCQSHDx4UHiNujriegErD3K5HE5OThgwYECFv8GkCiJCmzZtMGnSJGGS5g9dx44d0blzZ8ybN6+qk1LjDRw4EM2bN8fMmTOrOikVgtsBWHkobJd89uzZWw9NpQoew7cYNjY2CmPfCN5oI//nnCu0Ufy4daXhoRgYq3jPnj1DdHQ0goKC+KGOVYpXr15h48aN8PDwgLq6Ovbu3YvTp0/j1KlTVZ00wbJly/Dnn39WdTLeC0SEkJAQlSZ+Y+9OU1NT5WFlGKsIXC9g7+L27ds4efIkOnbsiJycHKxduxYpKSkYMmRIVScNwOsfYzZv3qww3uqH6tmzZ0hOThaGDWEVJzc3F82aNRPGjGeMVa0PtsF3QA9PNGnrpnQdEUFdPR9yeZbS9Xl5pY8BWUhPrzFauewrdkwnNTVZmcZ7YowpKpzZWZljx45hzpw5uHjxIkaNGlXi5EOVLTQ0FD169Ch2vSrjzX4omjRpojBBVqFNmzbBy8tL5bgq47xLJBL89ddf8Pf3R3Z2NhwdHfHbb7+99SQeFcHOzg7jx4+v6mS8FyQSSbHfL1b+3nbCOsZUVV3rBSXp0aMHQkNDla6bOXNmje1NV1Z79uzByJEjla6ztbUtdQxcVaipqWH79u34/vvvQURo2rQpTp8+DScnp3eOu7y0aNGiTJPI1WSGhoYqj+3N3o2WlpbCWO+MsarzwQ7pcC3sHJzbuSusfz0Uw4ASJ1TLhhS+kiAAwM22VtDXKr5SyQ26NRO/yvF+KZxMQJk6deqIJp56n2RlZSlMuPCm0mZH/5Dcvn0beXl5StfVrl0b+vr6KsfF550xxmq26lovKMn9+/eRlaW8M4qJiQlMTEwqOUXvpxcvXhQ7yZumpiZsbW0rOUWMsZqE2wFYefighnRYt24dli1bhvT0dDRv3hxr1qyBq6trseEPHDiAOXPmIDU1FQ4ODliyZAk+/fTTMu1TTjkoKHilsLyg4FWJjb1FaWqaQF39vTiNjH2wqmsDnUwmq7Zpr2zl+YDG550xxmq2mniPr1OnTlUnoVrQ19cv04/AjDHGWE1V5S2V+/fvx+TJk7Fx40a0adMGK1euhIeHB+Lj42Fubq4Q/vz58xg8eDAWLVqEXr16ISgoCH369MGVK1fQtGlTlfd757EvMs4qTrb2pvYfRyidTO1VgRwI+/89B7j3LmOMMcYYY4wxxhhj7D1RcotnJfjpp58wfPhwDBs2DI0bN8bGjRuho6ODwMBApeFXrVoFT09PTJkyBU5OTliwYAFcXFywdu3acksTAZAatEWemhGyIVX6xxhjjDHGGGOMMcYYY++bKu3hm5ubi8jISMyYMUNYpqamhm7duiE8PFzpNuHh4Zg8ebJomYeHB/74448y7dvMaB2atumgsJwA9I26ixsvcoDQmDLFyRhjjDHGGGOMMcYYY1WpSht8Hz16hIKCAtSuXVu0vHbt2rh586bSbdLT05WGT09PVxo+JycHOTk5wufnz58DAHo/M4BaWPETOqjC1VAXOmpV3kmaMcYYY4wxxhhjjDHGALwHY/hWtEWLFmH+/Pll3q6pngyHWjYAShiiV0dNDRIew5cxxhhjjDHGGGOMMfaeqNIG31q1akFdXR0PHjwQLX/w4AEsLCyUbmNhYVGm8DNmzBANAfH8+XPY2Njgoos9zGorTgpXiBtzGWOMMcYYY4wxxhhj1U2VjkegpaWFVq1aITg4WFgml8sRHBwMNzc3pdu4ubmJwgPAqVOnig0vlUphYGAg+gMAEz1d6KqrF/vHjb2MARKJRBgfOzU1FRKJBFFRUQCAkJAQSCQSPH36tNz2t337dhgZGQmf582bhxYtWgifhw4dij59+gifO3XqhP/973/ltv+KUNI5VKYizmtlsLOzw8qVK4XPbx53VVM
lLRkZGTA3N0dqamqlpImxQo8ePYK5uTnu3btX1Ulh7INRtD7xIShavyha51KmaD2sOqiM+urbUjUtwcHBcHJyQkFBQeUkjLH/b9CgQQgICKjqZDDGykmVD0A7efJkbNmyBTt27EBcXBxGjx6Nly9fYtiwYQAAb29v0aRuEydOxPHjxxEQEICbN29i3rx5uHz5MsaNG1dVh8BYtTJ06FBIJBJIJBJoamqidu3a6N69OwIDAyGXy4vdzsbGBmlpaWjatGklplZs1apV2L59e5Xt/129D+ewsqSlpaFHjx5VnQyV+fv7o3fv3rCzs6vqpIj88ssvaNGiBXR0dGBra4tly5YphMnJycGsWbNga2sLqVQKOzs7BAYGlhjvpUuX0LVrVxgZGcHY2BgeHh64du1aRR1GhTtw4AAaNWoEbW1tNGvWDH/99Vep26xbtw5OTk6QyWRwdHTEzp07Revz8vLwww8/wN7eHtra2mjevDmOHz9ebHyLFy+GRCJR+iNUeHg4unTpAl1dXRgYGKBDhw7IysoC8PptJ29vb/j5+ZXtoBkr4m3L94pU9Ae3wvRJJBLo6urCwcEBQ4cORWRkZJWkrzylp6fj66+/hoWFBXR1deHi4oLffvutqpNVrIEDByIhIaGqk1Hh2rVrh7S0NBgaGlZ1UlQ2depUzJ49G+rq6lWdFJGnT59i7NixsLS0hFQqRcOGDRXK2/v37+Orr76CqakpZDIZmjVrhsuXLxcbZ2EjeNG/4ubned9lZ2dj7NixMDU1hZ6eHvr376/wdnJRyo5fIpGI6nyPHz+Gl5cXDAwMYGRkBF9fX2RmZgrr4+Pj0blzZ9SuXRva2tqoX78+Zs+ejby8PNG+Vq5cCUdHR8hkMtjY2GDSpEnIzs4W1s+ePRv+/v549uxZOZ0RxlhVqvIG34EDB2L58uWYO3cuWrRogaioKBw/flyYmO3OnTtIS0sTwrdr1w5BQUHYvHkzmjdvjl9//RV//PHHB9GAwlh58fT0RFpaGlJTU3Hs2DF07twZEydORK9evZCfn690G3V1dVhYWEBDo+pGgjE0NCy1N8r77H04h5XFwsICUqm0qpOhklevXmHr1q3w9fWt6qSIHDt2DF5eXhg1ahRiYmKwfv16rFixAmvXrhWFGzBgAIKDg7F161bEx8dj7969cHR0LDbezMxMeHp6om7duoiIiMC5c+egr68PDw8PhQeD6uD8+fMYPHgwfH19cfXqVfTp0wd9+vRBTExMsdts2LABM2bMwLx583Djxg3Mnz8fY8eOxZ9//imEmT17NjZt2oQ1a9YgNjYWo0aNQt++fXH16lWF+C5duoRNmzbB2dlZYV14eDg8PT3xySef4OLFi7h06RLGjRsHtTcmfR02bBj27NmDx48fv+PZYB+6tynfK9u2bduQlpaGGzduYN26dcjMzESbNm0UfnSpbry9vREfH4/Dhw8jOjoa/fr1w4ABA5TeM94HMpkM5ubFD29XU2hpacHCwqLavL157tw5JCcno3///lWdFJHc3Fx0794dqamp+PXXXxEfH48tW7agTp06QpgnT57A3d0dmpqaOHbsGGJjYxEQEABjY+NS44+Pj0daWprwV12/m5MmTcKff/6JAwcO4OzZs/j333/Rr1+/Erd587jT0tIQGBgIiUQi+g54eXnhxo0bOHXqFI4cOYJ//vkHI0aMENZramrC29sbJ0+eRHx8PFauXIktW7aIfkwOCgrC9OnT4efnh7i4OGzduhX79+/HzJkzhTBNmzaFvb09du/eXY5nhTFWZegD8+zZMwJAz549q+qksGosKyuLYmNjKSsrS1gml8spNzu/Sv7kcrnKaffx8aHevXsrLA8ODiYAtGXLFmEZADp48CAREaWkpBAAunr1KhER/f333wSAjhw5Qs2aNSOpVEpt2rSh6OholdOybds2srGxIZlMRn369KHly5eToaGhsN7Pz4+aN29ebNo7duxIY8eOpbFjx5KBgQGZmprS7NmzVT4f2dnZ9N1335GVlRXp6OiQq6sr/f3338Xun4hoxYoVZGtrK1q2detWaty4MWlpaZGFhQWNHTtWWFfSOSQiOnr0KDk4OJC2tjZ16tSJtm3bRgDoyZMnQpjQ0FD6+OOPSVtbm6ytrWn8+PGUmZkprN+5cye1atWK9PT0qHbt2jR48GB68OCBsL7wWp0+fZpatWpFMpmM3Nzc6ObNmyqdp6SkJPr888/J3NycdHV1qXXr1nTq1ClRGFtbW1qxYoXS4yYiCgsLo+bNm5NUKqVWrVrRwYMHlX6fSkvjH3/8QS1btiSpVEr16tWjefPmUV5enrA+ISGB2rdvT1KplJycnOjkyZMKaSnqwIEDZGZmJlqWn59P33zzDdnZ2ZG2tjY1bNiQVq5cqbBtSdf+yZMnNGLECDI3NyepVEpNmjShP//8s9h0FDV48GD64osvRMtWr15N1tbWwnf82LFjZGhoSBkZGSrHe+nSJQJAd+7cEZZdv36dAFBiYqLK8Vy8eJG6detGpqamZGBgQB06dKDIyEhhvbLv+5MnTwiAKJ/FxMRQz549SV9fn/T09Ojjjz+mpKQkldMxYMAA6tmzp2hZmzZtaOTIkcVu4+bmRt9//71o2eTJk8nd3V34bGlpSWvXrhWF6devH3l5eYmWvXjxghwcHOjUqVPUsWNHmjhxokJaZs+eXepx1KtXj37++edSw7HKJ5fLKTM/v0r+Kqp8f/LkCfn6+lKtWrVIX1+fOnfuTFFRUaLtSrvfAqD169eTp6cnaWtrU7169ejAgQOiOIref4u7H3t7e5O+vj49fvy4xGPMzMwkfX19hf0cPHiQdHR06Pnz50T0+p7WuXNn0tbWJhMTExo+fDi9ePGi2HOVnZ1N48ePJzMzM5JKpeTu7k4XL14kIqKCggKqU6cOrV+/XrTPK1eukEQiodTUVCIi0tXVpZ07d4rCmJiYiM57Se7cuUNffvklGRoakrGxMX3++eeUkpIirFd2f+nduzf5+PiIjmPq1KlkbW1NWlpaZG9vL9xXCsvZwvrFtm3bRHUuIqJFixaRubk56enp0TfffEPTpk1TqAdt2bKFGjVqRFKplBwdHWndunWi9VOnTiUHBweSyWRUr149mj17NuXm5grrC+tWO3fuJFtbWzIwMKCBAwcK1640x44dI3d3dzI0NCQTExPq2bOnqMworr76Zr1q8+bNZG1tLdQ/AwIClNY/S0pjQUEBLVy4UKgnODs7K3wvVanjFTV27FiFsl+VelhJ157o3cvaDRs2UP369UXXsqhp06bRxx9/rHKcRMqvz9sICAigpk2bko6ODllbW9Po0aNFeb486vSlefr0KWlqaoq+B3FxcQSAwsPDVY6nd+/e1KVLF+FzbGwsAaBLly4Jy44dO0YSiYTu379fbDyTJk0SXY+xY8eK4iVSrPsQEc2fP7/M1/FDoqwdgLGyqqx2yZrfzYyxSpKfK8fmiWerZN8jVnWEpvTdXvvq0qULmjdvjt9//x3ffvutyttNmTIFq1atgoWFBWbOnInPPvsMCQkJ0NTULH
G7iIgI+Pr6YtGiRejTpw+OHz/+Vq8079ixA76+vrh48SIuX76MESNGoG7duhg+fHip244bNw6xsbHYt28frKyscPDgQXh6eiI6OhoODg4q7X/Dhg2YPHkyFi9ejB49euDZs2cICwtTadu7d++iX79+GDt2LEaMGIHLly/ju+++E4VJTk6Gp6cnfvzxRwQGBuK///7DuHHjMG7cOGzbtg3A61fPFyxYAEdHRzx8+BCTJ0/G0KFDFV6zmzVrFgICAmBmZoZRo0bhm2++USmtmZmZ+PTTT+Hv7w+pVIqdO3fis88+Q3x8POrWrVvq9s+fP8dnn32GTz/9FEFBQbh9+3axYy+XlMbQ0FB4e3tj9erVaN++PZKTk4XeDX5+fpDL5ejXrx9q166NiIgIPHv2TKUxnkNDQ9GqVSvRMrlcDmtraxw4cACmpqY4f/48RowYAUtLSwwYMABAyddeLpejR48eePHiBXbv3g17e3vExsaKXs+USCTYtm0bhg4dqjRdOTk50NHRES2TyWS4d+8ebt++DTs7Oxw+fBitW7fG0qVLsWvXLujq6uLzzz/HggULIJPJlMbr6OgIU1NTbN26FTNnzkRBQQG2bt0KJyenMg1p8eLFC/j4+GDNmjUgIgQEBODTTz9FYmIi9PX1VYrj/v376NChAzp16oQzZ87AwMAAYWFhQk/EkJAQdO7cGSkpKcWmLTw8XDQ5KwB4eHiUOG5zTk4OtLW1RctkMhkuXryIvLw8aGpqFhvm3LlzomVjx45Fz5490a1bN/z444+idQ8fPkRERAS8vLzQrl07JCcno1GjRvD398fHH38sCuvq6orQ0ND3rqc5A17J5bD/J7pK9p3coRl03/G1bmXl+5dffgmZTIZjx47B0NAQmzZtQteuXZGQkAATE5NS77eF5syZg8WLF2PVqlXYtWsXBg0ahOjoaDg5OZUpjZMmTcLOnTtx6tQp4R6rjK6uLgYNGoRt27bhiy++EJYXftbX18fLly/h4eEBNzc3XLp0CQ8fPsS3336LcePGFTss1NSpU/Hbb79hx44dsLW1xdKlS+Hh4YGkpCSYmJhg8ODBCAoKwujRo4Vt9uzZA3d3d9ja2gJ4/Rbi/v370bNnTxgZGeGXX35BdnY2OnXqVOrx5+XlCWkODQ2FhoYGfvzxR3h6euL69evQ0tJS6Tx6e3sjPDwcq1evRvPmzZGSkoJHjx6ptO0vv/yCefPmYd26dfj444+xa9curF69GvXr1xcd89y5c7F27Vq0bNkSV69exfDhw6GrqwsfHx8AgL6+PrZv3w4rKytER0dj+PDh0NfXx9SpU4V4kpOT8ccff+DIkSN48uQJBgwYgMWLF8Pf37/UdL58+RKTJ0+Gs7MzMjMzMXfuXPTt2xdRUVGiNyeKExYWhlGjRmHJkiX4/PPPcfr0acyZM0chXGlpXLRoEXbv3o2NGzfCwcEB//zzD7766iuYmZmhY8eOKtXxlAkNDcWQIUNEy1Sph5V07cujrD18+DDc3NwwduxYHDp0CGZmZhgyZAimTZsm1G0OHz4MDw8PfPnllzh79izq1KmDMWPGqFQnb9GiBXJyctC0aVPMmzcP7u7upW7zJjU1NaxevRr16tXDrVu3MGbMGEydOhXr169XOY7S6vRDhw5FamoqQkJClG4fGRmJvLw8dOvWTVjWqFEj1K1bF+Hh4Wjbtm2paXjw4AGOHj2KHTt2CMvCw8NhZGSE1q1bC8u6desGNTU1REREoG/fvgrxJCUl4fjx46Lexe3atcPu3btx8eJFuLq64tatW/jrr7/w9ddfi7Z1dXWFv78/cnJyqs3beoyxYlRoc/J7iHv4svKg7Je93Ox8WjsyuEr+crPzVU57cT2AiIgGDhxITk5Owmeo0MN33759QviMjAySyWS0f//+UtMxePBg+vTTTxX2X9Yevk5OTqIeUNOmTRMdQ3Fu375N6urqCr+Md+3alWbMmKF0/0SKvQGsrKxo1qxZxe6npHM4Y8YMaty4sSj8tGnTRD0dfH19acSIEaIwoaGhpKamVuwvy4U9OAt7NrzZe7bQ0aNHCcBb/zrdpEkTWrNmjfC5pB6+GzZsIFNTU9G+tmzZUmwP3+LS2LVrV1q4cKEoHbt27SJLS0siIjpx4gRpaGiIrumxY8dK7eHbu3dv+uabb0o95rFjx1L//v2FzyVd+xMnTpCamhrFx8cXG5+joyP9/vvvxa7ftGkT6ejo0OnTp6mgoIDi4+OpUaNGBIDOnz9PREQeHh4klUqpZ8+eFBERQUePHiVbW1saOnRoiccSHR1N9vb2pKamRmpqauTo6Cj0UntbBQUFpK+vL/RiVqWH74wZM6hevXrF9hiKiIggR0dHunfvXrH71dTUpKCgINGydevWkbm5ebHbzJgxgywsLOjy5cskl8vp0qVLVLt2bQJA//77LxG9vkc1btyYEhISqKCggE6ePEkymYy0tLSEePbu3UtNmzYVvqNFe+CFh4cTADIxMaHAwEC6cuUK/e9//yMtLS1KSEgQpWnSpEnUqVOnYtPMqk5mfj7VPnO1Sv4y88u/fA8NDSUDAwPKzs4WhbG3t6dNmzYRUen3W6LX9/lRo0aJwrRp04ZGjx4tCqNKD9+srCwCQEuWLCn1OCMiIkhdXV3Iqw8ePCANDQ0KCQkhote9N42NjUVvwhw9epTU1NQoPT2diMTnKjMzkzQ1NWnPnj1C+NzcXLKysqKlS5cSEdHVq1dJIpHQ7du3iej/ev1u2LBB2ObJkyf0ySefEADS0NAgAwMDOnHiRKnHQ/T63Do6OorqMzk5OSSTyYQ4SuvhGx8fTwAUen4WKq2Hr5ubG40ZM0a0TZs2bUT1IHt7e4X77YIFC8jNza3YY1u2bBm1atVK+Ozn5yfqjU1ENGXKFGrTpk2xcZTkv//+IwDCG2al9fAdOHCgwlshXl5eCvXPktKYnZ1NOjo6QllcyNfXlwYPHkxEqtXxlDE0NFToKa7Mm/Ww0q59eZS1jo6OJJVK6ZtvvqHLly/Tvn37yMTEhObNmyeEkUqlJJVKacaMGXTlyhXatGkTaWtr0/bt24uN9+bNm7Rx40a6fPkyhYWF0bBhw0hDQ0P0xtDbOHDgAJmamgqfy6NOP336dPr666+LXb9nzx5RHaHQRx99RFOnTlUp3UuWLCFjY2NRndnf358aNmyoENbMzEzhzQM3NzeSSqUEgEaMGEEFBQWi9atWrSJNTU3S0NBQeg8nIrp27RoBeOd6YU3FPXxZeeAevoxVMxpaahixqmOV7bs8EFGZxzhzc3MT/m9iYgJHR0fExcWVul1cXJzCL9Jubm4lToqkTNu2bUVpdnNzQ0BAAAoKCkqc7CI6OhoFBQVo2LChaHlOTg5MTU1V2vfDhw/x77//omvXrmVKc6G4uDi0adNGtOzN8wkA165dw/Xr17Fnzx5hGRFBLpcjJSUFTk5OiIyMxLx583Dt2jU8efJEmJznzp07aNy4sbDdm+OLWlpaCsdQWi/dzMxMzJs3D0ePHkVaWhry8/ORlZWFO3fuqHSc8fHxcHZ2FvWWd
HV1VRq2pDReu3YNYWFhoh5ABQUFyM7OxqtXrxAXFwcbGxtYWVkJ64ueT2WysrIUenICryf1CgwMxJ07d5CVlYXc3FxhtvLSrn1UVBSsra0Vvl9vunnzZonpGj58OJKTk9GrVy/k5eXBwMAAEydOxLx584ReTHK5HBKJBHv27BEmpPnpp5/wxRdfYP369Up7+WZlZcHX1xfu7u7Yu3cvCgoKsHz5cvTs2ROXLl0qtmdwUQ8ePMDs2bMREhKChw8foqCgAK9evVL5ewG8Pk/t27cv9o0AV1fXUs/T25gzZw7S09PRtm1bEBFq164NHx8fLF26VDi3q1atwvDhw9GoUSNIJBLY29tj2LBhwoR4d+/excSJE3Hq1Cml3x8AQl4cOXKkMBlty5YtERwcjMDAQCxatEgIK5PJ8OrVq3I/VvbudNTUkNyhWZXtuzy8Wb5fu3YNmZmZCmVdVlYWkpOThTAl3W8L3z4oeo91c3NDVFTUW6UPgEp1EFdXVzRp0gQ7duzA9OnTsXv3btja2qJDhw4AXpetzZs3h66urrCNu7s75HI54uPjhXlCCiUnJyMvL0/Uo1BTUxOurq5CfaZFixZwcnISxsA8e/YsHj58iC+//FLYZs6cOXj69ClOnz6NWrVq4Y8//sCAAQMQGhqKZs1K/v5cu3YNSUlJCm9HZGdnC9ekNFFRUVBXV0fHjm9XF42Li8OoUaNEy9zc3PD3338DeN2zNjk5Gb6+vqIem/n5+aIJ0fbv34/Vq1cjOTkZmZmZyM/Ph4GBgSheOzs70bFaWlri4cOHKqUzMTERc+fORUREBB49eiSq86gyp0t8fLxC/dPV1RVHjhxROY1JSUl49eoVunfvLtomNzcXLVu2BKBaHU8ZZXWS0uphpV378ihr5XI5zM3NsXnzZqirq6NVq1a4f/8+li1bJvT6l8vlaN26NRYuXAjgdXkXExODjRs3Cj3Ai3J0dBTNO1D4NsyKFSuwa9euEtP0ptOnT2PRokW4efMmnj9/jvz8fIX7VUlUqdO/WWZXlMDAQHh5eRVbryjN/v378eLFC1y7dg1TpkzB8uXLhd71ISEhWLhwIdavX482bdogKSkJEydOxIIFC0S93AvrgVwnYaz64wZfxsqJRCJ552EVqlpcXBzq1atX1cmoFJmZmVBXV0dkZKRCw7Cenh6A16+HFT6EFnpzUitVG8beNZ0jR47EhAkTFNbVrVtXeHXVw8MDe/bsgZmZGe7cuQMPDw/k5uaKwr9Z0S98qFZl5vbvv/8ep06dwvLly9GgQQPIZDJ88cUXCvGXh5LSmJmZifnz5yud/OJtK8YAUKtWLTx58kS0bN++ffj+++8REBAANzc36OvrY9myZYiIiABQ+rUvj++GRCLBkiVLsHDhQqSnp8PMzAzBwcEAILxia2lpiTp16ogetp2cnEBEuHfvntKhSYKCgpCamorw8HChcTMoKAjGxsY4dOgQBg0apFL6fHx8kJGRgVWrVsHW1hZSqRRubm7C96Iw7jfzUNFJ4crjPFlYWCjMgP3gwQNYWFgUu41MJkNgYCA2bdqEBw8ewNLSEps3b4a+vj7MzMwAAGZmZvjjjz+QnZ2NjIwMWFlZYfr06cK5j4yMxMOHD+Hi4iLEW1BQgH/++Qdr165FTk6O8KPFmz+8AK+vUdGG8cePHwv7Zu8XiUTyzsMqVLU3y/fMzExYWloqfS25cGLUirrflpQ+ACrXQb799lusW7cO06dPx7Zt2zBs2LAKn5TLy8tLaPANCgqCp6en0GienJyMtWvXIiYmBk2aNAEANG/eHKGhoVi3bh02btxYYtyZmZlo1aqV6MfdQoX3haquk2RmZgIAtmzZotCQWViPCg8Ph5eXF+bPnw8PDw8YGhpi3759CAgIEIUv2vAokUhUqo8AwGeffQZbW1ts2bIFVlZWkMvlaNq0abnXSUpKY+G5OHr0qGjSMgDv/Aq8sjpJafWwyqiTWFpaQlNTU1RndnJyQnp6OnJzc6GlpQVLS0ul5d1vv/1Wpn25uroqDJ9UktTUVPTq1QujR4+Gv78/TExMcO7cOfj6+iI3Nxc6OjqVkn8sLCyQm5uLp0+fiiaZLq1OUig0NBTx8fHYv3+/QrxFfxDJz8/H48ePFeK1sbEB8LreUVBQgBEjRuC7776Duro65syZg6+//loY2qdZs2Z4+fIlRowYgVmzZgn1tsIJZLlOwlj1Vz7dBhhj1d6ZM2cQHR1d5lmBL1y4IPz/yZMnSEhIUGnsPicnJ6HxTFlcqlIWh4ODQ4m9e4HXvQ4KCgrw8OFDNGjQQPRXWHkyMzNDenq6qIL4Zs8lfX192NnZCY1wZeXk5ISLFy8qpP9NLi4uiI2NVUhjgwYNoKWlhZs3byIjIwOLFy9G+/bt0ahRI5V7yagqLCwMQ4cORd++fdGsWTNYWFggNTVV5e0dHR0RHR2NnJwcYdmlS5fKnA4XFxfEx8crPRdqampwcnLC3bt3kZaWJmyjyneqZcuWiI2NFS0LCwtDu3btMGbMGLRs2RINGjQQ9bIq7do7Ozvj3r17SEhIKPNxFqWuro46depAS0sLe/fuhZubm1AJd3d3x7///is8fAJAQkIC1NTUYG1trTS+V69eQU1NTdQ4UvhZ1Qdu4PU5mjBhAj799FM0adIEUqlUNFZkYRrfvB5Fe/45OzsjNDRUoSG4LNzc3BSuw6lTp1TqSaWpqQlra2uoq6tj37596NWrl8IYkNra2qhTpw7y8/Px22+/oXfv3gCArl27Ijo6GlFRUcJf69at4eXlJfS2srOzg5WVFeLj40VxJiQkCON+FoqJiRF6hjFWnoqW7y4uLkhPT4eGhobCvbRWrVpCmJLut4WK3mMvXLhQ5vF7AWDlypUwMDAQjX1Zkq+++gq3b9/G6tWrERsbK+o96OTkhGvXruHly5fCsrCwMKipqYl6Ehayt7eHlpaWaKzOvLw8XLp0SdR4NWTIEMTExCAyMhK//vorvLy8hHWFPeGK3j/U1dVVuq+6uLggMTER5ubmCue78Ac9MzMz0f20oKAAMTExwudmzZpBLpfj7Nm3m0+itHpZ7dq1YWVlhVu3bimksbCh/vz587C1tcWsWbPQunVrODg44Pbt22+VHmUyMjIQHx+P2bNno2vXrnByclJoHC2No6OjQh2krHWSxo0bQyqV4s6dOwrnorDBTZU6njLF1UlKqoeVdu3Lo6x1d3dHUlKS6PuckJAAS0tLYYxpd3d3lcq70kRFRQk/mKoiMjIScrkcAQEBaNu2LRo2bIh///1XFKai6/QA0KpVK2hqaoriiI+Px507d1Sqk2zduhWtWrVC8+bNRcvd3Nzw9OlTREZGCsvOnDkDuVyu8OPLm+RyOfLy8oRrVlj/e1Ph89Kb5yUmJgbW1tZCecAYq8YqdMCI9xCP4cvKQ3Ueu8fHx4c8PT0pLS2N7t27R5GRkeTv7096enrUq1cvyn9jvECoMIZvkyZN6PTp0xQdHU2ff/451a1bl3JyckpNR3h4OKmpqdGyZcsoISGB1qxZQ0ZGRmUew1dPT48mTZpE
N2/epKCgINLV1aWNGzeqdC68vLzIzs6OfvvtN7p16xZFRETQwoUL6ciRI0T0elZciURCixcvpqSkJFq7di0ZGxuLxvvavn07aWtr06pVqyghIYEiIyNp9erVKp3D27dvk5aWFn3//fd08+ZN2rNnD1lYWIjGd7t27RrJZDIaO3YsXb16lRISEuiPP/4QZg1++PAhaWlp0ZQpUyg5OZkOHTpEDRs2LHWG6qtXrxIA0Qzgxenbty+1aNGCrl69SlFRUfTZZ5+Rvr6+aCzBksbwffbsGZmYmJC3tzfFxsbS8ePHhbFoC2eFVyWNx48fJw0NDZo3bx7FxMRQbGws7d27VxhvraCggBo3bkzdu3enqKgo+ueff6hVq1aljuF7/fp10tDQEM0Ov2rVKjIwMKDjx49TfHw8zZ49mwwMDETfx9KufadOnahp06Z08uRJunXrFv3111907NgxYX1pY/j+999/tGHDBoqLi6OrV6/ShAkTSFtbmyIiIoQwL168IGtra/riiy/oxo0bdPbsWXJwcKBvv/1WCPP777+To6Oj8DkuLo6kUimNHj2aYmNjKSYmhr766isyNDQUxsRURcuWLal79+4UGxtLFy5coPbt25NMJhN9D9q2bUvt27en2NhYCgkJIVdXV9EYvo8ePSJTU1Pq168fXbp0iRISEmjnzp108+ZNIlJtXMGwsDDS0NCg5cuXU1xcHPn5+ZGmpqYwniOR4rh78fHxtGvXLkpISKCIiAgaOHAgmZiYiPLDhQsX6LfffqPk5GT6559/qEuXLlSvXr0Sx15UNsbmihUryMDAgA4cOECJiYk0e/Zs0tbWFs2O/vLlS5LJZPTPP/+UcMYZK5mq5btcLqePP/6YmjdvTidOnKCUlBQKCwujmTNnCrPAl3a/JXp9n69VqxZt3bqV4uPjae7cuaSmpkY3btwQhSk6hu+2bdsoLS2NUlNT6eTJk9S/f39SV1cXjaGriiFDhpCWlhZ5enqKlr98+ZIsLS2pf//+FB0dTWfOnKH69esLY90Wnqs36xMTJ04kKysrOnbsGN24cYN8fHzI2NhYVC4QEbm7u1Pz5s1JX1+fXr16JSzPzc2lBg0aUPv27SkiIoKSkpJo+fLlJJFI6OjRo6Uey8uXL8nBwYE6depE//zzD926dYv+/vtvGj9+PN29e5eIiDZu3Eg6Ojp05MgRiouLo+HDh5OBgYHouIYOHUo2NjZ08OBBIY7CeRVKG8N33759pK2tTYGBgcL11NfXF5V7W7ZsIZlMRqtWraL4+Hi6fv06BQYGUkBAABERHTp0iDQ0NGjv3r2UlJREq1atIhMTkxLrdkSKY6kWp6CggExNTemrr76ixMRECg4Opo8++kil+mrhcZ87d47U1NQoICCAEhISaOPGjWRqakpGRkZlSuOsWbPI1NSUtm/fTklJSUIdoHC8WlXqeMqsXr1aNOYxkWr1sJKufXmUtXfu3CF9fX0aN24cxcfH05EjR8jc3Jx+/PFHIczFixdJQ0OD/P39KTExkfbs2UM6Ojq0e/duIUzR8njFihX0xx9/UGJiIkVHR9PEiRNJTU1NNKdDaaKioggArVy5kpKTk2nnzp1Up04d0bkujzp9aWP4EhGNGjWK6tatS2fOnKHLly+Tm5ubwhjXyup/z549Ix0dHdG44G/y9PSkli1bUkREBJ07d44cHByE8aKJiHbv3k379++n2NhYSk5Opv3795OVlRV5eXkJYfz8/EhfX5/27t1Lt27dopMnT5K9vT0NGDBAtC8fHx+V5rb4UFXndgD2/qisdklu8GXsLVTnG72Pjw8BECYVMTMzo27dulFgYKDCwP6qVKD//PNPatKkCWlpaZGrqytdu3ZN5bRs3bqVrK2tSSaT0WeffUbLly8vc4PvmDFjaNSoUWRgYEDGxsY0c+ZM0aQnJcnNzaW5c+eSnZ0daWpqkqWlJfXt25euX78uhNmwYQPZ2NiQrq4ueXt7k7+/v8JDycaNG8nR0VGIY/z48cK6ks4hEdGff/5JDRo0IKlUSu3bt6fAwECFh4GLFy9S9+7dSU9Pj3R1dcnZ2Zn8/f2F9UFBQWRnZ0dSqZTc3Nzo8OHD5drgm5KSQp07dyaZTEY2Nja0du1ahYatkhp8iV43yjk7O5OWlha1atWKgoKCCIDwsKFqGo8fP07t2rUjmUxGBgYG5OrqSps3bxbWx8fH08cff0xaWlrUsGFDOn78eKkNvkRErq6uoh8KsrOzaejQoWRoaEhGRkY0evRomj59usIDYEnXPiMjg4YNG0ampqakra1NTZs2FX5MKDxH27ZtKzZN//33H7Vt25Z0dXVJR0eHunbtShcuXFAIFxcXR926dSOZTEbW1tY0efJkUUPEtm3bqOjvuydPniR3d3cyNDQkY2Nj6tKlC4WHh4vClJa+K1euUOvWrUlbW5scHBzowIEDCt+D2NhYcnNzI5lMRi1atKCTJ0+KGnyJXv+o8cknn5COjg7p6+tT+/btKTk5mYj+73tR2vf0l19+oYYNG5KWlhY1adJEoYHFx8eHOnbsKEpXixYthO9R7969he9ioZCQEHJyciKpVEqmpqb09ddfK0zyWJSyBl8iokWLFpG1tTXp6OiQm5sbhYaGitYHBQWJGuUZextlKd+fP39O48ePJysrK9LU1CQbGxvy8vKiO3fuCGFKu98CoHXr1lH37t1JKpWSnZ2dwqStyhp8C/+0tbXJ3t6efHx83mqCpuDgYAJAv/zyi8K669evU+fOnUlbW5tMTExo+PDhwkSmhefqzfpEVlYWjR8/nmrVqkVSqZTc3d3p4sWLCvGuX7+eAJC3t7fCuoSEBOrXrx+Zm5uTjo4OOTs7qzT5VqG0tDTy9vYW0lC/fn0aPny48MySm5tLo0ePJhMTEzI3N6dFixaJJm0rPI5JkyaRpaUlaWlpUYMGDSgwMJCISm/wJXo9OVStWrVIT0+PfHx8aOrUqQrl3p49e6hFixakpaVFxsbG1KFDB1Hj1ZQpU8jU1JT09PRo4MCBtGLFinJr8CUiOnXqlHBvdnZ2ppCQkDI1+BK9ntivTp06JJPJqE+fPvTjjz+ShYVFmdIol8tp5cqVQh3AzMyMPDw86OzZs0IYVep4RWVkZJC2traoTFKlHlbStScqn7L2/Pnz1KZNG+H76e/vL+ooUnjMTZs2JalUSo0aNRLdM4gUy+MlS5aQvb29kFc7depEZ86cEW2jrB5T1E8//USWlpYkk8nIw8ODdu7cqXCu37VOXzTtymRlZdGYMWPI2NiYdHR0qG/fvpSWliYKo6x+tWnTJpLJZPT06VOl8WZkZNDgwYNJT0+PDAwMaNiwYaJ72r59+8jFxUV4VmjcuDEtXLhQ9Kyal5dH8+bNE863jY0NjRkzRnSOsrKyyNDQUKFOyP5PdW4HYO+PymqXlBAVGcymhnv+/DkMDQ3x7NkzhQkEGFNVdnY2UlJSUK9evQoZy46xmm7Pnj0YNmwYnj17ViljIZfm6NGjmDJlCmJiYhRed/sQpaSkoGHDhoiNjVU6DjArX23btsWECRMwZMi
Qqk4KYyqTSCQ4ePAg+vTpUyX737VrFyZNmoR///1XeKWcsbcxfPhw3Lx5E6GhoVWdFADAlClT8Pz5c2zatKmqk/Je8PPzw9mzZ5WOO87K14YNG3Dw4EGcPHmyqpPy3uJ2AFYeKqtdkidtY4wxVuF27tyJ+vXro06dOrh27RqmTZuGAQMGvBeNvQDQs2dPJCYm4v79+8L4ex+yv/76CyNGjODG3krw6NEj9OvXD4MHD67qpDBWLbx69QppaWlYvHgxRo4cyY29rMyWL1+O7t27Q1dXF8eOHcOOHTuwfv36qk6WYNasWVi/fj3kcjn/CA3g2LFjWLt2bVUn44OgqamJNWvWVHUyGGPlhHv4MvYW+Je90vXo0aPYnhIzZ87EzJkzK3T/oaGh6NGjR7Hr35zk6kPXpEmTYidV2bRpk2himre1dOlSrF+/Hunp6bC0tESfPn3g7+8PHR2dd46bMcZY5auoHr6l1R9yc3Ph7++PDh064NChQ9DT0yvX/VeEhQsXYuHChUrXtW/fHseOHavkFL2f7ty5I5oor6jY2FjUrVv3nfczYMAAhISE4MWLF6hfvz7Gjx+PUaNGvXO8jLGaj9sBWHmorHZJbvBl7C3wjb509+/fR1ZWltJ1JiYmMDExqdD9Z2Vl4f79+8Wub9CgQYXuvzq5fft2sTM3165dG/r6+pWcIsYYYx+qqq4/VITHjx/j8ePHStfJZDLUqVOnklP0fsrPz0dqamqx6+3s7KChwS+oMsaqDrcDsPLAQzowxqq1qn54kclk3KirIltb26pOAmOMMQag6usPFaG6NlRXNg0NDa67McYYY+WEBwVi7B18YB3kGWOMMcYYY4yxDxI//7PqhBt8GXsLmpqaAF5PHMIYY4wxxhhjjLGarfD5v7A9gLH3GQ/pwNhbUFdXh5GRER4+fAgA0NHRgUQiqeJUMcYYY4wxxhhjrDwREV69eoWHDx/CyMgI6urqVZ0kxkrFDb6MvSULCwsAEBp9GWOMMcYYY4wxVjMZGRkJ7QCMve+4wZextySRSGBpaQlzc3Pk5eVVdXIYY4wxxhhjjDFWATQ1NblnL6tWuMGXsXekrq7ON37GGGOMMcYYY4wx9l7gSdsYY4wxxhhjjDHGGGOshuAGX8YYY4wxxhhjjDHGGKshuMGXMcYYY4wxxhhjjDHGaogPbgxfIgIAPH/+vIpTwhhjjDHGGGOMMcYY+1AUtkcWtk9WlA+uwTcjIwMAYGNjU8UpYYwxxhhjjDHGGGOMfWgyMjJgaGhYYfF/cA2+JiYmAIA7d+5U6IlljL3+5crGxgZ3796FgYFBVSeHsRqL8xpjlYfzG2OVh/MbY5WD8xpjlefZs2eoW7eu0D5ZUT64Bl81tdfDFhsaGvKNjLFKYmBgwPmNsUrAeY2xysP5jbHKw/mNscrBeY2xylPYPllh8Vdo7IwxxhhjjDHGGGOMMcYqDTf4MsYYY4wxxhhjjDHGWA3xwTX4SqVS+Pn5QSqVVnVSGKvxOL8xVjk4rzFWeTi/MVZ5OL8xVjk4rzFWeSorv0mIiCp0D4wxxhhjjDHGGGOMMcYqxQfXw5cxxhhjjDHGGGOMMcZqKm7wZYwxxhhjjDHGGGOMsRqCG3wZY4wxxhhjjDHGGGOshuAGX8YYY4wxxhhjjDHGGKshakSD77p162BnZwdtbW20adMGFy9eLDH8gQMH0KhRI2hra6NZs2b466+/ROuJCHPnzoWlpSVkMhm6deuGxMTEijwExqqF8s5rQ4cOhUQiEf15enpW5CEwVm2UJb/duHED/fv3h52dHSQSCVauXPnOcTL2ISnv/DZv3jyF8q1Ro0YVeASMVQ9lyWtbtmxB+/btYWxsDGNjY3Tr1k0hPD+3MVa88s5v/OzGmHJlyWu///47WrduDSMjI+jq6qJFixbYtWuXKEx5lW3VvsF3//79mDx5Mvz8/HDlyhU0b94cHh4eePjwodLw58+fx+DBg+Hr64urV6+iT58+6NOnD2JiYoQwS5cuxerVq7Fx40ZERERAV1cXHh4eyM7OrqzDYuy9UxF5DQA8PT2RlpYm/O3du7cyDoex91pZ89urV69Qv359LF68GBYWFuUSJ2MfiorIbwDQpEkTUfl27ty5ijoExqqFsua1kJAQDB48GH///TfCw8NhY2ODTz75BPfv3xfC8HMbY8pVRH4D+NmNsaLKmtdMTEwwa9YshIeH4/r16xg2bBiGDRuGEydOCGHKrWyjas7V1ZXGjh0rfC4oKCArKytatGiR0vADBgygnj17ipa1adOGRo4cSUREcrmcLCwsaNmyZcL6p0+fklQqpb1791bAETBWPZR3XiMi8vHxod69e1dIehmrzsqa395ka2tLK1asKNc4GavJKiK/+fn5UfPmzcsxlYxVf+9aDuXn55O+vj7t2LGDiPi5jbGSlHd+I+JnN8aUKY9nrJYtW9Ls2bOJqHzLtmrdwzc3NxeRkZHo1q2bsExNTQ3dunVDeHi40m3Cw8NF4QHAw8NDCJ+SkoL09HRRGENDQ7Rp06bYOBmr6SoirxUKCQmBubk5HB0dMXr0aGRkZJT/ATBWjbxNfquKOBmrCSoybyQmJsLKygr169eHl5cX7ty5867JZazaKo+89urVK+Tl5cHExAQAP7cxVpyKyG+F+NmNsf/zrnmNiBAcHIz4+Hh06NABQPmWbdW6wffRo0coKChA7dq1Rctr166N9PR0pdukp6eXGL7w37LEyVhNVxF5DXj9StDOnTsRHByMJUuW4OzZs+jRowcKCgrK/yAYqybeJr9VRZyM1QQVlTfatGmD7du34/jx49iwYQNSUlLQvn17vHjx4l2TzFi1VB55bdq0abCyshIegvm5jTHlKiK/AfzsxlhRb5vXnj17Bj09PWhpaaFnz55Ys2YNunfvDqB8yzaNMoVmjLFyNGjQIOH/zZo1g7OzM+zt7RESEoKuXbtWYcoYY4yxt9ejRw/h/87OzmjTpg1sbW3xyy+/wNfXtwpTxlj1tHjxYuzbtw8hISHQ1tau6uQwVqMVl9/42Y2x8qGvr4+oqChkZmYiODgYkydPRv369dGpU6dy3U+17uFbq1YtqKur48GDB6LlDx48KHYSDQsLixLDF/5bljgZq+kqIq8pU79+fdSqVQtJSUnvnmjGqqm3yW9VESdjNUFl5Q0jIyM0bNiQyzf2wXqXvLZ8+XIsXrwYJ0+ehLOzs7Ccn9sYU64i8psy/OzGPnRvm9fU1NTQoEEDtGjRAt999x2++OILLFq0CED5lm3VusFXS0sLrVq1QnBwsLBMLpcjODgYbm5uSrdxc3MThQeAU6dOCeHr1asHCwsLUZjnz58jIiKi2DgZq+kqIq8pc+/ePWRkZMDS0rJ8Es5YNfQ2+a0q4mSsJqisvJGZmYnk5GQu39gH623z2tKlS7FgwQIcP34crVu3Fq3j5zbGlKuI/KYMP7uxD1151SPlcjlycnIAlHPZVqYp3t5D+/btI6lUStu3b6fY2FgaMWIEGRkZUXp6OhERff311zR9+nQhfFhYGGloaNDy5cspLi6O/Pz8SF
NTk6Kjo4UwixcvJiMjIzp06BBdv36devfuTfXq1aOsrKxKPz7G3hflnddevHhB33//PYWHh1NKSgqdPn2aXFxcyMHBgbKzs6vkGBl7X5Q1v+Xk5NDVq1fp6tWrZGlpSd9//z1dvXqVEhMTVY6TsQ9VReS37777jkJCQiglJYXCwsKoW7duVKtWLXr48GGlHx9j74uy5rXFixeTlpYW/frrr5SWlib8vXjxQhSGn9sYU1Te+Y2f3RhTrqx5beHChXTy5ElKTk6m2NhYWr58OWloaNCWLVuEMOVVtlX7Bl8iojVr1lDdunVJS0uLXF1d6cKFC8K6jh07ko+Pjyj8L7/8Qg0bNiQtLS1q0qQJHT16VLReLpfTnDlzqHbt2iSVSqlr164UHx9fGYfC2HutPPPaq1ev6JNPPiEzMzPS1NQkW1tbGj58ODc+Mfb/lSW/paSkEACFv44dO6ocJ2MfsvLObwMHDiRLS0vS0tKiOnXq0MCBAykpKakSj4ix91NZ8pqtra3SvObn5yeE4ec2xopXnvmNn90YK15Z8tqsWbOoQYMGpK2tTcbGxuTm5kb79u0TxVdeZZuEiKhsfYIZY4wxxhhjjDHGGGOMvY+q9Ri+jDHGGGOMMcYYY4wxxv4PN/gyxhhjjDHGGGOMMcZYDcENvowxxhhjjDHGGGOMMVZDcIMvY4wxxhhjjDHGGGOM1RDc4MsYY4wxxhhjjDHGGGM1BDf4MsYYY4wxxhhjjDHGWA3BDb6MMcYYY4wxxhhjjDFWQ3CDL2OMMcYYqzTbt2+HkZFRVSfjrUkkEvzxxx8lhhk6dCj69OlTKelhjDHGGGOsKG7wZYwxxhhjZTJ06FBIJBKFv6SkpKpOGrZv3y6kR01NDdbW1hg2bBgePnxYLvGnpaWhR48eAIDU1FRIJBJERUWJwqxatQrbt28vl/0VZ968ecJxqqurw8bGBiNGjMDjx4/LFA83TjPGGGOM1TwaVZ0AxhhjjDFW/Xh6emLbtm2iZWZmZlWUGjEDAwPEx8dDLpfj2rVrGDZsGP7991+cOHHineO2sLAoNYyhoeE770cVTZo0wenTp1FQUIC4uDh88803ePbsGfbv318p+2eMMcYYY+8n7uHLGGOMMcbKTCqVwsLCQvSnrq6On376Cc2aNYOuri5sbGwwZswYZGZmFhvPtWvX0LlzZ+jr68PAwACtWrXC5cuXhfXnzp1D+/btIZPJYGNjgwkTJuDly5clpk0ikcDCwgJWVlbo0aMHJkyYgNOnTyMrKwtyuRw//PADrK2tIZVK0aJFCxw/flzYNjc3F+PGjYOlpSW0tbVha2uLRYsWieIuHNKhXr16AICWLVtCIpGgU6dOAMS9Zjdv3gwrKyvI5XJRGnv37o1vvvlG+Hzo0CG4uLhAW1sb9evXx/z585Gfn1/icWpoaMDCwgJ16tRBt27d8OWXX+LUqVPC+oKCAvj6+qJevXqQyWRwdHTEqlWrhPXz5s3Djh07cOjQIaG3cEhICADg7t27GDBgAIyMjGBiYoLevXsjNTW1xPQwxhhjjLH3Azf4MsYYY4yxcqOmpobVq1fjxo0b2LFjB86cOYOpU6cWG97LywvW1ta4dOkSIiMjMX36dGhqagIAkpOT4enpif79++P69evYv38/zp07h3HjxpUpTTKZDHK5HPn5+Vi1ahUCAgKwfPlyXL9+HR4eHvj888+RmJgIAFi9ejUOHz6MX375BfHx8dizZw/s7OyUxnvx4kUAwOnTp5GWlobff/9dIcyXX36JjIwM/P3338Kyx48f4/jx4/Dy8gIAhIaGwtvbGxMnTkRsbCw2bdqE7du3w9/fX+VjTE1NxYkTJ6ClpSUsk8vlsLa2xoEDBxAbG4u5c+di5syZ+OWXXwAA33//PQYMGABPT0+kpaUhLS0N7dq1Q15eHjw8PKCvr4/Q0FCEhYVBT08Pnp6eyM3NVTlNjDHGGGOsavCQDowxxhhjrMyOHDkCPT094XOPHj1w4MAB/O9//xOW2dnZ4ccff8SoUaOwfv16pfHcuXMHU6ZMQaNGjQAADg4OwrpFixbBy8tLiNPBwQGrV69Gx44dsWHDBmhra5eazsTERGzcuBGtW7eGvr4+li9fjmnTpmHQoEEAgCVLluDvv//GypUrsW7dOty5cwcODg74+OOPIZFIYGtrW2zchUNYmJqaFjvUg7GxMXr06IGgoCB07doVAPDrr7+iVq1a6Ny5MwBg/vz5mD59Onx8fAAA9evXx4IFCzB16lT4+fkVu//o6Gjo6emhoKAA2dnZAICffvpJWK+pqYn58+cLn+vVq4fw8HD88ssvGDBgAPT09CCTyZCTkyNK/+7duyGXy/Hzzz9DIpEAALZt2wYjIyOEhITgk08+KTZNjDHGGGOs6nGDL2OMMcYYK7POnTtjw4YNwmddXV0Ar3u7Llq0CDdv3sTz58+Rn5+P7OxsvHr1Cjo6OgrxTJ48Gd9++y127dolDEtgb28P4PVwD9evX8eePXuE8EQEuVyOlJQUODk5KU3bs2fPoKenB7lcjuzsbHz88cf4+eef8fz5c/z7779wd3cXhXd3d8e1a9cAvB6OoXv37nB0dISnpyd69er1zg2cXl5eGD58ONavXw+pVIo9e/Zg0KBBUFNTE44zLCxM1KO3sBG3uPMGAI6Ojjh8+DCys7Oxe/duREVFYfz48aIw69atQ2BgIO7cuYOsrCzk5uaiRYsWJab32rVrSEpKgr6+vmh5dnY2kpOT3+IMMMYYY4yxysQNvowxxhhjrMx0dXXRoEED0bLU1FT06tULo0ePhr+/P0xMTHDu3Dn4+voiNzdXacPlvHnzMGTIEBw9ehTHjh2Dn58f9u3bh759+yIzMxMjR47EhAkTFLarW7dusWnT19fHlStXoKamBktLS8hkMgDA8+fPSz0uFxcXpKSk4NixYzh9+jQGDBiAbt264ddffy112+J89tlnICIcPXoUH330EUJDQ7FixQphfWZmJubPn49+/fopbFtSL2YtLS3hGixevBg9e/bE/PnzsWDBAgDAvn378P333yMgIABubm7Q19fHsmXLEBERUWJ6MzMz0apVK1FDe6H3ZWI+xhhjjDFWPG7wZYwxxhhj5SIyMhJyuRwBAQFC79XC8WJL0rBhQzRs2BCTJk3C4MGDsW3bNvTt2xcuLi6IjY1VaFgujZqamtJtDAwMYGVlhbCwMHTs2FFYHhYWBldXV1G4gQMHYuDAgfjiiy/g6emJx48fw8TERBRf4Xi5BQUFJaZHW1sb/fr1w549e5CUlARHR0e4uLgI611cXBAfH1/m4yxq9uzZ6NKlC0aPHi0cZ7t27TBmzBghTNEeulpaWgrpd3Fxwf79+2Fubg4DA4N3ShNjjDHGGKt8PGkbY4wxxhgrFw0aNEBeXh7WrFmDW7duYdeuXdi4cWOx4bOysjBu3DiEhITg9u3bCAsLw6VLl4ShGqZNm4bz589j3LhxiIqKQmJiIg4dOlTmSdveNGXKFCxZsgT79+9HfHw8pk+fjqioKEycOBHA6zFw9+7di5s3byIhIQEHD
hyAhYUFjIyMFOIyNzeHTCbD8ePH8eDBAzx79qzY/Xp5eeHo0aMIDAwUJmsrNHfuXOzcuRPz58/HjRs3EBcXh3379mH27NllOjY3Nzc4Oztj4cKFAF6PeXz58mWcOHECCQkJmDNnDi5duiTaxs7ODtevX0d8fDwePXqEvLw8eHl5oVatWujduzdCQ0ORkpKCkJAQTJgwAffu3StTmhhjjDHGWOXjBl/GGGOMMVYumjdvjp9++glLlixB06ZNsWfPHixatKjY8Orq6sjIyIC3tzcaNmyIAQMGoEePHsJEY87Ozjh79iwSEhLQvn17tGzZEnPnzoWVldVbp3HChAmYPHkyvvvuOzRr1gzHjx/H4cOHhcni9PX1sXTpUrRu3RofffQRUlNT8ddffwk9lt+koaGB1atXY9OmTbCyskLv3r2L3W+XLl1gYmKC+Ph4DBkyRLTOw8MDR44cwcmTJ/HRRx+hbdu2WLFiRYkTxhVn0qRJ+Pnnn3H37l2MHDkS/fr1w8CBA9GmTRtkZGSIevsCwPDhw+Ho6IjWrVvDzMwMYWFh0NHRwT///IO6deuiX79+cHJygq+vL7Kzs7nHL2OMMcZYNSAhIqrqRDDGGGOMMcYYY4wxxhh7d9zDlzHGGGOMMcYYY4wxxmoIbvBljDHGGGOMMcYYY4yxGoIbfBljjDHGGGOMMcYYY6yG4AZfxhhjjDHGGGOMMcYYqyG4wZcxxhhjjDHGGGOMMcZqCG7wZYwxxhhjjDHGGGOMsRqCG3wZY4wxxhhjjDHGGGOshuAGX8YYY4wxxhhjjDHGGKshuMGXMcYYY4wxxhhjjDHGaghu8GWMMcYYY4wxxhhjjLEaght8GWOMMcYYY4wxxhhjrIbgBl/GGGOMMcYYY4wxxhirIf4fwEU9OpY/oJwAAAAASUVORK5CYII=",
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "source": [
- "# to show all models in same graph\n",
- "plt.figure(figsize=(17, 8))\n",
- "\n",
- "for model_name in models:\n",
- " # to show graphs model by model\n",
- " # plt.figure(figsize=(17, 8))\n",
- " accs = []\n",
- " aucs = []\n",
- " fprs = []\n",
- " tprs = []\n",
- " labels = []\n",
- " for distance_metric in distance_metrics:\n",
- " # for detector_backend in robust_face_detectors:\n",
- " for detector_backend in detectors:\n",
- " for align in alignment:\n",
- " if detector_backend == \"skip\" and align is True:\n",
- " continue\n",
- " acc, auc, fpr, tpr, label = plot_roc(model_name, detector_backend, distance_metric, align)\n",
- " accs.append(acc)\n",
- " aucs.append(auc)\n",
- " fprs.append(fpr)\n",
- " tprs.append(tpr)\n",
- " labels.append(label)\n",
- " # ---------------------------------\n",
- " #sort by auc\n",
- " df = pd.DataFrame({\"acc\": accs, \"auc\": aucs, \"fpr\": fprs, \"tpr\": tprs, \"label\": labels})\n",
- " # df = df.sort_values(by = [\"auc\"], ascending = False).reset_index()\n",
- " df = df.sort_values(by = [\"acc\"], ascending = False).reset_index()\n",
- " \n",
- " for index, instance in df.iterrows():\n",
- " fpr = instance[\"fpr\"]\n",
- " tpr = instance[\"tpr\"]\n",
- " auc = instance[\"auc\"]\n",
- " acc = instance[\"acc\"]\n",
- " label = instance[\"label\"]\n",
- " \n",
- " plt.plot(fpr, tpr, label=label)\n",
- " plt.ylabel(\"True Positive Rate\")\n",
- " plt.xlabel(\"False Positive Rate\")\n",
- " plt.legend(loc=\"lower center\", ncol=2)\n",
- " # normally this should be [0, 1] but that scale makes graphs not legible\n",
- " # plt.xlim([0, 1])\n",
- " plt.xlim([0, 0.3])\n",
- "\n",
- " # to show the best auc value\n",
- " break\n",
- " \n",
- " # to show graphs model by model\n",
- " # plt.show()\n",
- " # print(\"----------------\")\n",
- "\n",
- "# to show all models in same graph\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "661c5236",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.16"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/benchmarks/Perform-Experiments.ipynb b/benchmarks/Perform-Experiments.ipynb
deleted file mode 100644
index 977e690..0000000
--- a/benchmarks/Perform-Experiments.ipynb
+++ /dev/null
@@ -1,352 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "id": "8133a99d",
- "metadata": {},
- "source": [
- "# Perform Experiments with DeepFace on LFW dataset"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "id": "5aab0cbe",
- "metadata": {},
- "outputs": [],
- "source": [
- "# built-in dependencies\n",
- "import os\n",
- "\n",
- "# 3rd party dependencies\n",
- "import numpy as np\n",
- "import pandas as pd\n",
- "from tqdm import tqdm\n",
- "import matplotlib.pyplot as plt\n",
- "from sklearn.metrics import accuracy_score\n",
- "from sklearn.datasets import fetch_lfw_pairs\n",
- "from deepface import DeepFace"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "64c9ed9a",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "This experiment is done with pip package of deepface with 0.0.90 version\n"
- ]
- }
- ],
- "source": [
- "print(f\"This experiment is done with pip package of deepface with {DeepFace.__version__} version\")"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "feaec973",
- "metadata": {},
- "source": [
- "### Configuration Sets"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "453104b4",
- "metadata": {},
- "outputs": [],
- "source": [
- "# all configuration alternatives for 4 dimensions of arguments\n",
- "alignment = [True, False]\n",
- "models = [\"Facenet512\", \"Facenet\", \"VGG-Face\", \"ArcFace\", \"Dlib\", \"GhostFaceNet\", \"SFace\", \"OpenFace\", \"DeepFace\", \"DeepID\"]\n",
- "detectors = [\"retinaface\", \"mtcnn\", \"fastmtcnn\", \"dlib\", \"yolov8\", \"yunet\", \"centerface\", \"mediapipe\", \"ssd\", \"opencv\", \"skip\"]\n",
- "metrics = [\"euclidean\", \"euclidean_l2\", \"cosine\"]\n",
- "expand_percentage = 0"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "c9aeb57a",
- "metadata": {},
- "source": [
- "### Create Required Folders if necessary"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "671d8a00",
- "metadata": {},
- "outputs": [],
- "source": [
- "target_paths = [\"lfwe\", \"dataset\", \"outputs\", \"outputs/test\", \"results\"]\n",
- "for target_path in target_paths:\n",
- " if not os.path.exists(target_path):\n",
- " os.mkdir(target_path)\n",
- " print(f\"{target_path} is just created\")"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "fc31f03a",
- "metadata": {},
- "source": [
- "### Load LFW Dataset"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "721a7d70",
- "metadata": {},
- "outputs": [],
- "source": [
- "pairs_touch = \"outputs/test_lfwe.txt\"\n",
- "instances = 1000 #pairs.shape[0]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "010184d8",
- "metadata": {},
- "outputs": [],
- "source": [
- "target_path = \"dataset/test_lfw.npy\"\n",
- "labels_path = \"dataset/test_labels.npy\"\n",
- "\n",
- "if os.path.exists(target_path) != True:\n",
- " fetch_lfw_pairs = fetch_lfw_pairs(subset = 'test', color = True\n",
- " , resize = 2\n",
- " , funneled = False\n",
- " , slice_=None\n",
- " )\n",
- " pairs = fetch_lfw_pairs.pairs\n",
- " labels = fetch_lfw_pairs.target\n",
- " target_names = fetch_lfw_pairs.target_names\n",
- " np.save(target_path, pairs)\n",
- " np.save(labels_path, labels)\n",
- "else:\n",
- " if not os.path.exists(pairs_touch):\n",
- " # loading pairs takes some time. but if we extract these pairs as image, no need to load it anymore\n",
- " pairs = np.load(target_path)\n",
- " labels = np.load(labels_path) "
- ]
- },
- {
- "cell_type": "markdown",
- "id": "005f582e",
- "metadata": {},
- "source": [
- "### Save LFW image pairs into file system"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "id": "5bc23313",
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 1000/1000 [00:00<00:00, 190546.25it/s]\n"
- ]
- }
- ],
- "source": [
- "for i in tqdm(range(0, instances)):\n",
- " img1_target = f\"lfwe/test/{i}_1.jpg\"\n",
- " img2_target = f\"lfwe/test/{i}_2.jpg\"\n",
- " \n",
- " if not os.path.exists(img1_target):\n",
- " img1 = pairs[i][0]\n",
- " # plt.imsave(img1_target, img1/255) #works for my mac\n",
- " plt.imsave(img1_target, img1) #works for my debian\n",
- " \n",
- " if not os.path.exists(img2_target):\n",
- " img2 = pairs[i][1]\n",
- " # plt.imsave(img2_target, img2/255) #works for my mac\n",
- " plt.imsave(img2_target, img2) #works for my debian\n",
- " \n",
- "if not os.path.exists(pairs_touch):\n",
- " open(pairs_touch,'a').close()"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "6f8fa8fa",
- "metadata": {},
- "source": [
- "### Perform Experiments\n",
- "\n",
- "This block will save the experiments results in outputs folder"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "id": "e7fba936",
- "metadata": {},
- "outputs": [],
- "source": [
- "for model_name in models:\n",
- " for detector_backend in detectors:\n",
- " for distance_metric in metrics:\n",
- " for align in alignment:\n",
- " \n",
- " if detector_backend == \"skip\" and align is True:\n",
- " # Alignment is not possible for a skipped detector configuration\n",
- " continue\n",
- " \n",
- " alignment_text = \"aligned\" if align is True else \"unaligned\"\n",
- " task = f\"{model_name}_{detector_backend}_{distance_metric}_{alignment_text}\"\n",
- " output_file = f\"outputs/test/{task}.csv\"\n",
- " if os.path.exists(output_file):\n",
- " #print(f\"{output_file} is available already\")\n",
- " continue\n",
- " \n",
- " distances = []\n",
- " for i in tqdm(range(0, instances), desc = task):\n",
- " img1_target = f\"lfwe/test/{i}_1.jpg\"\n",
- " img2_target = f\"lfwe/test/{i}_2.jpg\"\n",
- " result = DeepFace.verify(\n",
- " img1_path=img1_target,\n",
- " img2_path=img2_target,\n",
- " model_name=model_name,\n",
- " detector_backend=detector_backend,\n",
- " distance_metric=distance_metric,\n",
- " align=align,\n",
- " enforce_detection=False,\n",
- " expand_percentage=expand_percentage,\n",
- " )\n",
- " distance = result[\"distance\"]\n",
- " distances.append(distance)\n",
- " # -----------------------------------\n",
- " df = pd.DataFrame(list(labels), columns = [\"actuals\"])\n",
- " df[\"distances\"] = distances\n",
- " df.to_csv(output_file, index=False)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "a0b8dafa",
- "metadata": {},
- "source": [
- "### Calculate Results\n",
- "\n",
- "Experiments were responsible for calculating distances. We will calculate the best accuracy scores in this block."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "id": "67376e76",
- "metadata": {},
- "outputs": [],
- "source": [
- "data = [[0 for _ in range(len(models))] for _ in range(len(detectors))]\n",
- "base_df = pd.DataFrame(data, columns=models, index=detectors)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "id": "f2cc536b",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "results/pivot_euclidean_with_alignment_True.csv saved\n",
- "results/pivot_euclidean_l2_with_alignment_True.csv saved\n",
- "results/pivot_cosine_with_alignment_True.csv saved\n",
- "results/pivot_euclidean_with_alignment_False.csv saved\n",
- "results/pivot_euclidean_l2_with_alignment_False.csv saved\n",
- "results/pivot_cosine_with_alignment_False.csv saved\n"
- ]
- }
- ],
- "source": [
- "for is_aligned in alignment:\n",
- " for distance_metric in metrics:\n",
- "\n",
- " current_df = base_df.copy()\n",
- " \n",
- " target_file = f\"results/pivot_{distance_metric}_with_alignment_{is_aligned}.csv\"\n",
- " if os.path.exists(target_file):\n",
- " continue\n",
- " \n",
- " for model_name in models:\n",
- " for detector_backend in detectors:\n",
- "\n",
- " align = \"aligned\" if is_aligned is True else \"unaligned\"\n",
- "\n",
- " if detector_backend == \"skip\" and is_aligned is True:\n",
- " # Alignment is not possible for a skipped detector configuration\n",
- " align = \"unaligned\"\n",
- "\n",
- " source_file = f\"outputs/test/{model_name}_{detector_backend}_{distance_metric}_{align}.csv\"\n",
- " df = pd.read_csv(source_file)\n",
- " \n",
- " positive_mean = df[(df[\"actuals\"] == True) | (df[\"actuals\"] == 1)][\"distances\"].mean()\n",
- " negative_mean = df[(df[\"actuals\"] == False) | (df[\"actuals\"] == 0)][\"distances\"].mean()\n",
- "\n",
- " distances = sorted(df[\"distances\"].values.tolist())\n",
- "\n",
- " items = []\n",
- " for i, distance in enumerate(distances):\n",
- " if distance >= positive_mean and distance <= negative_mean:\n",
- " sandbox_df = df.copy()\n",
- " sandbox_df[\"predictions\"] = False\n",
- " idx = sandbox_df[sandbox_df[\"distances\"] < distance].index\n",
- " sandbox_df.loc[idx, \"predictions\"] = True\n",
- "\n",
- " actuals = sandbox_df.actuals.values.tolist()\n",
- " predictions = sandbox_df.predictions.values.tolist()\n",
- " accuracy = 100*accuracy_score(actuals, predictions)\n",
- " items.append((distance, accuracy))\n",
- "\n",
- " pivot_df = pd.DataFrame(items, columns = [\"distance\", \"accuracy\"])\n",
- " pivot_df = pivot_df.sort_values(by = [\"accuracy\"], ascending = False)\n",
- " threshold = pivot_df.iloc[0][\"distance\"]\n",
- " # print(f\"threshold for {model_name}/{detector_backend} is {threshold}\")\n",
- " accuracy = pivot_df.iloc[0][\"accuracy\"]\n",
- "\n",
- " # print(source_file, round(accuracy, 1))\n",
- " current_df.at[detector_backend, model_name] = round(accuracy, 1)\n",
- " \n",
- " current_df.to_csv(target_file)\n",
- " print(f\"{target_file} saved\")"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.16"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/benchmarks/README.md b/benchmarks/README.md
deleted file mode 100644
index 1370c8e..0000000
--- a/benchmarks/README.md
+++ /dev/null
@@ -1,134 +0,0 @@
-# Benchmarks
-
-[`🎥 Video Tutorial`](https://youtu.be/eKOZawGR3y0)
-
-DeepFace offers various configurations that significantly impact accuracy, including the facial recognition model, face detector model, distance metric, and alignment mode. Our experiments conducted on the [LFW dataset](https://sefiks.com/2020/08/27/labeled-faces-in-the-wild-for-face-recognition/) using different combinations of these configurations yield the following results.
-
-You can reproduce the results by running the `Perform-Experiments.ipynb` notebook first and then `Evaluate-Results.ipynb`.
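For orientation, each cell in the matrices below corresponds to one such configuration. A minimal sketch of scoring a single pair under one configuration (assuming deepface is installed and the LFW pairs were exported as `lfwe/test/{i}_1.jpg` / `{i}_2.jpg` by the experiments notebook) could look like this:

```python
from deepface import DeepFace

# One cell of the benchmark grid: a specific recognition model, detector,
# distance metric and alignment mode applied to a single LFW pair.
result = DeepFace.verify(
    img1_path="lfwe/test/0_1.jpg",
    img2_path="lfwe/test/0_2.jpg",
    model_name="Facenet512",
    detector_backend="retinaface",
    distance_metric="euclidean_l2",
    align=True,
    enforce_detection=False,
)
print(result["distance"], result["verified"])
```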
-
-## ROC Curves
-
-ROC curves provide a valuable means of evaluating the performance of different models on a broader scale. The following illustration shows ROC curves for different facial recognition models alongside their optimal configurations yielding the highest accuracy scores.
-
-
-
-In summary, FaceNet-512d surpasses human-level accuracy and FaceNet-128d reaches it; Dlib, VGG-Face, and ArcFace trail closely but remain slightly below; GhostFaceNet and SFace perform notably without leading; and OpenFace, DeepFace, and DeepID exhibit lower performance.
-
-## Accuracy Scores
-
-Please note that humans achieve a 97.5% accuracy score on the same dataset. Configurations that outperform this benchmark are highlighted in bold.
-
-## Performance Matrix for euclidean while alignment is True
-
-| | Facenet512 |Facenet |VGG-Face |ArcFace |Dlib |GhostFaceNet |SFace |OpenFace |DeepFace |DeepID |
-| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
-| retinaface |95.9 |93.5 |95.8 |85.2 |88.9 |85.9 |80.2 |69.4 |67.0 |65.6 |
-| mtcnn |95.2 |93.8 |95.9 |83.7 |89.4 |83.0 |77.4 |70.2 |66.5 |63.3 |
-| fastmtcnn |96.0 |93.4 |95.8 |83.5 |91.1 |82.8 |77.7 |69.4 |66.7 |64.0 |
-| dlib |96.0 |90.8 |94.5 |88.6 |96.8 |65.7 |66.3 |75.8 |63.4 |60.4 |
-| yolov8 |94.4 |91.9 |95.0 |84.1 |89.2 |77.6 |73.4 |68.7 |69.0 |66.5 |
-| yunet |97.3 |96.1 |96.0 |84.9 |92.2 |84.0 |79.4 |70.9 |65.8 |65.2 |
-| centerface |**97.6** |95.8 |95.7 |83.6 |90.4 |82.8 |77.4 |68.9 |65.5 |62.8 |
-| mediapipe |95.1 |88.6 |92.9 |73.2 |93.1 |63.2 |72.5 |78.7 |61.8 |62.2 |
-| ssd |88.9 |85.6 |87.0 |75.8 |83.1 |79.1 |76.9 |66.8 |63.4 |62.5 |
-| opencv |88.2 |84.2 |87.3 |73.0 |84.4 |83.8 |81.1 |66.4 |65.5 |59.6 |
-| skip |92.0 |64.1 |90.6 |56.6 |69.0 |75.1 |81.4 |57.4 |60.8 |60.7 |
-
-## Performance Matrix for euclidean while alignment is False
-
-| | Facenet512 |Facenet |VGG-Face |ArcFace |Dlib |GhostFaceNet |SFace |OpenFace |DeepFace |DeepID |
-| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
-| retinaface |96.1 |92.8 |95.7 |84.1 |88.3 |83.2 |78.6 |70.8 |67.4 |64.3 |
-| mtcnn |95.9 |92.5 |95.5 |81.8 |89.3 |83.2 |76.3 |70.9 |65.9 |63.2 |
-| fastmtcnn |96.3 |93.0 |96.0 |82.2 |90.0 |82.7 |76.8 |71.2 |66.5 |64.3 |
-| dlib |96.0 |89.0 |94.1 |82.6 |96.3 |65.6 |73.1 |75.9 |61.8 |61.9 |
-| yolov8 |94.8 |90.8 |95.2 |83.2 |88.4 |77.6 |71.6 |68.9 |68.2 |66.3 |
-| yunet |**97.9** |96.5 |96.3 |84.1 |91.4 |82.7 |78.2 |71.7 |65.5 |65.2 |
-| centerface |97.4 |95.4 |95.8 |83.2 |90.3 |82.0 |76.5 |69.9 |65.7 |62.9 |
-| mediapipe |94.9 |87.1 |93.1 |71.1 |91.9 |61.9 |73.2 |77.6 |61.7 |62.4 |
-| ssd |97.2 |94.9 |96.7 |83.9 |88.6 |84.9 |82.0 |69.9 |66.7 |64.0 |
-| opencv |94.1 |90.2 |95.8 |89.8 |91.2 |91.0 |86.9 |71.1 |68.4 |61.1 |
-| skip |92.0 |64.1 |90.6 |56.6 |69.0 |75.1 |81.4 |57.4 |60.8 |60.7 |
-
-## Performance Matrix for euclidean_l2 while alignment is True
-
-| | Facenet512 |Facenet |VGG-Face |ArcFace |Dlib |GhostFaceNet |SFace |OpenFace |DeepFace |DeepID |
-| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
-| retinaface |**98.4** |96.4 |95.8 |96.6 |89.1 |90.5 |92.4 |69.4 |67.7 |64.4 |
-| mtcnn |**97.6** |96.8 |95.9 |96.0 |90.0 |89.8 |90.5 |70.2 |66.4 |64.0 |
-| fastmtcnn |**98.1** |97.2 |95.8 |96.4 |91.0 |89.5 |90.0 |69.4 |67.4 |64.1 |
-| dlib |97.0 |92.6 |94.5 |95.1 |96.4 |63.3 |69.8 |75.8 |66.5 |59.5 |
-| yolov8 |97.3 |95.7 |95.0 |95.5 |88.8 |88.9 |91.9 |68.7 |67.5 |66.0 |
-| yunet |**97.9** |97.4 |96.0 |96.7 |91.6 |89.1 |91.0 |70.9 |66.5 |63.6 |
-| centerface |**97.7** |96.8 |95.7 |96.5 |90.9 |87.5 |89.3 |68.9 |67.8 |64.0 |
-| mediapipe |96.1 |90.6 |92.9 |90.3 |92.6 |64.4 |75.4 |78.7 |64.7 |63.0 |
-| ssd |88.7 |87.5 |87.0 |86.2 |83.3 |82.2 |84.6 |66.8 |64.1 |62.6 |
-| opencv |87.6 |84.8 |87.3 |84.6 |84.0 |85.0 |83.6 |66.4 |63.8 |60.9 |
-| skip |91.4 |67.6 |90.6 |57.2 |69.3 |78.4 |83.4 |57.4 |62.6 |61.6 |
-
-## Performance Matrix for euclidean_l2 while alignment is False
-
-| | Facenet512 |Facenet |VGG-Face |ArcFace |Dlib |GhostFaceNet |SFace |OpenFace |DeepFace |DeepID |
-| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
-| retinaface |**98.0** |95.9 |95.7 |95.7 |88.4 |89.5 |90.6 |70.8 |67.7 |64.6 |
-| mtcnn |**97.8** |96.2 |95.5 |95.9 |89.2 |88.0 |91.1 |70.9 |67.0 |64.0 |
-| fastmtcnn |**97.7** |96.6 |96.0 |95.9 |89.6 |87.8 |89.7 |71.2 |67.8 |64.2 |
-| dlib |96.5 |89.9 |94.1 |93.8 |95.6 |63.0 |75.0 |75.9 |62.6 |61.8 |
-| yolov8 |**97.7** |95.8 |95.2 |95.0 |88.1 |88.7 |89.8 |68.9 |68.9 |65.3 |
-| yunet |**98.3** |96.8 |96.3 |96.1 |91.7 |88.0 |90.5 |71.7 |67.6 |63.2 |
-| centerface |97.4 |96.3 |95.8 |95.8 |90.2 |86.8 |89.3 |69.9 |68.4 |63.1 |
-| mediapipe |96.3 |90.0 |93.1 |89.3 |91.8 |65.6 |74.6 |77.6 |64.9 |61.6 |
-| ssd |**97.9** |97.0 |96.7 |96.6 |89.4 |91.5 |93.0 |69.9 |68.7 |64.9 |
-| opencv |96.2 |92.9 |95.8 |93.2 |91.5 |93.3 |91.7 |71.1 |68.3 |61.6 |
-| skip |91.4 |67.6 |90.6 |57.2 |69.3 |78.4 |83.4 |57.4 |62.6 |61.6 |
-
-## Performance Matrix for cosine while alignment is True
-
-| | Facenet512 |Facenet |VGG-Face |ArcFace |Dlib |GhostFaceNet |SFace |OpenFace |DeepFace |DeepID |
-| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
-| retinaface |**98.4** |96.4 |95.8 |96.6 |89.1 |90.5 |92.4 |69.4 |67.7 |64.4 |
-| mtcnn |**97.6** |96.8 |95.9 |96.0 |90.0 |89.8 |90.5 |70.2 |66.3 |63.0 |
-| fastmtcnn |**98.1** |97.2 |95.8 |96.4 |91.0 |89.5 |90.0 |69.4 |67.4 |63.6 |
-| dlib |97.0 |92.6 |94.5 |95.1 |96.4 |63.3 |69.8 |75.8 |66.5 |58.7 |
-| yolov8 |97.3 |95.7 |95.0 |95.5 |88.8 |88.9 |91.9 |68.7 |67.5 |65.9 |
-| yunet |**97.9** |97.4 |96.0 |96.7 |91.6 |89.1 |91.0 |70.9 |66.5 |63.5 |
-| centerface |**97.7** |96.8 |95.7 |96.5 |90.9 |87.5 |89.3 |68.9 |67.8 |63.6 |
-| mediapipe |96.1 |90.6 |92.9 |90.3 |92.6 |64.3 |75.4 |78.7 |64.8 |63.0 |
-| ssd |88.7 |87.5 |87.0 |86.2 |83.3 |82.2 |84.5 |66.8 |63.8 |62.6 |
-| opencv |87.6 |84.9 |87.2 |84.6 |84.0 |85.0 |83.6 |66.2 |63.7 |60.1 |
-| skip |91.4 |67.6 |90.6 |54.8 |69.3 |78.4 |83.4 |57.4 |62.6 |61.1 |
-
-## Performance Matrix for cosine while alignment is False
-
-| | Facenet512 |Facenet |VGG-Face |ArcFace |Dlib |GhostFaceNet |SFace |OpenFace |DeepFace |DeepID |
-| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
-| retinaface |**98.0** |95.9 |95.7 |95.7 |88.4 |89.5 |90.6 |70.8 |67.7 |63.7 |
-| mtcnn |**97.8** |96.2 |95.5 |95.9 |89.2 |88.0 |91.1 |70.9 |67.0 |64.0 |
-| fastmtcnn |**97.7** |96.6 |96.0 |95.9 |89.6 |87.8 |89.7 |71.2 |67.8 |62.7 |
-| dlib |96.5 |89.9 |94.1 |93.8 |95.6 |63.0 |75.0 |75.9 |62.6 |61.7 |
-| yolov8 |**97.7** |95.8 |95.2 |95.0 |88.1 |88.7 |89.8 |68.9 |68.9 |65.3 |
-| yunet |**98.3** |96.8 |96.3 |96.1 |91.7 |88.0 |90.5 |71.7 |67.6 |63.2 |
-| centerface |97.4 |96.3 |95.8 |95.8 |90.2 |86.8 |89.3 |69.9 |68.4 |62.6 |
-| mediapipe |96.3 |90.0 |93.1 |89.3 |91.8 |64.8 |74.6 |77.6 |64.9 |61.6 |
-| ssd |**97.9** |97.0 |96.7 |96.6 |89.4 |91.5 |93.0 |69.9 |68.7 |63.8 |
-| opencv |96.2 |92.9 |95.8 |93.2 |91.5 |93.3 |91.7 |71.1 |68.1 |61.1 |
-| skip |91.4 |67.6 |90.6 |54.8 |69.3 |78.4 |83.4 |57.4 |62.6 |61.1 |
-
-# Citation
-
-Please cite deepface in your publications if it helps your research - see [`CITATIONS`](https://github.com/serengil/deepface/blob/master/CITATION.md) for more details. Here is its BibTeX entry:
-
-```BibTeX
-@article{serengil2024lightface,
- title = {A Benchmark of Facial Recognition Pipelines and Co-Usability Performances of Modules},
- author = {Serengil, Sefik Ilkin and Ozpinar, Alper},
- journal = {Bilisim Teknolojileri Dergisi},
- volume = {17},
- number = {2},
- pages = {95-107},
- year = {2024},
- doi = {10.17671/gazibtd.1399077},
- url = {https://dergipark.org.tr/en/pub/gazibtd/issue/84331/1399077},
- publisher = {Gazi University}
-}
-```
diff --git a/comfyui_client.py b/comfyui_client.py
new file mode 100644
index 0000000..4726ecc
--- /dev/null
+++ b/comfyui_client.py
@@ -0,0 +1,139 @@
+import requests
+import uuid
+import json
+import os
+import time
+from PIL import Image
+from io import BytesIO
+
+
+class ComfyUIClient:
+ def __init__(self, server_url="http://127.0.0.1:8188"):
+ """初始化ComfyUI客户端"""
+ self.server_url = server_url.rstrip("/")
+ self.client_id = str(uuid.uuid4())
+ self.output_images = []
+
+ def upload_image(self, image_path):
+ """上传图片到ComfyUI服务器"""
+ if not os.path.exists(image_path):
+ raise FileNotFoundError(f"图片文件不存在: {image_path}")
+
+ # Read the image file
+ with open(image_path, "rb") as file:
+ image_data = file.read()
+
+ filename = os.path.basename(image_path)
+ files = {
+ "image": (filename, image_data, "image/png")
+ }
+ data = {
+ "overwrite": "true"
+ }
+
+ url = f"{self.server_url}/upload/image"
+ response = requests.post(url, files=files, data=data)
+
+ if response.status_code != 200:
+ raise Exception(f"上传图片失败: {response.text}")
+
+ print(f"成功上传图片: {filename}")
+ return filename
+
+ def submit_prompt(self, workflow, input_image_path):
+ """提交绘图任务"""
+ # 上传图片并修改工作流
+ image_filename = self.upload_image(input_image_path)
+
+ # Update the image input node in the workflow
+ if "10" in workflow:
+ workflow["10"]["inputs"]["image"] = image_filename
+
+ # Build the request payload
+ data = {
+ "client_id": self.client_id,
+ "prompt": workflow
+ }
+
+ # Submit the task
+ url = f"{self.server_url}/prompt"
+ response = requests.post(url, json=data)
+
+ if response.status_code != 200:
+ raise Exception(f"提交任务失败: {response.text}")
+
+ result = response.json()
+ prompt_id = result.get("prompt_id")
+ print(f"任务提交成功,任务ID: {prompt_id}")
+ return prompt_id
+
+ def wait_for_completion(self, prompt_id, timeout=300, interval=2):
+ """通过 /history 轮询等待任务完成"""
+ history_url = f"{self.server_url}/history/{prompt_id}"
+ start_time = time.time()
+
+ while time.time() - start_time < timeout:
+ try:
+ response = requests.get(history_url)
+ if response.status_code == 200:
+ history = response.json().get(prompt_id)
+ if history and history.get("status", {}).get("completed", False):
+ outputs = history.get("outputs", {})
+ for node_id, output in outputs.items():
+ if "images" in output:
+ self.output_images = output["images"]
+ print(
+ f"任务完成,生成了 {len(self.output_images)} 张图片")
+ return self.output_images
+ except Exception as e:
+ print(f"轮询错误: {e}")
+
+ time.sleep(interval)
+
+ print("任务超时或未生成图片")
+ return None
+
+ def get_image(self, filename, save_path=None):
+ """获取并可选保存生成的图片"""
+ print(f"获取图片名称: {filename}")
+ url = f"{self.server_url}/view"
+ params = {
+ "filename": filename,
+ "type": "temp" # 这里可以根据需要修改为 "output" 或 "temp"
+ }
+ response = requests.get(url, params=params)
+
+ if response.status_code != 200:
+ raise Exception(f"获取图片失败: {response.status_code}")
+
+ image = Image.open(BytesIO(response.content))
+
+ if save_path:
+ os.makedirs(os.path.dirname(
+ os.path.abspath(save_path)), exist_ok=True)
+ image.save(save_path)
+ print(f"图片已保存到: {save_path}")
+
+ return image
+
+
+# Usage example
+if __name__ == "__main__":
+ client = ComfyUIClient(server_url="https://image.ai.faceta.cn")
+
+ input_image_path = "/Users/wandou/Downloads/aa.png"
+
+ with open('FaceImageArtView.json', 'r', encoding='utf-8') as f:
+ workflow = json.load(f)
+
+ # Submit the task
+ prompt_id = client.submit_prompt(workflow, input_image_path)
+
+ # Wait for the task to complete
+ output_images = client.wait_for_completion(prompt_id)
+
+ # If images were generated, fetch and save them
+ if output_images:
+ for i, img_file in enumerate(output_images):
+ save_path = f"output_{i}.png"
+ client.get_image(img_file['filename'], save_path)
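Note that `submit_prompt` hard-codes node `"10"` as the image input. This assumes the workflow JSON uses ComfyUI's API format, where node ids map to a class type and its inputs; a hypothetical fragment of `FaceImageArtView.json` under that assumption:

```python
# Hypothetical shape of the image-input node in FaceImageArtView.json
# (ComfyUI API-format workflow); the real file may name things differently.
workflow_fragment = {
    "10": {
        "class_type": "LoadImage",               # assumed image-loading node
        "inputs": {"image": "placeholder.png"},  # overwritten with the uploaded filename
    },
}
```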
diff --git a/deepface/.DS_Store b/deepface/.DS_Store
deleted file mode 100644
index 532d227..0000000
Binary files a/deepface/.DS_Store and /dev/null differ
diff --git a/deepface/DeepFace.py b/deepface/DeepFace.py
deleted file mode 100644
index 5848d7b..0000000
--- a/deepface/DeepFace.py
+++ /dev/null
@@ -1,615 +0,0 @@
-# common dependencies
-import os
-import warnings
-import logging
-from typing import Any, Dict, List, Union, Optional
-
-# this has to be set before importing tensorflow
-os.environ["TF_USE_LEGACY_KERAS"] = "1"
-
-# pylint: disable=wrong-import-position
-
-# 3rd party dependencies
-import numpy as np
-import pandas as pd
-import tensorflow as tf
-
-# package dependencies
-from deepface.commons import package_utils, folder_utils
-from deepface.commons.logger import Logger
-from deepface.modules import (
- modeling,
- representation,
- verification,
- recognition,
- demography,
- detection,
- streaming,
- preprocessing,
-)
-from deepface import __version__
-
-logger = Logger()
-
-# -----------------------------------
-# configurations for dependencies
-
-# users should install tf_keras package if they are using tf 2.16 or later versions
-package_utils.validate_for_keras3()
-
-warnings.filterwarnings("ignore")
-os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
-tf_version = package_utils.get_tf_major_version()
-if tf_version == 2:
- tf.get_logger().setLevel(logging.ERROR)
-# -----------------------------------
-
-# create required folders if necessary to store model weights
-folder_utils.initialize_folder()
-
-
-def build_model(model_name: str, task: str = "facial_recognition") -> Any:
- """
- This function builds a pre-trained model
- Args:
- model_name (str): model identifier
- - VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib,
- ArcFace, SFace, GhostFaceNet for face recognition
- - Age, Gender, Emotion, Race for facial attributes
- - opencv, mtcnn, ssd, dlib, retinaface, mediapipe, yolov8, yunet,
- fastmtcnn or centerface for face detectors
- - Fasnet for spoofing
- task (str): facial_recognition, facial_attribute, face_detector, spoofing
- default is facial_recognition
- Returns:
- built_model
- """
- return modeling.build_model(task=task, model_name=model_name)
-
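A quick usage sketch of the two task flavours documented above (model names taken from the docstring; not part of the original file):

```python
from deepface import DeepFace

# Build (and cache) models up front, e.g. before processing a large batch.
recognition_model = DeepFace.build_model("Facenet")               # default task: facial_recognition
age_model = DeepFace.build_model("Age", task="facial_attribute")  # facial attribute predictor
```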
-
-def verify(
- img1_path: Union[str, np.ndarray, List[float]],
- img2_path: Union[str, np.ndarray, List[float]],
- model_name: str = "VGG-Face",
- detector_backend: str = "opencv",
- distance_metric: str = "cosine",
- enforce_detection: bool = True,
- align: bool = True,
- expand_percentage: int = 0,
- normalization: str = "base",
- silent: bool = False,
- threshold: Optional[float] = None,
- anti_spoofing: bool = False,
-) -> Dict[str, Any]:
- """
- Verify if an image pair represents the same person or different persons.
- Args:
- img1_path (str or np.ndarray or List[float]): Path to the first image.
- Accepts exact image path as a string, numpy array (BGR), base64 encoded images
- or pre-calculated embeddings.
-
- img2_path (str or np.ndarray or List[float]): Path to the second image.
- Accepts exact image path as a string, numpy array (BGR), base64 encoded images
- or pre-calculated embeddings.
-
- model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
- OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
-
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
- (default is opencv).
-
- distance_metric (string): Metric for measuring similarity. Options: 'cosine',
- 'euclidean', 'euclidean_l2' (default is cosine).
-
- enforce_detection (boolean): If no face is detected in an image, raise an exception.
- Set to False to avoid the exception for low-resolution images (default is True).
-
- align (bool): Flag to enable face alignment (default is True).
-
- expand_percentage (int): expand detected facial area with a percentage (default is 0).
-
- normalization (string): Normalize the input image before feeding it to the model.
- Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace (default is base)
-
- silent (boolean): Suppress or allow some log messages for a quieter analysis process
- (default is False).
-
- threshold (float): Specify a threshold to determine whether a pair represents the same
- person or different individuals. This threshold is used for comparing distances.
- If left unset, default pre-tuned threshold values will be applied based on the specified
- model name and distance metric (default is None).
-
- anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
-
- Returns:
- result (dict): A dictionary containing verification results with following keys.
-
- - 'verified' (bool): Indicates whether the images represent the same person (True)
- or different persons (False).
-
- - 'distance' (float): The distance measure between the face vectors.
- A lower distance indicates higher similarity.
-
- - 'threshold' (float): The maximum threshold used for verification.
- If the distance is below this threshold, the images are considered a match.
-
- - 'model' (str): The chosen face recognition model.
-
- - 'distance_metric' (str): The chosen similarity metric for measuring distances.
-
- - 'facial_areas' (dict): Rectangular regions of interest for faces in both images.
- - 'img1': {'x': int, 'y': int, 'w': int, 'h': int}
- Region of interest for the first image.
- - 'img2': {'x': int, 'y': int, 'w': int, 'h': int}
- Region of interest for the second image.
-
- - 'time' (float): Time taken for the verification process in seconds.
- """
-
- return verification.verify(
- img1_path=img1_path,
- img2_path=img2_path,
- model_name=model_name,
- detector_backend=detector_backend,
- distance_metric=distance_metric,
- enforce_detection=enforce_detection,
- align=align,
- expand_percentage=expand_percentage,
- normalization=normalization,
- silent=silent,
- threshold=threshold,
- anti_spoofing=anti_spoofing,
- )
-
-
-def analyze(
- img_path: Union[str, np.ndarray],
- actions: Union[tuple, list] = ("emotion", "age", "gender", "race"),
- enforce_detection: bool = True,
- detector_backend: str = "opencv",
- align: bool = True,
- expand_percentage: int = 0,
- silent: bool = False,
- anti_spoofing: bool = False,
-) -> List[Dict[str, Any]]:
- """
- Analyze facial attributes such as age, gender, emotion, and race in the provided image.
- Args:
- img_path (str or np.ndarray): The exact path to the image, a numpy array in BGR format,
- or a base64 encoded image. If the source image contains multiple faces, the result will
- include information for each detected face.
-
- actions (tuple): Attributes to analyze. The default is ('age', 'gender', 'emotion', 'race').
- You can exclude some of these attributes from the analysis if needed.
-
- enforce_detection (boolean): If no face is detected in an image, raise an exception.
- Set to False to avoid the exception for low-resolution images (default is True).
-
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
- (default is opencv).
-
- align (boolean): Perform alignment based on the eye positions (default is True).
-
- expand_percentage (int): expand detected facial area with a percentage (default is 0).
-
- silent (boolean): Suppress or allow some log messages for a quieter analysis process
- (default is False).
-
- anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
-
- Returns:
- results (List[Dict[str, Any]]): A list of dictionaries, where each dictionary represents
- the analysis results for a detected face. Each dictionary in the list contains the
- following keys:
-
- - 'region' (dict): Represents the rectangular region of the detected face in the image.
- - 'x': x-coordinate of the top-left corner of the face.
- - 'y': y-coordinate of the top-left corner of the face.
- - 'w': Width of the detected face region.
- - 'h': Height of the detected face region.
-
- - 'age' (float): Estimated age of the detected face.
-
- - 'face_confidence' (float): Confidence score for the detected face.
- Indicates the reliability of the face detection.
-
- - 'dominant_gender' (str): The dominant gender in the detected face.
- Either "Man" or "Woman".
-
- - 'gender' (dict): Confidence scores for each gender category.
- - 'Man': Confidence score for the male gender.
- - 'Woman': Confidence score for the female gender.
-
- - 'dominant_emotion' (str): The dominant emotion in the detected face.
- Possible values include "sad," "angry," "surprise," "fear," "happy,"
- "disgust," and "neutral"
-
- - 'emotion' (dict): Confidence scores for each emotion category.
- - 'sad': Confidence score for sadness.
- - 'angry': Confidence score for anger.
- - 'surprise': Confidence score for surprise.
- - 'fear': Confidence score for fear.
- - 'happy': Confidence score for happiness.
- - 'disgust': Confidence score for disgust.
- - 'neutral': Confidence score for neutrality.
-
- - 'dominant_race' (str): The dominant race in the detected face.
- Possible values include "indian," "asian," "latino hispanic,"
- "black," "middle eastern," and "white."
-
- - 'race' (dict): Confidence scores for each race category.
- - 'indian': Confidence score for Indian ethnicity.
- - 'asian': Confidence score for Asian ethnicity.
- - 'latino hispanic': Confidence score for Latino/Hispanic ethnicity.
- - 'black': Confidence score for Black ethnicity.
- - 'middle eastern': Confidence score for Middle Eastern ethnicity.
- - 'white': Confidence score for White ethnicity.
- """
- return demography.analyze(
- img_path=img_path,
- actions=actions,
- enforce_detection=enforce_detection,
- detector_backend=detector_backend,
- align=align,
- expand_percentage=expand_percentage,
- silent=silent,
- anti_spoofing=anti_spoofing,
- )
-
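As a hedged illustration of the documented return structure (the image path is hypothetical):

```python
from deepface import DeepFace

# One result dict per detected face, each with region, age, gender, emotion and race fields.
results = DeepFace.analyze(
    img_path="tests/dataset/couple.jpg",  # hypothetical path
    actions=("age", "gender", "emotion", "race"),
)
for face in results:
    print(face["region"], face["age"], face["dominant_gender"],
          face["dominant_emotion"], face["dominant_race"])
```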
-
-def find(
- img_path: Union[str, np.ndarray],
- db_path: str,
- model_name: str = "VGG-Face",
- distance_metric: str = "cosine",
- enforce_detection: bool = True,
- detector_backend: str = "opencv",
- align: bool = True,
- expand_percentage: int = 0,
- threshold: Optional[float] = None,
- normalization: str = "base",
- silent: bool = False,
- refresh_database: bool = True,
- anti_spoofing: bool = False,
-) -> List[pd.DataFrame]:
- """
- Identify individuals in a database
- Args:
- img_path (str or np.ndarray): The exact path to the image, a numpy array in BGR format,
- or a base64 encoded image. If the source image contains multiple faces, the result will
- include information for each detected face.
-
- db_path (string): Path to the folder containing image files. All detected faces
- in the database will be considered in the decision-making process.
-
- model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
- OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
-
- distance_metric (string): Metric for measuring similarity. Options: 'cosine',
- 'euclidean', 'euclidean_l2' (default is cosine).
-
- enforce_detection (boolean): If no face is detected in an image, raise an exception.
- Set to False to avoid the exception for low-resolution images (default is True).
-
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
- (default is opencv).
-
- align (boolean): Perform alignment based on the eye positions (default is True).
-
- expand_percentage (int): expand detected facial area with a percentage (default is 0).
-
- threshold (float): Specify a threshold to determine whether a pair represents the same
- person or different individuals. This threshold is used for comparing distances.
- If left unset, default pre-tuned threshold values will be applied based on the specified
- model name and distance metric (default is None).
-
- normalization (string): Normalize the input image before feeding it to the model.
- Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace (default is base).
-
- silent (boolean): Suppress or allow some log messages for a quieter analysis process
- (default is False).
-
- refresh_database (boolean): Synchronize the image representations (pkl) file with the
- directory/db files. If set to False, any file changes inside the db_path are ignored
- (default is True).
-
- anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
-
- Returns:
- results (List[pd.DataFrame]): A list of pandas dataframes. Each dataframe corresponds
- to the identity information for an individual detected in the source image.
- The DataFrame columns include:
-
- - 'identity': Identity label of the detected individual.
-
- - 'target_x', 'target_y', 'target_w', 'target_h': Bounding box coordinates of the
- target face in the database.
-
- - 'source_x', 'source_y', 'source_w', 'source_h': Bounding box coordinates of the
- detected face in the source image.
-
- - 'threshold': threshold used to decide whether a pair depicts the same person or different persons
-
- - 'distance': Similarity score between the faces based on the
- specified model and distance metric
- """
- return recognition.find(
- img_path=img_path,
- db_path=db_path,
- model_name=model_name,
- distance_metric=distance_metric,
- enforce_detection=enforce_detection,
- detector_backend=detector_backend,
- align=align,
- expand_percentage=expand_percentage,
- threshold=threshold,
- normalization=normalization,
- silent=silent,
- refresh_database=refresh_database,
- anti_spoofing=anti_spoofing,
- )
-
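A minimal usage sketch (paths are hypothetical); the column names follow the docstring above:

```python
from deepface import DeepFace

# One DataFrame is returned per face detected in img_path; rows are candidate
# identities from db_path together with their distances.
dfs = DeepFace.find(
    img_path="tests/dataset/img1.jpg",  # hypothetical paths
    db_path="tests/dataset",
    model_name="Facenet",
    distance_metric="cosine",
)
for df in dfs:
    print(df[["identity", "distance", "threshold"]].head())
```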
-
-def represent(
- img_path: Union[str, np.ndarray],
- model_name: str = "VGG-Face",
- enforce_detection: bool = True,
- detector_backend: str = "opencv",
- align: bool = True,
- expand_percentage: int = 0,
- normalization: str = "base",
- anti_spoofing: bool = False,
- max_faces: Optional[int] = None,
-) -> List[Dict[str, Any]]:
- """
- Represent facial images as multi-dimensional vector embeddings.
-
- Args:
- img_path (str or np.ndarray): The exact path to the image, a numpy array in BGR format,
- or a base64 encoded image. If the source image contains multiple faces, the result will
- include information for each detected face.
-
- model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
- OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet
- (default is VGG-Face).
-
- enforce_detection (boolean): If no face is detected in an image, raise an exception.
- Set to False to avoid the exception for low-resolution images
- (default is True).
-
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
- (default is opencv).
-
- align (boolean): Perform alignment based on the eye positions (default is True).
-
- expand_percentage (int): expand detected facial area with a percentage (default is 0).
-
- normalization (string): Normalize the input image before feeding it to the model.
- Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace
- (default is base).
-
- anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
-
- max_faces (int): Set a limit on the number of faces to be processed (default is None).
-
- Returns:
- results (List[Dict[str, Any]]): A list of dictionaries, each containing the
- following fields:
-
- - embedding (List[float]): Multidimensional vector representing facial features.
- The number of dimensions varies based on the reference model
- (e.g., FaceNet returns 128 dimensions, VGG-Face returns 4096 dimensions).
-
- - facial_area (dict): Detected facial area by face detection in dictionary format.
- Contains 'x' and 'y' as the left-corner point, and 'w' and 'h'
- as the width and height. If `detector_backend` is set to 'skip', it represents
- the full image area and is nonsensical.
-
- - face_confidence (float): Confidence score of face detection. If `detector_backend` is set
- to 'skip', the confidence will be 0 and is nonsensical.
- """
- return representation.represent(
- img_path=img_path,
- model_name=model_name,
- enforce_detection=enforce_detection,
- detector_backend=detector_backend,
- align=align,
- expand_percentage=expand_percentage,
- normalization=normalization,
- anti_spoofing=anti_spoofing,
- max_faces=max_faces,
- )
-
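A hedged usage sketch of the embedding output described above (the path is hypothetical):

```python
from deepface import DeepFace

# One embedding per detected face; the vector length depends on the chosen model
# (e.g. 128 dimensions for FaceNet, 4096 for VGG-Face, as noted above).
embedding_objs = DeepFace.represent(
    img_path="tests/dataset/img1.jpg",  # hypothetical path
    model_name="Facenet",
)
for obj in embedding_objs:
    print(len(obj["embedding"]), obj["facial_area"], obj["face_confidence"])
```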
-
-def stream(
- db_path: str = "",
- model_name: str = "VGG-Face",
- detector_backend: str = "opencv",
- distance_metric: str = "cosine",
- enable_face_analysis: bool = True,
- source: Any = 0,
- time_threshold: int = 5,
- frame_threshold: int = 5,
- anti_spoofing: bool = False,
-) -> None:
- """
- Run real time face recognition and facial attribute analysis
-
- Args:
- db_path (string): Path to the folder containing image files. All detected faces
- in the database will be considered in the decision-making process.
-
- model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
- OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
-
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
- (default is opencv).
-
- distance_metric (string): Metric for measuring similarity. Options: 'cosine',
- 'euclidean', 'euclidean_l2' (default is cosine).
-
- enable_face_analysis (bool): Flag to enable face analysis (default is True).
-
- source (Any): The source for the video stream (default is 0, which represents the
- default camera).
-
- time_threshold (int): The time threshold (in seconds) for face recognition (default is 5).
-
- frame_threshold (int): The frame threshold for face recognition (default is 5).
-
- anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
- Returns:
- None
- """
-
- time_threshold = max(time_threshold, 1)
- frame_threshold = max(frame_threshold, 1)
-
- streaming.analysis(
- db_path=db_path,
- model_name=model_name,
- detector_backend=detector_backend,
- distance_metric=distance_metric,
- enable_face_analysis=enable_face_analysis,
- source=source,
- time_threshold=time_threshold,
- frame_threshold=frame_threshold,
- anti_spoofing=anti_spoofing,
- )
-
-
-def extract_faces(
- img_path: Union[str, np.ndarray],
- detector_backend: str = "opencv",
- enforce_detection: bool = True,
- align: bool = True,
- expand_percentage: int = 0,
- grayscale: bool = False,
- color_face: str = "rgb",
- normalize_face: bool = True,
- anti_spoofing: bool = False,
-) -> List[Dict[str, Any]]:
- """
- Extract faces from a given image
-
- Args:
- img_path (str or np.ndarray): Path to the first image. Accepts exact image path
- as a string, numpy array (BGR), or base64 encoded images.
-
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
- (default is opencv).
-
- enforce_detection (boolean): If no face is detected in an image, raise an exception.
- Set to False to avoid the exception for low-resolution images (default is True).
-
- align (bool): Flag to enable face alignment (default is True).
-
- expand_percentage (int): expand detected facial area with a percentage (default is 0).
-
- grayscale (boolean): (Deprecated) Flag to convert the output face image to grayscale
- (default is False).
-
- color_face (string): Color to return face image output. Options: 'rgb', 'bgr' or 'gray'
- (default is 'rgb').
-
- normalize_face (boolean): Flag to enable normalization (divide by 255) of the output
- face image (default is True).
-
- anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
-
- Returns:
- results (List[Dict[str, Any]]): A list of dictionaries, where each dictionary contains:
-
- - "face" (np.ndarray): The detected face as a NumPy array.
-
- - "facial_area" (Dict[str, Any]): The detected face's regions as a dictionary containing:
- - keys 'x', 'y', 'w', 'h' with int values
- - keys 'left_eye', 'right_eye' with a tuple of 2 ints as values. left and right eyes
- are eyes on the left and right respectively with respect to the person itself
- instead of observer.
-
- - "confidence" (float): The confidence score associated with the detected face.
-
- - "is_real" (boolean): antispoofing analyze result. this key is just available in the
- result only if anti_spoofing is set to True in input arguments.
-
- - "antispoof_score" (float): score of antispoofing analyze result. this key is
- just available in the result only if anti_spoofing is set to True in input arguments.
- """
-
- return detection.extract_faces(
- img_path=img_path,
- detector_backend=detector_backend,
- enforce_detection=enforce_detection,
- align=align,
- expand_percentage=expand_percentage,
- grayscale=grayscale,
- color_face=color_face,
- normalize_face=normalize_face,
- anti_spoofing=anti_spoofing,
- )
-
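A hedged usage sketch of the return structure described above (the path is hypothetical):

```python
from deepface import DeepFace

# Detect and align faces; each entry carries the cropped face array, its region
# (including eye coordinates) and the detector confidence.
face_objs = DeepFace.extract_faces(
    img_path="tests/dataset/couple.jpg",  # hypothetical path
    detector_backend="retinaface",
    align=True,
)
for obj in face_objs:
    print(obj["facial_area"], obj["confidence"], obj["face"].shape)
```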
-
-def cli() -> None:
- """
- Command line interface entry point; exposes DeepFace functions via the fire package.
- """
- import fire
-
- fire.Fire()
-
-
-# deprecated function(s)
-
-
-def detectFace(
- img_path: Union[str, np.ndarray],
- target_size: tuple = (224, 224),
- detector_backend: str = "opencv",
- enforce_detection: bool = True,
- align: bool = True,
-) -> Union[np.ndarray, None]:
- """
- Deprecated face detection function. Use extract_faces for the same functionality.
-
- Args:
- img_path (str or np.ndarray): Path to the first image. Accepts exact image path
- as a string, numpy array (BGR), or base64 encoded images.
-
- target_size (tuple): final shape of facial image. black pixels will be
- added to resize the image (default is (224, 224)).
-
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
- (default is opencv).
-
- enforce_detection (boolean): If no face is detected in an image, raise an exception.
- Set to False to avoid the exception for low-resolution images (default is True).
-
- align (bool): Flag to enable face alignment (default is True).
-
- Returns:
- img (np.ndarray): detected (and aligned) facial area image as numpy array
- """
- logger.warn("Function detectFace is deprecated. Use extract_faces instead.")
- face_objs = extract_faces(
- img_path=img_path,
- detector_backend=detector_backend,
- grayscale=False,
- enforce_detection=enforce_detection,
- align=align,
- )
- extracted_face = None
- if len(face_objs) > 0:
- extracted_face = face_objs[0]["face"]
- extracted_face = preprocessing.resize_image(img=extracted_face, target_size=target_size)
- return extracted_face
diff --git a/deepface/__init__.py b/deepface/__init__.py
deleted file mode 100644
index cffae17..0000000
--- a/deepface/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.0.94"
diff --git a/deepface/api/.DS_Store b/deepface/api/.DS_Store
deleted file mode 100644
index eaff783..0000000
Binary files a/deepface/api/.DS_Store and /dev/null differ
diff --git a/deepface/api/__init__.py b/deepface/api/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/deepface/api/postman/deepface-api.postman_collection.json b/deepface/api/postman/deepface-api.postman_collection.json
deleted file mode 100644
index d36f8ca..0000000
--- a/deepface/api/postman/deepface-api.postman_collection.json
+++ /dev/null
@@ -1,133 +0,0 @@
-{
- "info": {
- "_postman_id": "4c0b144e-4294-4bdd-8072-bcb326b1fed2",
- "name": "deepface-api",
- "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json"
- },
- "item": [
- {
- "name": "Represent",
- "request": {
- "method": "POST",
- "header": [],
- "body": {
- "mode": "raw",
- "raw": "{\n \"model_name\": \"Facenet\",\n \"img\": \"/Users/sefik/Desktop/deepface/tests/dataset/img1.jpg\"\n}",
- "options": {
- "raw": {
- "language": "json"
- }
- }
- },
- "url": {
- "raw": "http://127.0.0.1:5000/represent",
- "protocol": "http",
- "host": [
- "127",
- "0",
- "0",
- "1"
- ],
- "port": "5000",
- "path": [
- "represent"
- ]
- }
- },
- "response": []
- },
- {
- "name": "Face verification",
- "request": {
- "method": "POST",
- "header": [],
- "body": {
- "mode": "raw",
- "raw": " {\n \t\"img1_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/img1.jpg\",\n \"img2_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/img2.jpg\",\n \"model_name\": \"Facenet\",\n \"detector_backend\": \"mtcnn\",\n \"distance_metric\": \"euclidean\"\n }",
- "options": {
- "raw": {
- "language": "json"
- }
- }
- },
- "url": {
- "raw": "http://127.0.0.1:5000/verify",
- "protocol": "http",
- "host": [
- "127",
- "0",
- "0",
- "1"
- ],
- "port": "5000",
- "path": [
- "verify"
- ]
- }
- },
- "response": []
- },
- {
- "name": "Face analysis",
- "request": {
- "method": "POST",
- "header": [],
- "body": {
- "mode": "raw",
- "raw": "{\n \"img_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/couple.jpg\",\n \"actions\": [\"age\", \"gender\", \"emotion\", \"race\"]\n}",
- "options": {
- "raw": {
- "language": "json"
- }
- }
- },
- "url": {
- "raw": "http://127.0.0.1:5000/analyze",
- "protocol": "http",
- "host": [
- "127",
- "0",
- "0",
- "1"
- ],
- "port": "5000",
- "path": [
- "analyze"
- ]
- }
- },
- "response": []
- },
- {
- "name": "Face extractor",
- "request": {
- "method": "POST",
- "header": [],
- "body": {
- "mode": "raw",
- "raw": "{\n \"img_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/couple.jpg\",\n \n}",
- "options": {
- "raw": {
- "language": "json"
- }
- }
- },
- "url": {
- "raw": "http://127.0.0.1:5005/extract_faces",
- "protocol": "http",
- "host": [
- "127",
- "0",
- "0",
- "1"
- ],
- "port": "5005",
- "path": [
- "extract_faces"
- ]
- }
- },
- "response": []
- }
- ]
-}
\ No newline at end of file
diff --git a/deepface/api/src/__init__.py b/deepface/api/src/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/deepface/api/src/__pycache__/api.cpython-39.pyc b/deepface/api/src/__pycache__/api.cpython-39.pyc
deleted file mode 100644
index ad4e60e..0000000
Binary files a/deepface/api/src/__pycache__/api.cpython-39.pyc and /dev/null differ
diff --git a/deepface/api/src/__pycache__/app.cpython-39.pyc b/deepface/api/src/__pycache__/app.cpython-39.pyc
deleted file mode 100644
index b27876b..0000000
Binary files a/deepface/api/src/__pycache__/app.cpython-39.pyc and /dev/null differ
diff --git a/deepface/api/src/api.py b/deepface/api/src/api.py
deleted file mode 100644
index 8573e32..0000000
--- a/deepface/api/src/api.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import argparse
-import app
-
-if __name__ == "__main__":
- deepface_app = app.create_app()
- parser = argparse.ArgumentParser()
- parser.add_argument("-p", "--port", type=int,
- default=5000, help="Port of serving api")
- args = parser.parse_args()
- deepface_app.run(host="0.0.0.0", port=args.port)
diff --git a/deepface/api/src/app.py b/deepface/api/src/app.py
deleted file mode 100644
index d11f576..0000000
--- a/deepface/api/src/app.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# 3rd party dependencies
-from flask import Flask
-from flask_cors import CORS
-
-# project dependencies
-from deepface import DeepFace
-from deepface.commons.logger import Logger
-from deepface.api.src.modules.core.routes import blueprint
-
-logger = Logger()
-
-
-def create_app():
- app = Flask(__name__)
- CORS(app)
- app.register_blueprint(blueprint)
- logger.info(f"Welcome to DeepFace API v{DeepFace.__version__}!")
- return app
diff --git a/deepface/api/src/modules/__init__.py b/deepface/api/src/modules/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/deepface/api/src/modules/core/__init__.py b/deepface/api/src/modules/core/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/deepface/api/src/modules/core/routes.py b/deepface/api/src/modules/core/routes.py
deleted file mode 100644
index 5f2f5ff..0000000
--- a/deepface/api/src/modules/core/routes.py
+++ /dev/null
@@ -1,122 +0,0 @@
-from flask import Blueprint, request
-from deepface import DeepFace
-from deepface.api.src.modules.core import service
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-blueprint = Blueprint("routes", __name__)
-
-
-@blueprint.route("/")
-def home():
- return f"Welcome to DeepFace API v{DeepFace.__version__}!
"
-
-
-@blueprint.route("/represent", methods=["POST"])
-def represent():
- input_args = request.get_json()
-
- if input_args is None:
- return {"message": "empty input set passed"}
-
- img_path = input_args.get("img") or input_args.get("img_path")
- if img_path is None:
- return {"message": "you must pass img_path input,hhhhh"}
-
- obj = service.represent(
- img_path=img_path,
- model_name=input_args.get("model_name", "VGG-Face"),
- detector_backend=input_args.get("detector_backend", "opencv"),
- enforce_detection=input_args.get("enforce_detection", True),
- align=input_args.get("align", True),
- anti_spoofing=input_args.get("anti_spoofing", False),
- max_faces=input_args.get("max_faces"),
- )
-
- logger.debug(obj)
-
- return obj
-
-
-@blueprint.route("/verify", methods=["POST"])
-def verify():
- input_args = request.get_json()
-
- if input_args is None:
- return {"message": "empty input set passed"}
-
- img1_path = input_args.get("img1") or input_args.get("img1_path")
- img2_path = input_args.get("img2") or input_args.get("img2_path")
-
- if img1_path is None:
- return {"message": "you must pass img1_path input"}
-
- if img2_path is None:
- return {"message": "you must pass img2_path input"}
-
- verification = service.verify(
- img1_path=img1_path,
- img2_path=img2_path,
- model_name=input_args.get("model_name", "VGG-Face"),
- detector_backend=input_args.get("detector_backend", "opencv"),
- distance_metric=input_args.get("distance_metric", "cosine"),
- align=input_args.get("align", True),
- enforce_detection=input_args.get("enforce_detection", True),
- anti_spoofing=input_args.get("anti_spoofing", False),
- )
-
- logger.debug(verification)
-
- return verification
-
-
-@blueprint.route("/analyze", methods=["POST"])
-def analyze():
- input_args = request.get_json()
-
- if input_args is None:
- return {"message": "empty input set passed"}
-
- img_path = input_args.get("img") or input_args.get("img_path")
- if img_path is None:
- return {"message": "you must pass img_path input"}
-
- demographies = service.analyze(
- img_path=img_path,
- actions=input_args.get(
- "actions", ["age", "gender", "emotion", "race"]),
- detector_backend=input_args.get("detector_backend", "opencv"),
- enforce_detection=input_args.get("enforce_detection", True),
- align=input_args.get("align", True),
- anti_spoofing=input_args.get("anti_spoofing", False),
- )
-
- logger.debug(demographies)
-
- return demographies
-
-
-@blueprint.route("/extract", methods=["POST"])
-def extract():
- input_args = request.get_json()
-
- if input_args is None:
- return {"message": "empty input set passed"}
-
- img_path = input_args.get("img") or input_args.get("img_path")
- if img_path is None:
- return {"message": "you must pass img_path input"}
- print('extract:', img_path)
-
- demographies = service.extract(
- img_path=img_path,
- detector_backend=input_args.get("detector_backend", "yolov8"),
- enforce_detection=input_args.get("enforce_detection", False),
- align=input_args.get("align", True),
- anti_spoofing=input_args.get("anti_spoofing", False),
- )
-
- logger.debug(demographies)
-
- return demographies
diff --git a/deepface/api/src/modules/core/service.py b/deepface/api/src/modules/core/service.py
deleted file mode 100644
index 9e087e9..0000000
--- a/deepface/api/src/modules/core/service.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# built-in dependencies
-import traceback
-from typing import Optional
-
-# project dependencies
-from deepface import DeepFace
-
-# pylint: disable=broad-except
-
-
-def represent(
- img_path: str,
- model_name: str,
- detector_backend: str,
- enforce_detection: bool,
- align: bool,
- anti_spoofing: bool,
- max_faces: Optional[int] = None,
-):
- try:
- result = {}
- embedding_objs = DeepFace.represent(
- img_path=img_path,
- model_name=model_name,
- detector_backend=detector_backend,
- enforce_detection=enforce_detection,
- align=align,
- anti_spoofing=anti_spoofing,
- max_faces=max_faces,
- )
- result["results"] = embedding_objs
- return result
- except Exception as err:
- tb_str = traceback.format_exc()
- return {"error": f"Exception while representing: {str(err)} - {tb_str}"}, 400
-
-
-def verify(
- img1_path: str,
- img2_path: str,
- model_name: str,
- detector_backend: str,
- distance_metric: str,
- enforce_detection: bool,
- align: bool,
- anti_spoofing: bool,
-):
- try:
- obj = DeepFace.verify(
- img1_path=img1_path,
- img2_path=img2_path,
- model_name=model_name,
- detector_backend=detector_backend,
- distance_metric=distance_metric,
- align=align,
- enforce_detection=enforce_detection,
- anti_spoofing=anti_spoofing,
- )
- return obj
- except Exception as err:
- tb_str = traceback.format_exc()
- return {"error": f"Exception while verifying: {str(err)} - {tb_str}"}, 400
-
-
-def analyze(
- img_path: str,
- actions: list,
- detector_backend: str,
- enforce_detection: bool,
- align: bool,
- anti_spoofing: bool,
-):
- try:
- result = {}
- demographies = DeepFace.analyze(
- img_path=img_path,
- actions=actions,
- detector_backend=detector_backend,
- enforce_detection=enforce_detection,
- align=align,
- silent=True,
- anti_spoofing=anti_spoofing,
- )
- result["results"] = demographies
- return result
- except Exception as err:
- tb_str = traceback.format_exc()
- return {"error": f"Exception while analyzing: {str(err)} - {tb_str}"}, 400
-
-
-def extract(
- img_path: str,
- detector_backend: str,
- enforce_detection: bool,
- align: bool,
- anti_spoofing: bool,
-):
- try:
- result = {}
- demographies = DeepFace.extract_faces(
- img_path=img_path,
- detector_backend=detector_backend,
- enforce_detection=enforce_detection,
- align=align,
- expand_percentage=0,
- grayscale=False,
- normalize_face=True,
- anti_spoofing=anti_spoofing
- )
- result["results"] = demographies
- return result
- except Exception as err:
- tb_str = traceback.format_exc()
- return {"error": f"Exception while detecting: {str(err)} - {tb_str}"}, 400
diff --git a/deepface/commons/__init__.py b/deepface/commons/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/deepface/commons/constant.py b/deepface/commons/constant.py
deleted file mode 100644
index 22f6349..0000000
--- a/deepface/commons/constant.py
+++ /dev/null
@@ -1,4 +0,0 @@
-import os
-
-SRC_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-ROOT_DIR = os.path.dirname(SRC_DIR)
diff --git a/deepface/commons/folder_utils.py b/deepface/commons/folder_utils.py
deleted file mode 100644
index 416eaba..0000000
--- a/deepface/commons/folder_utils.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import os
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-
-def initialize_folder() -> None:
- """
- Initialize the folder for storing model weights.
-
- Raises:
- OSError: if the folder cannot be created.
- """
- home = get_deepface_home()
- deepface_home_path = os.path.join(home, ".deepface")
- weights_path = os.path.join(deepface_home_path, "weights")
-
- if not os.path.exists(deepface_home_path):
- os.makedirs(deepface_home_path, exist_ok=True)
- logger.info(f"Directory {deepface_home_path} has been created")
-
- if not os.path.exists(weights_path):
- os.makedirs(weights_path, exist_ok=True)
- logger.info(f"Directory {weights_path} has been created")
-
-
-def get_deepface_home() -> str:
- """
- Get the home directory for storing model weights
-
- Returns:
- str: the home directory.
- """
- return str(os.getenv("DEEPFACE_HOME", default=os.path.expanduser("~")))
diff --git a/deepface/commons/image_utils.py b/deepface/commons/image_utils.py
deleted file mode 100644
index 8d9e058..0000000
--- a/deepface/commons/image_utils.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# built-in dependencies
-import os
-import io
-from typing import List, Union, Tuple
-import hashlib
-import base64
-from pathlib import Path
-
-# 3rd party dependencies
-import requests
-import numpy as np
-import cv2
-from PIL import Image
-
-
-def list_images(path: str) -> List[str]:
- """
- List images in a given path
- Args:
- path (str): path's location
- Returns:
- images (list): list of exact image paths
- """
- images = []
- for r, _, f in os.walk(path):
- for file in f:
- exact_path = os.path.join(r, file)
-
- ext_lower = os.path.splitext(exact_path)[-1].lower()
-
- if ext_lower not in {".jpg", ".jpeg", ".png"}:
- continue
-
- with Image.open(exact_path) as img: # lazy
- if img.format.lower() in {"jpeg", "png"}:
- images.append(exact_path)
- return images
-
-
-def find_image_hash(file_path: str) -> str:
- """
- Find the hash of given image file with its properties
- finding the hash of image content is costly operation
- Args:
- file_path (str): exact image path
- Returns:
- hash (str): digest with sha1 algorithm
- """
- file_stats = os.stat(file_path)
-
- # some properties
- file_size = file_stats.st_size
- creation_time = file_stats.st_ctime
- modification_time = file_stats.st_mtime
-
- properties = f"{file_size}-{creation_time}-{modification_time}"
-
- hasher = hashlib.sha1()
- hasher.update(properties.encode("utf-8"))
- return hasher.hexdigest()
-
-
-def load_image(img: Union[str, np.ndarray]) -> Tuple[np.ndarray, str]:
- """
- Load image from path, url, base64 or numpy array.
- Args:
- img: a path, url, base64 or numpy array.
- Returns:
- image (numpy array): the loaded image in BGR format
- image name (str): image name itself
- """
-
- # The image is already a numpy array
- if isinstance(img, np.ndarray):
- return img, "numpy array"
-
- if isinstance(img, Path):
- img = str(img)
-
- if not isinstance(img, str):
- raise ValueError(f"img must be numpy array or str but it is {type(img)}")
-
- # The image is a base64 string
- if img.startswith("data:image/"):
- return load_image_from_base64(img), "base64 encoded string"
-
- # The image is a url
- if img.lower().startswith(("http://", "https://")):
- return load_image_from_web(url=img), img
-
- # The image is a path
- if not os.path.isfile(img):
- raise ValueError(f"Confirm that {img} exists")
-
- # image must be a file on the system then
-
- # image name must have english characters
- if not img.isascii():
- raise ValueError(f"Input image must not have non-english characters - {img}")
-
- img_obj_bgr = cv2.imread(img)
- # img_obj_rgb = cv2.cvtColor(img_obj_bgr, cv2.COLOR_BGR2RGB)
- return img_obj_bgr, img
-
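To make the accepted input types above concrete, a small sketch (paths and URL are hypothetical):

```python
from deepface.commons.image_utils import load_image

# load_image accepts a file path, an http(s) URL, a "data:image/..." base64
# string, or an already-decoded numpy array, and returns (BGR image, name).
img, name = load_image("tests/dataset/img1.jpg")         # hypothetical local file
img_web, _ = load_image("https://example.com/face.jpg")  # hypothetical URL, fetched via requests
```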
-
-def load_image_from_base64(uri: str) -> np.ndarray:
- """
- Load image from base64 string.
- Args:
- uri: a base64 string.
- Returns:
- numpy array: the loaded image.
- """
-
- encoded_data_parts = uri.split(",")
-
- if len(encoded_data_parts) < 2:
- raise ValueError("format error in base64 encoded string")
-
- encoded_data = encoded_data_parts[1]
- decoded_bytes = base64.b64decode(encoded_data)
-
- # similar to find functionality, we are just considering these extensions
- # content type is safer option than file extension
- with Image.open(io.BytesIO(decoded_bytes)) as img:
- file_type = img.format.lower()
- if file_type not in {"jpeg", "png"}:
- raise ValueError(f"Input image can be jpg or png, but it is {file_type}")
-
- nparr = np.frombuffer(decoded_bytes, np.uint8)  # frombuffer replaces the deprecated fromstring
- img_bgr = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
- # img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
- return img_bgr
-
-
-def load_image_from_web(url: str) -> np.ndarray:
- """
- Loading an image from web
- Args:
- url: link for the image
- Returns:
- img (np.ndarray): equivalent to pre-loaded image from opencv (BGR format)
- """
- response = requests.get(url, stream=True, timeout=60)
- response.raise_for_status()
- image_array = np.asarray(bytearray(response.raw.read()), dtype=np.uint8)
- img = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
- return img
diff --git a/deepface/commons/logger.py b/deepface/commons/logger.py
deleted file mode 100644
index f494eb8..0000000
--- a/deepface/commons/logger.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import os
-import logging
-from datetime import datetime
-
-# pylint: disable=broad-except
-class Logger:
- """
- A Logger class for logging messages with a specific log level.
-
- The class follows the singleton design pattern, ensuring that only one
- instance of the Logger is created. The parameters of the first instance
- are preserved across all instances.
- """
-
- __instance = None
-
- def __new__(cls):
- if cls.__instance is None:
- cls.__instance = super(Logger, cls).__new__(cls)
- return cls.__instance
-
- def __init__(self):
- if not hasattr(self, "_singleton_initialized"):
- self._singleton_initialized = True # to prevent multiple initializations
- log_level = os.environ.get("DEEPFACE_LOG_LEVEL", str(logging.INFO))
- try:
- self.log_level = int(log_level)
- except Exception as err:
- self.dump_log(
- f"Exception while parsing $DEEPFACE_LOG_LEVEL."
- f"Expected int but it is {log_level} ({str(err)})."
- "Setting app log level to info."
- )
- self.log_level = logging.INFO
-
- def info(self, message):
- if self.log_level <= logging.INFO:
- self.dump_log(f"{message}")
-
- def debug(self, message):
- if self.log_level <= logging.DEBUG:
- self.dump_log(f"🕷️ {message}")
-
- def warn(self, message):
- if self.log_level <= logging.WARNING:
- self.dump_log(f"⚠️ {message}")
-
- def error(self, message):
- if self.log_level <= logging.ERROR:
- self.dump_log(f"🔴 {message}")
-
- def critical(self, message):
- if self.log_level <= logging.CRITICAL:
- self.dump_log(f"💥 {message}")
-
- def dump_log(self, message):
- print(f"{str(datetime.now())[2:-7]} - {message}")
diff --git a/deepface/commons/package_utils.py b/deepface/commons/package_utils.py
deleted file mode 100644
index 8513f67..0000000
--- a/deepface/commons/package_utils.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# built-in dependencies
-import hashlib
-
-# 3rd party dependencies
-import tensorflow as tf
-
-# package dependencies
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-
-def get_tf_major_version() -> int:
- """
- Find tensorflow's major version
- Returns
- major_version (int)
- """
- return int(tf.__version__.split(".", maxsplit=1)[0])
-
-
-def get_tf_minor_version() -> int:
- """
- Find tensorflow's minor version
- Returns
- minor_version (int)
- """
- return int(tf.__version__.split(".", maxsplit=-1)[1])
-
-
-def validate_for_keras3():
- tf_major = get_tf_major_version()
- tf_minor = get_tf_minor_version()
-
- # tf_keras is a must dependency after tf 2.16
- if tf_major == 1 or (tf_major == 2 and tf_minor < 16):
- return
-
- try:
- import tf_keras
-
- logger.debug(f"tf_keras is already available - {tf_keras.__version__}")
- except ImportError as err:
- # you may consider installing that package here
- raise ValueError(
- f"You have tensorflow {tf.__version__} and this requires "
- "tf-keras package. Please run `pip install tf-keras` "
- "or downgrade your tensorflow."
- ) from err
-
-
-def find_file_hash(file_path: str, hash_algorithm: str = "sha256") -> str:
- """
- Find the hash of a given file with its content
- Args:
- file_path (str): exact path of a given file
- hash_algorithm (str): hash algorithm
- Returns:
- hash (str)
- """
- hash_func = hashlib.new(hash_algorithm)
- with open(file_path, "rb") as f:
- while chunk := f.read(8192):
- hash_func.update(chunk)
- return hash_func.hexdigest()
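
A quick sketch of how these helpers are typically used; `weights.h5` is a placeholder path.

```python
# Quick sketch of the helpers above; "weights.h5" is a placeholder path.
from deepface.commons import package_utils

print(package_utils.get_tf_major_version())  # e.g. 2
print(package_utils.get_tf_minor_version())  # e.g. 16

# no-op on tf < 2.16, raises a ValueError on newer tensorflow if tf-keras is missing
package_utils.validate_for_keras3()

# content-based checksum, handy for spotting a partially downloaded weight file
print(package_utils.find_file_hash("weights.h5", hash_algorithm="sha256"))
```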
diff --git a/deepface/commons/weight_utils.py b/deepface/commons/weight_utils.py
deleted file mode 100644
index f1ccf2c..0000000
--- a/deepface/commons/weight_utils.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# built-in dependencies
-import os
-from typing import Optional
-import zipfile
-import bz2
-
-# 3rd party dependencies
-import gdown
-
-# project dependencies
-from deepface.commons import folder_utils, package_utils
-from deepface.commons.logger import Logger
-
-tf_version = package_utils.get_tf_major_version()
-if tf_version == 1:
- from keras.models import Sequential
-else:
- from tensorflow.keras.models import Sequential
-
-logger = Logger()
-
-ALLOWED_COMPRESS_TYPES = ["zip", "bz2"]
-
-
-def download_weights_if_necessary(
- file_name: str, source_url: str, compress_type: Optional[str] = None
-) -> str:
- """
- Download the weights of a pre-trained model from external source if not downloaded yet.
- Args:
- file_name (str): target file name with extension
- source_url (url): source url to be downloaded
- compress_type (optional str): compress type e.g. zip or bz2
- Returns
- target_file (str): exact path for the target file
- """
- home = folder_utils.get_deepface_home()
-
- target_file = os.path.join(home, ".deepface/weights", file_name)
-
- if os.path.isfile(target_file):
- logger.debug(f"{file_name} is already available at {target_file}")
- return target_file
-
- if compress_type is not None and compress_type not in ALLOWED_COMPRESS_TYPES:
- raise ValueError(f"unimplemented compress type - {compress_type}")
-
- try:
- logger.info(f"🔗 {file_name} will be downloaded from {source_url} to {target_file}...")
-
- if compress_type is None:
- gdown.download(source_url, target_file, quiet=False)
- elif compress_type is not None and compress_type in ALLOWED_COMPRESS_TYPES:
- gdown.download(source_url, f"{target_file}.{compress_type}", quiet=False)
-
- except Exception as err:
- raise ValueError(
- f"⛓️💥 An exception occurred while downloading {file_name} from {source_url}. "
- f"Consider downloading it manually to {target_file}."
- ) from err
-
- # uncompress downloaded file
- if compress_type == "zip":
- with zipfile.ZipFile(f"{target_file}.zip", "r") as zip_ref:
- zip_ref.extractall(os.path.join(home, ".deepface/weights"))
- logger.info(f"{target_file}.zip unzipped")
- elif compress_type == "bz2":
- bz2file = bz2.BZ2File(f"{target_file}.bz2")
- data = bz2file.read()
- with open(target_file, "wb") as f:
- f.write(data)
- logger.info(f"{target_file}.bz2 unzipped")
-
- return target_file
-
-
-def load_model_weights(model: Sequential, weight_file: str) -> Sequential:
- """
- Load pre-trained weights for a given model
- Args:
- model (keras.models.Sequential): pre-built model
- weight_file (str): exact path of pre-trained weights
- Returns:
- model (keras.models.Sequential): pre-built model with
- updated weights
- """
- try:
- model.load_weights(weight_file)
- except Exception as err:
- raise ValueError(
- f"An exception occurred while loading the pre-trained weights from {weight_file}."
- "This might have happened due to an interruption during the download."
- "You may want to delete it and allow DeepFace to download it again during the next run."
- "If the issue persists, consider downloading the file directly from the source "
- "and copying it to the target folder."
- ) from err
- return model
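
The two helpers compose into a fetch-once, load-later flow; a sketch of the caching behaviour, using the age-model release asset referenced later in this diff (a pre-built Keras model of matching architecture would then be passed to `load_model_weights`):

```python
# Sketch of the caching behaviour of download_weights_if_necessary; the URL is the
# age-model release asset referenced later in this diff.
from deepface.commons import weight_utils

URL = "https://github.com/serengil/deepface_models/releases/download/v1.0/age_model_weights.h5"

first = weight_utils.download_weights_if_necessary(
    file_name="age_model_weights.h5", source_url=URL
)
second = weight_utils.download_weights_if_necessary(
    file_name="age_model_weights.h5", source_url=URL
)
assert first == second  # second call is served from ~/.deepface/weights, no re-download
```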
diff --git a/deepface/models/Demography.py b/deepface/models/Demography.py
deleted file mode 100644
index ad93920..0000000
--- a/deepface/models/Demography.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from typing import Union
-from abc import ABC, abstractmethod
-import numpy as np
-from deepface.commons import package_utils
-
-tf_version = package_utils.get_tf_major_version()
-if tf_version == 1:
- from keras.models import Model
-else:
- from tensorflow.keras.models import Model
-
-# Notice that all facial attribute analysis models must be inherited from this class
-
-
-# pylint: disable=too-few-public-methods
-class Demography(ABC):
- model: Model
- model_name: str
-
- @abstractmethod
- def predict(self, img: np.ndarray) -> Union[np.ndarray, np.float64]:
- pass
diff --git a/deepface/models/Detector.py b/deepface/models/Detector.py
deleted file mode 100644
index be1130f..0000000
--- a/deepface/models/Detector.py
+++ /dev/null
@@ -1,69 +0,0 @@
-from typing import List, Tuple, Optional
-from abc import ABC, abstractmethod
-from dataclasses import dataclass
-import numpy as np
-
-# Notice that all facial detector models must be inherited from this class
-
-
-# pylint: disable=unnecessary-pass, too-few-public-methods
-class Detector(ABC):
- @abstractmethod
- def detect_faces(self, img: np.ndarray) -> List["FacialAreaRegion"]:
- """
- Interface for detect and align face
-
- Args:
- img (np.ndarray): pre-loaded image as numpy array
-
- Returns:
- results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
- where each object contains:
-
- - facial_area (FacialAreaRegion): The facial area region represented
- as x, y, w, h, left_eye and right_eye. The left and right eyes are
- labelled with respect to the person, not the observer.
- """
- pass
-
-
-@dataclass
-class FacialAreaRegion:
- """
- Initialize a Face object.
-
- Args:
- x (int): The x-coordinate of the top-left corner of the bounding box.
- y (int): The y-coordinate of the top-left corner of the bounding box.
- w (int): The width of the bounding box.
- h (int): The height of the bounding box.
- left_eye (tuple): The coordinates (x, y) of the left eye with respect to
- the person, not the observer. Default is None.
- right_eye (tuple): The coordinates (x, y) of the right eye with respect to
- the person, not the observer. Default is None.
- confidence (float, optional): Confidence score associated with the face detection.
- Default is None.
- """
- x: int
- y: int
- w: int
- h: int
- left_eye: Optional[Tuple[int, int]] = None
- right_eye: Optional[Tuple[int, int]] = None
- confidence: Optional[float] = None
-
-
-@dataclass
-class DetectedFace:
- """
- Initialize detected face object.
-
- Args:
- img (np.ndarray): detected face image as numpy array
- facial_area (FacialAreaRegion): detected face's metadata (e.g. bounding box)
- confidence (float): confidence score for face detection
- """
- img: np.ndarray
- facial_area: FacialAreaRegion
- confidence: float
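
Any custom backend only needs to honour this interface; a toy sketch that simply reports the whole frame as a single face:

```python
# Toy sketch of a custom detector honouring the Detector interface above: it reports
# the whole frame as one face, which can be useful for pre-cropped inputs.
from typing import List

import numpy as np

from deepface.models.Detector import Detector, FacialAreaRegion


class WholeFrameDetector(Detector):
    def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
        height, width = img.shape[0], img.shape[1]
        return [FacialAreaRegion(x=0, y=0, w=width, h=height)]


faces = WholeFrameDetector().detect_faces(np.zeros((224, 224, 3), dtype=np.uint8))
print(faces[0].w, faces[0].h)  # 224 224
```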
diff --git a/deepface/models/FacialRecognition.py b/deepface/models/FacialRecognition.py
deleted file mode 100644
index a6ee7b5..0000000
--- a/deepface/models/FacialRecognition.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from abc import ABC
-from typing import Any, Union, List, Tuple
-import numpy as np
-from deepface.commons import package_utils
-
-tf_version = package_utils.get_tf_major_version()
-if tf_version == 2:
- from tensorflow.keras.models import Model
-else:
- from keras.models import Model
-
-# Notice that all facial recognition models must be inherited from this class
-
-# pylint: disable=too-few-public-methods
-class FacialRecognition(ABC):
- model: Union[Model, Any]
- model_name: str
- input_shape: Tuple[int, int]
- output_shape: int
-
- def forward(self, img: np.ndarray) -> List[float]:
- if not isinstance(self.model, Model):
- raise ValueError(
- "You must overwrite forward method if it is not a keras model,"
- f"but {self.model_name} not overwritten!"
- )
- # model.predict causes memory issue when it is called in a for loop
- # embedding = model.predict(img, verbose=0)[0].tolist()
- return self.model(img, training=False).numpy()[0].tolist()
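
Non-Keras recognizers plug in by overriding `forward`; a toy sketch under that assumption:

```python
# Toy sketch of a non-Keras recognizer: because self.model is not a keras Model,
# forward must be overridden (the "embedding" here is just the per-channel mean).
from typing import List

import numpy as np

from deepface.models.FacialRecognition import FacialRecognition


class MeanColorRecognizer(FacialRecognition):
    def __init__(self):
        self.model = None  # no underlying keras model
        self.model_name = "MeanColor"
        self.input_shape = (224, 224)
        self.output_shape = 3

    def forward(self, img: np.ndarray) -> List[float]:
        # img arrives as a (1, h, w, 3) batch; return a plain list like the base class does
        return img[0].mean(axis=(0, 1)).tolist()


embedding = MeanColorRecognizer().forward(np.random.rand(1, 224, 224, 3))
print(len(embedding))  # 3
```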
diff --git a/deepface/models/__init__.py b/deepface/models/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/deepface/models/demography/Age.py b/deepface/models/demography/Age.py
deleted file mode 100644
index 29efdf5..0000000
--- a/deepface/models/demography/Age.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# 3rd party dependencies
-import numpy as np
-
-# project dependencies
-from deepface.models.facial_recognition import VGGFace
-from deepface.commons import package_utils, weight_utils
-from deepface.models.Demography import Demography
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# ----------------------------------------
-# dependency configurations
-
-tf_version = package_utils.get_tf_major_version()
-
-if tf_version == 1:
- from keras.models import Model, Sequential
- from keras.layers import Convolution2D, Flatten, Activation
-else:
- from tensorflow.keras.models import Model, Sequential
- from tensorflow.keras.layers import Convolution2D, Flatten, Activation
-
-# ----------------------------------------
-
-# pylint: disable=too-few-public-methods
-class ApparentAgeClient(Demography):
- """
- Age model class
- """
-
- def __init__(self):
- self.model = load_model()
- self.model_name = "Age"
-
- def predict(self, img: np.ndarray) -> np.float64:
- # model.predict causes memory issue when it is called in a for loop
- # age_predictions = self.model.predict(img, verbose=0)[0, :]
- age_predictions = self.model(img, training=False).numpy()[0, :]
- return find_apparent_age(age_predictions)
-
-
-def load_model(
- url="https://github.com/serengil/deepface_models/releases/download/v1.0/age_model_weights.h5",
-) -> Model:
- """
- Construct age model, download its weights and load
- Returns:
- model (Model)
- """
-
- model = VGGFace.base_model()
-
- # --------------------------
-
- classes = 101
- base_model_output = Sequential()
- base_model_output = Convolution2D(classes, (1, 1), name="predictions")(model.layers[-4].output)
- base_model_output = Flatten()(base_model_output)
- base_model_output = Activation("softmax")(base_model_output)
-
- # --------------------------
-
- age_model = Model(inputs=model.input, outputs=base_model_output)
-
- # --------------------------
-
- # load weights
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="age_model_weights.h5", source_url=url
- )
-
- age_model = weight_utils.load_model_weights(
- model=age_model, weight_file=weight_file
- )
-
- return age_model
-
-def find_apparent_age(age_predictions: np.ndarray) -> np.float64:
- """
- Find apparent age prediction from a given probas of ages
- Args:
- age_predictions (?)
- Returns:
- apparent_age (float)
- """
- output_indexes = np.arange(0, 101)
- apparent_age = np.sum(age_predictions * output_indexes)
- return apparent_age
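
`find_apparent_age` is simply an expected value over the 101 age classes; a tiny worked example with made-up probabilities:

```python
# Worked example of the expected-value trick in find_apparent_age: the model emits
# 101 class probabilities (ages 0..100) and the apparent age is their weighted mean.
import numpy as np

probs = np.zeros(101)
probs[[30, 31, 32]] = [0.25, 0.50, 0.25]  # probability mass centred on 31

apparent_age = np.sum(probs * np.arange(0, 101))
print(apparent_age)  # 31.0
```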
diff --git a/deepface/models/demography/Emotion.py b/deepface/models/demography/Emotion.py
deleted file mode 100644
index 3d1d88f..0000000
--- a/deepface/models/demography/Emotion.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# 3rd party dependencies
-import numpy as np
-import cv2
-
-# project dependencies
-from deepface.commons import package_utils, weight_utils
-from deepface.models.Demography import Demography
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# -------------------------------------------
-# pylint: disable=line-too-long
-# -------------------------------------------
-# dependency configuration
-tf_version = package_utils.get_tf_major_version()
-
-if tf_version == 1:
- from keras.models import Sequential
- from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Dense, Dropout
-else:
- from tensorflow.keras.models import Sequential
- from tensorflow.keras.layers import (
- Conv2D,
- MaxPooling2D,
- AveragePooling2D,
- Flatten,
- Dense,
- Dropout,
- )
-# -------------------------------------------
-
-# Labels for the emotions that can be detected by the model.
-labels = ["angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"]
-
-# pylint: disable=too-few-public-methods
-class EmotionClient(Demography):
- """
- Emotion model class
- """
-
- def __init__(self):
- self.model = load_model()
- self.model_name = "Emotion"
-
- def predict(self, img: np.ndarray) -> np.ndarray:
- img_gray = cv2.cvtColor(img[0], cv2.COLOR_BGR2GRAY)
- img_gray = cv2.resize(img_gray, (48, 48))
- img_gray = np.expand_dims(img_gray, axis=0)
-
- # model.predict causes memory issue when it is called in a for loop
- # emotion_predictions = self.model.predict(img_gray, verbose=0)[0, :]
- emotion_predictions = self.model(img_gray, training=False).numpy()[0, :]
-
- return emotion_predictions
-
-
-def load_model(
- url="https://github.com/serengil/deepface_models/releases/download/v1.0/facial_expression_model_weights.h5",
-) -> Sequential:
- """
- Construct emotion model, download and load its weights
- """
-
- num_classes = 7
-
- model = Sequential()
-
- # 1st convolution layer
- model.add(Conv2D(64, (5, 5), activation="relu", input_shape=(48, 48, 1)))
- model.add(MaxPooling2D(pool_size=(5, 5), strides=(2, 2)))
-
- # 2nd convolution layer
- model.add(Conv2D(64, (3, 3), activation="relu"))
- model.add(Conv2D(64, (3, 3), activation="relu"))
- model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
-
- # 3rd convolution layer
- model.add(Conv2D(128, (3, 3), activation="relu"))
- model.add(Conv2D(128, (3, 3), activation="relu"))
- model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
-
- model.add(Flatten())
-
- # fully connected neural networks
- model.add(Dense(1024, activation="relu"))
- model.add(Dropout(0.2))
- model.add(Dense(1024, activation="relu"))
- model.add(Dropout(0.2))
-
- model.add(Dense(num_classes, activation="softmax"))
-
- # ----------------------------
-
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="facial_expression_model_weights.h5", source_url=url
- )
-
- model = weight_utils.load_model_weights(
- model=model, weight_file=weight_file
- )
-
- return model
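
`predict` expects a batch whose first element is a BGR face crop and performs the grayscale conversion itself; a standalone sketch of those preprocessing steps (the random array stands in for a detected face crop):

```python
# Standalone sketch of the preprocessing EmotionClient.predict performs before
# inference: BGR crop -> grayscale -> 48x48 -> batch of one.
import cv2
import numpy as np

face_bgr = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)  # stand-in crop

img_gray = cv2.cvtColor(face_bgr, cv2.COLOR_BGR2GRAY)
img_gray = cv2.resize(img_gray, (48, 48))
batch = np.expand_dims(img_gray, axis=0)

print(batch.shape)  # (1, 48, 48)
```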
diff --git a/deepface/models/demography/Gender.py b/deepface/models/demography/Gender.py
deleted file mode 100644
index 2f3a142..0000000
--- a/deepface/models/demography/Gender.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# 3rd party dependencies
-import numpy as np
-
-# project dependencies
-from deepface.models.facial_recognition import VGGFace
-from deepface.commons import package_utils, weight_utils
-from deepface.models.Demography import Demography
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# -------------------------------------
-# pylint: disable=line-too-long
-# -------------------------------------
-# dependency configurations
-
-tf_version = package_utils.get_tf_major_version()
-if tf_version == 1:
- from keras.models import Model, Sequential
- from keras.layers import Convolution2D, Flatten, Activation
-else:
- from tensorflow.keras.models import Model, Sequential
- from tensorflow.keras.layers import Convolution2D, Flatten, Activation
-# -------------------------------------
-
-# Labels for the genders that can be detected by the model.
-labels = ["Woman", "Man"]
-
-# pylint: disable=too-few-public-methods
-class GenderClient(Demography):
- """
- Gender model class
- """
-
- def __init__(self):
- self.model = load_model()
- self.model_name = "Gender"
-
- def predict(self, img: np.ndarray) -> np.ndarray:
- # model.predict causes memory issue when it is called in a for loop
- # return self.model.predict(img, verbose=0)[0, :]
- return self.model(img, training=False).numpy()[0, :]
-
-
-def load_model(
- url="https://github.com/serengil/deepface_models/releases/download/v1.0/gender_model_weights.h5",
-) -> Model:
- """
- Construct gender model, download its weights and load
- Returns:
- model (Model)
- """
-
- model = VGGFace.base_model()
-
- # --------------------------
-
- classes = 2
- base_model_output = Sequential()
- base_model_output = Convolution2D(classes, (1, 1), name="predictions")(model.layers[-4].output)
- base_model_output = Flatten()(base_model_output)
- base_model_output = Activation("softmax")(base_model_output)
-
- # --------------------------
-
- gender_model = Model(inputs=model.input, outputs=base_model_output)
-
- # --------------------------
-
- # load weights
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="gender_model_weights.h5", source_url=url
- )
-
- gender_model = weight_utils.load_model_weights(
- model=gender_model, weight_file=weight_file
- )
-
- return gender_model
diff --git a/deepface/models/demography/Race.py b/deepface/models/demography/Race.py
deleted file mode 100644
index a393667..0000000
--- a/deepface/models/demography/Race.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# 3rd party dependencies
-import numpy as np
-
-# project dependencies
-from deepface.models.facial_recognition import VGGFace
-from deepface.commons import package_utils, weight_utils
-from deepface.models.Demography import Demography
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# --------------------------
-# pylint: disable=line-too-long
-# --------------------------
-# dependency configurations
-tf_version = package_utils.get_tf_major_version()
-
-if tf_version == 1:
- from keras.models import Model, Sequential
- from keras.layers import Convolution2D, Flatten, Activation
-else:
- from tensorflow.keras.models import Model, Sequential
- from tensorflow.keras.layers import Convolution2D, Flatten, Activation
-# --------------------------
-# Labels for the ethnic phenotypes that can be detected by the model.
-labels = ["asian", "indian", "black", "white", "middle eastern", "latino hispanic"]
-
-# pylint: disable=too-few-public-methods
-class RaceClient(Demography):
- """
- Race model class
- """
-
- def __init__(self):
- self.model = load_model()
- self.model_name = "Race"
-
- def predict(self, img: np.ndarray) -> np.ndarray:
- # model.predict causes memory issue when it is called in a for loop
- # return self.model.predict(img, verbose=0)[0, :]
- return self.model(img, training=False).numpy()[0, :]
-
-
-def load_model(
- url="https://github.com/serengil/deepface_models/releases/download/v1.0/race_model_single_batch.h5",
-) -> Model:
- """
- Construct race model, download its weights and load
- """
-
- model = VGGFace.base_model()
-
- # --------------------------
-
- classes = 6
- base_model_output = Sequential()
- base_model_output = Convolution2D(classes, (1, 1), name="predictions")(model.layers[-4].output)
- base_model_output = Flatten()(base_model_output)
- base_model_output = Activation("softmax")(base_model_output)
-
- # --------------------------
-
- race_model = Model(inputs=model.input, outputs=base_model_output)
-
- # --------------------------
-
- # load weights
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="race_model_single_batch.h5", source_url=url
- )
-
- race_model = weight_utils.load_model_weights(
- model=race_model, weight_file=weight_file
- )
-
- return race_model
diff --git a/deepface/models/demography/__init__.py b/deepface/models/demography/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/deepface/models/face_detection/CenterFace.py b/deepface/models/face_detection/CenterFace.py
deleted file mode 100644
index d8e08bd..0000000
--- a/deepface/models/face_detection/CenterFace.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# built-in dependencies
-import os
-from typing import List
-
-# 3rd party dependencies
-import numpy as np
-import cv2
-
-# project dependencies
-from deepface.commons import weight_utils
-from deepface.models.Detector import Detector, FacialAreaRegion
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# pylint: disable=c-extension-no-member
-
-WEIGHTS_URL = "https://github.com/Star-Clouds/CenterFace/raw/master/models/onnx/centerface.onnx"
-
-
-class CenterFaceClient(Detector):
- def __init__(self):
- # BUG: model must be flushed for each call
- # self.model = self.build_model()
- pass
-
- def build_model(self):
- """
- Download pre-trained weights of CenterFace model if necessary and load built model
- """
- weights_path = weight_utils.download_weights_if_necessary(
- file_name="centerface.onnx", source_url=WEIGHTS_URL
- )
-
- return CenterFace(weight_path=weights_path)
-
- def detect_faces(self, img: np.ndarray) -> List["FacialAreaRegion"]:
- """
- Detect and align face with CenterFace
-
- Args:
- img (np.ndarray): pre-loaded image as numpy array
-
- Returns:
- results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
- """
- resp = []
-
- threshold = float(os.getenv("CENTERFACE_THRESHOLD", "0.80"))
-
- # BUG: model causes problematic results from 2nd call if it is not flushed
- # detections, landmarks = self.model.forward(
- # img, img.shape[0], img.shape[1], threshold=threshold
- # )
- detections, landmarks = self.build_model().forward(
- img, img.shape[0], img.shape[1], threshold=threshold
- )
-
- for i, detection in enumerate(detections):
- boxes, confidence = detection[:4], detection[4]
-
- x = boxes[0]
- y = boxes[1]
- w = boxes[2] - x
- h = boxes[3] - y
-
- landmark = landmarks[i]
-
- right_eye = (int(landmark[0]), int(landmark[1]))
- left_eye = (int(landmark[2]), int(landmark[3]))
- # nose = (int(landmark[4]), int(landmark [5]))
- # mouth_right = (int(landmark[6]), int(landmark [7]))
- # mouth_left = (int(landmark[8]), int(landmark [9]))
-
- facial_area = FacialAreaRegion(
- x=int(x),
- y=int(y),
- w=int(w),
- h=int(h),
- left_eye=left_eye,
- right_eye=right_eye,
- confidence=min(max(0, float(confidence)), 1.0),
- )
- resp.append(facial_area)
-
- return resp
-
-
-class CenterFace:
- """
- This class is heavily inspired by
- github.com/Star-Clouds/CenterFace/blob/master/prj-python/centerface.py
- """
-
- def __init__(self, weight_path: str):
- self.net = cv2.dnn.readNetFromONNX(weight_path)
- self.img_h_new, self.img_w_new, self.scale_h, self.scale_w = 0, 0, 0, 0
-
- def forward(self, img, height, width, threshold=0.5):
- self.img_h_new, self.img_w_new, self.scale_h, self.scale_w = self.transform(height, width)
- return self.inference_opencv(img, threshold)
-
- def inference_opencv(self, img, threshold):
- blob = cv2.dnn.blobFromImage(
- img,
- scalefactor=1.0,
- size=(self.img_w_new, self.img_h_new),
- mean=(0, 0, 0),
- swapRB=True,
- crop=False,
- )
- self.net.setInput(blob)
- heatmap, scale, offset, lms = self.net.forward(["537", "538", "539", "540"])
- return self.postprocess(heatmap, lms, offset, scale, threshold)
-
- def transform(self, h, w):
- img_h_new, img_w_new = int(np.ceil(h / 32) * 32), int(np.ceil(w / 32) * 32)
- scale_h, scale_w = img_h_new / h, img_w_new / w
- return img_h_new, img_w_new, scale_h, scale_w
-
- def postprocess(self, heatmap, lms, offset, scale, threshold):
- dets, lms = self.decode(
- heatmap, scale, offset, lms, (self.img_h_new, self.img_w_new), threshold=threshold
- )
- if len(dets) > 0:
- dets[:, 0:4:2], dets[:, 1:4:2] = (
- dets[:, 0:4:2] / self.scale_w,
- dets[:, 1:4:2] / self.scale_h,
- )
- lms[:, 0:10:2], lms[:, 1:10:2] = (
- lms[:, 0:10:2] / self.scale_w,
- lms[:, 1:10:2] / self.scale_h,
- )
- else:
- dets = np.empty(shape=[0, 5], dtype=np.float32)
- lms = np.empty(shape=[0, 10], dtype=np.float32)
- return dets, lms
-
- def decode(self, heatmap, scale, offset, landmark, size, threshold=0.1):
- heatmap = np.squeeze(heatmap)
- scale0, scale1 = scale[0, 0, :, :], scale[0, 1, :, :]
- offset0, offset1 = offset[0, 0, :, :], offset[0, 1, :, :]
- c0, c1 = np.where(heatmap > threshold)
- boxes, lms = [], []
- if len(c0) > 0:
- # pylint:disable=consider-using-enumerate
- for i in range(len(c0)):
- s0, s1 = np.exp(scale0[c0[i], c1[i]]) * 4, np.exp(scale1[c0[i], c1[i]]) * 4
- o0, o1 = offset0[c0[i], c1[i]], offset1[c0[i], c1[i]]
- s = heatmap[c0[i], c1[i]]
- x1, y1 = max(0, (c1[i] + o1 + 0.5) * 4 - s1 / 2), max(
- 0, (c0[i] + o0 + 0.5) * 4 - s0 / 2
- )
- x1, y1 = min(x1, size[1]), min(y1, size[0])
- boxes.append([x1, y1, min(x1 + s1, size[1]), min(y1 + s0, size[0]), s])
- lm = []
- for j in range(5):
- lm.append(landmark[0, j * 2 + 1, c0[i], c1[i]] * s1 + x1)
- lm.append(landmark[0, j * 2, c0[i], c1[i]] * s0 + y1)
- lms.append(lm)
- boxes = np.asarray(boxes, dtype=np.float32)
- keep = self.nms(boxes[:, :4], boxes[:, 4], 0.3)
- boxes = boxes[keep, :]
- lms = np.asarray(lms, dtype=np.float32)
- lms = lms[keep, :]
- return boxes, lms
-
- def nms(self, boxes, scores, nms_thresh):
- x1 = boxes[:, 0]
- y1 = boxes[:, 1]
- x2 = boxes[:, 2]
- y2 = boxes[:, 3]
- areas = (x2 - x1 + 1) * (y2 - y1 + 1)
- order = np.argsort(scores)[::-1]
- num_detections = boxes.shape[0]
- suppressed = np.zeros((num_detections,), dtype=bool)
-
- keep = []
- for _i in range(num_detections):
- i = order[_i]
- if suppressed[i]:
- continue
- keep.append(i)
-
- ix1 = x1[i]
- iy1 = y1[i]
- ix2 = x2[i]
- iy2 = y2[i]
- iarea = areas[i]
-
- for _j in range(_i + 1, num_detections):
- j = order[_j]
- if suppressed[j]:
- continue
-
- xx1 = max(ix1, x1[j])
- yy1 = max(iy1, y1[j])
- xx2 = min(ix2, x2[j])
- yy2 = min(iy2, y2[j])
- w = max(0, xx2 - xx1 + 1)
- h = max(0, yy2 - yy1 + 1)
-
- inter = w * h
- ovr = inter / (iarea + areas[j] - inter)
- if ovr >= nms_thresh:
- suppressed[j] = True
-
- return keep
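
The `nms` method above is a standard greedy non-maximum suppression with an IoU threshold of 0.3; a toy illustration of the overlap test it relies on, with made-up box coordinates:

```python
# Toy illustration of the overlap test inside CenterFace.nms: two boxes in
# (x1, y1, x2, y2) form, IoU computed with the same +1 pixel convention as above.
import numpy as np

box_a = np.array([10, 10, 60, 60], dtype=np.float32)
box_b = np.array([30, 30, 80, 80], dtype=np.float32)

xx1, yy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
xx2, yy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
inter = max(0.0, xx2 - xx1 + 1) * max(0.0, yy2 - yy1 + 1)

area_a = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
area_b = (box_b[2] - box_b[0] + 1) * (box_b[3] - box_b[1] + 1)
iou = inter / (area_a + area_b - inter)

print(round(float(iou), 3))  # ~0.227: below the 0.3 threshold, so neither box is suppressed
```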
diff --git a/deepface/models/face_detection/Dlib.py b/deepface/models/face_detection/Dlib.py
deleted file mode 100644
index c96f1a3..0000000
--- a/deepface/models/face_detection/Dlib.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# built-in dependencies
-from typing import List
-
-# 3rd party dependencies
-import numpy as np
-
-# project dependencies
-from deepface.commons import weight_utils
-from deepface.models.Detector import Detector, FacialAreaRegion
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-
-class DlibClient(Detector):
- def __init__(self):
- self.model = self.build_model()
-
- def build_model(self) -> dict:
- """
- Build a dlib hog face detector model
- Returns:
- model (Any)
- """
- # this is not a must dependency. do not import it in the global level.
- try:
- import dlib
- except ModuleNotFoundError as e:
- raise ImportError(
- "Dlib is an optional detector, ensure the library is installed. "
- "Please install using 'pip install dlib'"
- ) from e
-
- # check required file exists in the home/.deepface/weights folder
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="shape_predictor_5_face_landmarks.dat",
- source_url="http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2",
- compress_type="bz2",
- )
-
- face_detector = dlib.get_frontal_face_detector()
- sp = dlib.shape_predictor(weight_file)
-
- detector = {}
- detector["face_detector"] = face_detector
- detector["sp"] = sp
- return detector
-
- def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
- """
- Detect and align face with dlib
-
- Args:
- img (np.ndarray): pre-loaded image as numpy array
-
- Returns:
- results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
- """
- resp = []
-
- face_detector = self.model["face_detector"]
-
- # note that, by design, dlib's fhog face detector scores are >0 but not capped at 1
- detections, scores, _ = face_detector.run(img, 1)
-
- if len(detections) > 0:
-
- for idx, detection in enumerate(detections):
- left = detection.left()
- right = detection.right()
- top = detection.top()
- bottom = detection.bottom()
-
- y = int(max(0, top))
- h = int(min(bottom, img.shape[0]) - y)
- x = int(max(0, left))
- w = int(min(right, img.shape[1]) - x)
-
- shape = self.model["sp"](img, detection)
-
- right_eye = (
- int((shape.part(2).x + shape.part(3).x) // 2),
- int((shape.part(2).y + shape.part(3).y) // 2),
- )
- left_eye = (
- int((shape.part(0).x + shape.part(1).x) // 2),
- int((shape.part(0).y + shape.part(1).y) // 2),
- )
-
- # never saw confidence higher than +3.5 github.com/davisking/dlib/issues/761
- confidence = scores[idx]
-
- facial_area = FacialAreaRegion(
- x=x,
- y=y,
- w=w,
- h=h,
- left_eye=left_eye,
- right_eye=right_eye,
- confidence=min(max(0, confidence), 1.0),
- )
- resp.append(facial_area)
-
- return resp
diff --git a/deepface/models/face_detection/FastMtCnn.py b/deepface/models/face_detection/FastMtCnn.py
deleted file mode 100644
index bc792a4..0000000
--- a/deepface/models/face_detection/FastMtCnn.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# built-in dependencies
-from typing import Any, Union, List
-
-# 3rd party dependencies
-import cv2
-import numpy as np
-
-# project dependencies
-from deepface.models.Detector import Detector, FacialAreaRegion
-
-
-class FastMtCnnClient(Detector):
- """
- Fast MtCnn Detector from github.com/timesler/facenet-pytorch
- """
-
- def __init__(self):
- self.model = self.build_model()
-
- def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
- """
- Detect and align face with mtcnn
-
- Args:
- img (np.ndarray): pre-loaded image as numpy array
-
- Returns:
- results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
- """
- resp = []
-
- img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # mtcnn expects RGB but OpenCV reads BGR
- detections = self.model.detect(
- img_rgb, landmarks=True
- ) # returns boundingbox, prob, landmark
- if (
- detections is not None
- and len(detections) > 0
- and not any(detection is None for detection in detections) # issue 1043
- ):
- for regions, confidence, eyes in zip(*detections):
- x, y, w, h = xyxy_to_xywh(regions)
- right_eye = eyes[0]
- left_eye = eyes[1]
-
- left_eye = tuple(int(i) for i in left_eye)
- right_eye = tuple(int(i) for i in right_eye)
-
- facial_area = FacialAreaRegion(
- x=x,
- y=y,
- w=w,
- h=h,
- left_eye=left_eye,
- right_eye=right_eye,
- confidence=confidence,
- )
- resp.append(facial_area)
-
- return resp
-
- def build_model(self) -> Any:
- """
- Build a fast mtcnn face detector model
- Returns:
- model (Any)
- """
- # this is not a must dependency. do not import it in the global level.
- try:
- from facenet_pytorch import MTCNN as fast_mtcnn
- import torch
- except ModuleNotFoundError as e:
- raise ImportError(
- "FastMtcnn is an optional detector, ensure the library is installed. "
- "Please install using 'pip install facenet-pytorch'"
- ) from e
-
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
- face_detector = fast_mtcnn(device=device)
-
- return face_detector
-
-
-def xyxy_to_xywh(regions: Union[list, tuple]) -> tuple:
- """
- Convert (x1, y1, x2, y2) format to (x, y, w, h) format.
- Args:
- regions (list or tuple): facial area coordinates as x, y, x+w, y+h
- Returns:
- regions (tuple): facial area coordinates as x, y, w, h
- """
- x, y, x_plus_w, y_plus_h = regions[0], regions[1], regions[2], regions[3]
- w = x_plus_w - x
- h = y_plus_h - y
- return (x, y, w, h)
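
A quick sanity check of the conversion helper above:

```python
# Quick check of xyxy_to_xywh: a 100x60 box whose top-left corner is (10, 20).
from deepface.models.face_detection.FastMtCnn import xyxy_to_xywh

print(xyxy_to_xywh((10, 20, 110, 80)))  # (10, 20, 100, 60)
```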
diff --git a/deepface/models/face_detection/MediaPipe.py b/deepface/models/face_detection/MediaPipe.py
deleted file mode 100644
index 61a84fd..0000000
--- a/deepface/models/face_detection/MediaPipe.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# built-in dependencies
-from typing import Any, List
-
-# 3rd party dependencies
-import numpy as np
-
-# project dependencies
-from deepface.models.Detector import Detector, FacialAreaRegion
-
-
-class MediaPipeClient(Detector):
- """
- MediaPipe from google.github.io/mediapipe/solutions/face_detection
- """
-
- def __init__(self):
- self.model = self.build_model()
-
- def build_model(self) -> Any:
- """
- Build a mediapipe face detector model
- Returns:
- model (Any)
- """
- # this is not a must dependency. do not import it in the global level.
- try:
- import mediapipe as mp
- except ModuleNotFoundError as e:
- raise ImportError(
- "MediaPipe is an optional detector, ensure the library is installed. "
- "Please install using 'pip install mediapipe'"
- ) from e
-
- mp_face_detection = mp.solutions.face_detection
- face_detection = mp_face_detection.FaceDetection(min_detection_confidence=0.7)
- return face_detection
-
- def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
- """
- Detect and align face with mediapipe
-
- Args:
- img (np.ndarray): pre-loaded image as numpy array
-
- Returns:
- results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
- """
- resp = []
-
- img_width = img.shape[1]
- img_height = img.shape[0]
-
- results = self.model.process(img)
-
- # If no face has been detected, return an empty list
- if results.detections is None:
- return resp
-
- # Extract the bounding box, the landmarks and the confidence score
- for current_detection in results.detections:
- (confidence,) = current_detection.score
-
- bounding_box = current_detection.location_data.relative_bounding_box
- landmarks = current_detection.location_data.relative_keypoints
-
- x = int(bounding_box.xmin * img_width)
- w = int(bounding_box.width * img_width)
- y = int(bounding_box.ymin * img_height)
- h = int(bounding_box.height * img_height)
-
- right_eye = (int(landmarks[0].x * img_width), int(landmarks[0].y * img_height))
- left_eye = (int(landmarks[1].x * img_width), int(landmarks[1].y * img_height))
- # nose = (int(landmarks[2].x * img_width), int(landmarks[2].y * img_height))
- # mouth = (int(landmarks[3].x * img_width), int(landmarks[3].y * img_height))
- # right_ear = (int(landmarks[4].x * img_width), int(landmarks[4].y * img_height))
- # left_ear = (int(landmarks[5].x * img_width), int(landmarks[5].y * img_height))
-
- facial_area = FacialAreaRegion(
- x=x,
- y=y,
- w=w,
- h=h,
- left_eye=left_eye,
- right_eye=right_eye,
- confidence=float(confidence),
- )
- resp.append(facial_area)
-
- return resp
diff --git a/deepface/models/face_detection/MtCnn.py b/deepface/models/face_detection/MtCnn.py
deleted file mode 100644
index 014e4a5..0000000
--- a/deepface/models/face_detection/MtCnn.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# built-in dependencies
-from typing import List
-
-# 3rd party dependencies
-import numpy as np
-from mtcnn import MTCNN
-
-# project dependencies
-from deepface.models.Detector import Detector, FacialAreaRegion
-
-# pylint: disable=too-few-public-methods
-class MtCnnClient(Detector):
- """
- Class to cover common face detection functionality for the MtCnn backend
- """
-
- def __init__(self):
- self.model = MTCNN()
-
- def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
- """
- Detect and align face with mtcnn
-
- Args:
- img (np.ndarray): pre-loaded image as numpy array
-
- Returns:
- results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
- """
-
- resp = []
-
- # mtcnn expects RGB but OpenCV reads BGR
- # img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
- img_rgb = img[:, :, ::-1]
- detections = self.model.detect_faces(img_rgb)
-
- if detections is not None and len(detections) > 0:
-
- for current_detection in detections:
- x, y, w, h = current_detection["box"]
- confidence = current_detection["confidence"]
- # mtcnn detector assigns left eye with respect to the observer
- # but we are setting it with respect to the person itself
- left_eye = current_detection["keypoints"]["right_eye"]
- right_eye = current_detection["keypoints"]["left_eye"]
-
- facial_area = FacialAreaRegion(
- x=x,
- y=y,
- w=w,
- h=h,
- left_eye=left_eye,
- right_eye=right_eye,
- confidence=confidence,
- )
-
- resp.append(facial_area)
-
- return resp
diff --git a/deepface/models/face_detection/OpenCv.py b/deepface/models/face_detection/OpenCv.py
deleted file mode 100644
index 4abb6da..0000000
--- a/deepface/models/face_detection/OpenCv.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# built-in dependencies
-import os
-from typing import Any, List
-
-# 3rd party dependencies
-import cv2
-import numpy as np
-
-# project dependencies
-from deepface.models.Detector import Detector, FacialAreaRegion
-
-
-class OpenCvClient(Detector):
- """
- Class to cover common face detection functionality for the OpenCv backend
- """
-
- def __init__(self):
- self.model = self.build_model()
-
- def build_model(self):
- """
- Build opencv's face and eye detector models
- Returns:
- model (dict): including face_detector and eye_detector keys
- """
- detector = {}
- detector["face_detector"] = self.__build_cascade("haarcascade")
- detector["eye_detector"] = self.__build_cascade("haarcascade_eye")
- return detector
-
- def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
- """
- Detect and align face with opencv
-
- Args:
- img (np.ndarray): pre-loaded image as numpy array
-
- Returns:
- results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
- """
- resp = []
-
- detected_face = None
-
- faces = []
- try:
- # faces = detector["face_detector"].detectMultiScale(img, 1.3, 5)
-
- # note that, by design, opencv's haarcascade scores are >0 but not capped at 1
- faces, _, scores = self.model["face_detector"].detectMultiScale3(
- img, 1.1, 10, outputRejectLevels=True
- )
- except Exception:
- pass
-
- if len(faces) > 0:
- for (x, y, w, h), confidence in zip(faces, scores):
- detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]
- left_eye, right_eye = self.find_eyes(img=detected_face)
-
- # eyes were found in the detected face rather than the full image,
- # so the detected face's coordinates must be added back
- if left_eye is not None:
- left_eye = (int(x + left_eye[0]), int(y + left_eye[1]))
- if right_eye is not None:
- right_eye = (int(x + right_eye[0]), int(y + right_eye[1]))
-
- facial_area = FacialAreaRegion(
- x=x,
- y=y,
- w=w,
- h=h,
- left_eye=left_eye,
- right_eye=right_eye,
- confidence=(100 - confidence) / 100,
- )
- resp.append(facial_area)
-
- return resp
-
- def find_eyes(self, img: np.ndarray) -> tuple:
- """
- Find the left and right eye coordinates of given image
- Args:
- img (np.ndarray): given image
- Returns:
- left and right eye (tuple)
- """
- left_eye = None
- right_eye = None
-
- # if image has unexpectedly 0 dimension then skip alignment
- if img.shape[0] == 0 or img.shape[1] == 0:
- return left_eye, right_eye
-
- detected_face_gray = cv2.cvtColor(
- img, cv2.COLOR_BGR2GRAY
- ) # eye detector expects gray scale image
-
- eyes = self.model["eye_detector"].detectMultiScale(detected_face_gray, 1.1, 10)
-
- # ----------------------------------------------------------------
-
- # opencv eye detection module is not strong. it might find more than 2 eyes!
- # besides, it returns eyes with different order in each call (issue 435)
- # this is an important issue because opencv is the default detector and ssd also uses this
- # find the largest 2 eyes. Thanks to @thelostpeace
-
- eyes = sorted(eyes, key=lambda v: abs(v[2] * v[3]), reverse=True)
-
- # ----------------------------------------------------------------
- if len(eyes) >= 2:
- # decide left and right eye
-
- eye_1 = eyes[0]
- eye_2 = eyes[1]
-
- if eye_1[0] < eye_2[0]:
- right_eye = eye_1
- left_eye = eye_2
- else:
- right_eye = eye_2
- left_eye = eye_1
-
- # -----------------------
- # find center of eyes
- left_eye = (
- int(left_eye[0] + (left_eye[2] / 2)),
- int(left_eye[1] + (left_eye[3] / 2)),
- )
- right_eye = (
- int(right_eye[0] + (right_eye[2] / 2)),
- int(right_eye[1] + (right_eye[3] / 2)),
- )
- return left_eye, right_eye
-
- def __build_cascade(self, model_name="haarcascade") -> Any:
- """
- Build an opencv face or eye detector model
- Returns:
- model (Any)
- """
- opencv_path = self.__get_opencv_path()
- if model_name == "haarcascade":
- face_detector_path = os.path.join(opencv_path, "haarcascade_frontalface_default.xml")
- if not os.path.isfile(face_detector_path):
- raise ValueError(
- "Confirm that opencv is installed on your environment! "
- f"Expected path {face_detector_path} violated."
- )
- detector = cv2.CascadeClassifier(face_detector_path)
-
- elif model_name == "haarcascade_eye":
- eye_detector_path = os.path.join(opencv_path, "haarcascade_eye.xml")
- if not os.path.isfile(eye_detector_path):
- raise ValueError(
- "Confirm that opencv is installed on your environment! "
- f"Expected path {eye_detector_path} violated."
- )
- detector = cv2.CascadeClassifier(eye_detector_path)
-
- else:
- raise ValueError(f"unimplemented model_name for build_cascade - {model_name}")
-
- return detector
-
- def __get_opencv_path(self) -> str:
- """
- Returns where opencv is installed
- Returns:
- installation_path (str)
- """
- return os.path.join(os.path.dirname(cv2.__file__), "data")
diff --git a/deepface/models/face_detection/RetinaFace.py b/deepface/models/face_detection/RetinaFace.py
deleted file mode 100644
index a3b1468..0000000
--- a/deepface/models/face_detection/RetinaFace.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# built-in dependencies
-from typing import List
-
-# 3rd party dependencies
-import numpy as np
-from retinaface import RetinaFace as rf
-
-# project dependencies
-from deepface.models.Detector import Detector, FacialAreaRegion
-
-# pylint: disable=too-few-public-methods
-class RetinaFaceClient(Detector):
- def __init__(self):
- self.model = rf.build_model()
-
- def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
- """
- Detect and align face with retinaface
-
- Args:
- img (np.ndarray): pre-loaded image as numpy array
-
- Returns:
- results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
- """
- resp = []
-
- obj = rf.detect_faces(img, model=self.model, threshold=0.9)
-
- if not isinstance(obj, dict):
- return resp
-
- for face_idx in obj.keys():
- identity = obj[face_idx]
- detection = identity["facial_area"]
-
- y = detection[1]
- h = detection[3] - y
- x = detection[0]
- w = detection[2] - x
-
- # retinaface sets left and right eyes with respect to the person
- left_eye = identity["landmarks"]["left_eye"]
- right_eye = identity["landmarks"]["right_eye"]
-
- # eyes are list of float, need to cast them tuple of int
- left_eye = tuple(int(i) for i in left_eye)
- right_eye = tuple(int(i) for i in right_eye)
-
- confidence = identity["score"]
-
- facial_area = FacialAreaRegion(
- x=x,
- y=y,
- w=w,
- h=h,
- left_eye=left_eye,
- right_eye=right_eye,
- confidence=confidence,
- )
-
- resp.append(facial_area)
-
- return resp
diff --git a/deepface/models/face_detection/Ssd.py b/deepface/models/face_detection/Ssd.py
deleted file mode 100644
index 4250888..0000000
--- a/deepface/models/face_detection/Ssd.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# built-in dependencies
-from typing import List
-from enum import IntEnum
-
-# 3rd party dependencies
-import cv2
-import numpy as np
-
-# project dependencies
-from deepface.models.face_detection import OpenCv
-from deepface.commons import weight_utils
-from deepface.models.Detector import Detector, FacialAreaRegion
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# pylint: disable=line-too-long, c-extension-no-member
-
-
-class SsdClient(Detector):
- def __init__(self):
- self.model = self.build_model()
-
- def build_model(self) -> dict:
- """
- Build a ssd detector model
- Returns:
- model (dict)
- """
-
- # model structure
- output_model = weight_utils.download_weights_if_necessary(
- file_name="deploy.prototxt",
- source_url="https://github.com/opencv/opencv/raw/3.4.0/samples/dnn/face_detector/deploy.prototxt",
- )
-
- # pre-trained weights
- output_weights = weight_utils.download_weights_if_necessary(
- file_name="res10_300x300_ssd_iter_140000.caffemodel",
- source_url="https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel",
- )
-
- try:
- face_detector = cv2.dnn.readNetFromCaffe(output_model, output_weights)
- except Exception as err:
- raise ValueError(
- "Exception while calling opencv.dnn module."
- + "This is an optional dependency."
- + "You can install it as pip install opencv-contrib-python."
- ) from err
-
- return {"face_detector": face_detector, "opencv_module": OpenCv.OpenCvClient()}
-
- def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
- """
- Detect and align face with ssd
-
- Args:
- img (np.ndarray): pre-loaded image as numpy array
-
- Returns:
- results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
- """
-
- # Because cv2.dnn.blobFromImage expects CV_8U (8-bit unsigned integer) values
- if img.dtype != np.uint8:
- img = img.astype(np.uint8)
-
- opencv_module: OpenCv.OpenCvClient = self.model["opencv_module"]
-
- target_size = (300, 300)
-
- original_size = img.shape
-
- current_img = cv2.resize(img, target_size)
-
- aspect_ratio_x = original_size[1] / target_size[1]
- aspect_ratio_y = original_size[0] / target_size[0]
-
- imageBlob = cv2.dnn.blobFromImage(image=current_img)
-
- face_detector = self.model["face_detector"]
- face_detector.setInput(imageBlob)
- detections = face_detector.forward()
-
- class ssd_labels(IntEnum):
- img_id = 0
- is_face = 1
- confidence = 2
- left = 3
- top = 4
- right = 5
- bottom = 6
-
- faces = detections[0][0]
- faces = faces[
- (faces[:, ssd_labels.is_face] == 1) & (faces[:, ssd_labels.confidence] >= 0.90)
- ]
- margins = [ssd_labels.left, ssd_labels.top, ssd_labels.right, ssd_labels.bottom]
- faces[:, margins] = np.int32(faces[:, margins] * 300)
- faces[:, margins] = np.int32(
- faces[:, margins] * [aspect_ratio_x, aspect_ratio_y, aspect_ratio_x, aspect_ratio_y]
- )
- faces[:, [ssd_labels.right, ssd_labels.bottom]] -= faces[
- :, [ssd_labels.left, ssd_labels.top]
- ]
-
- resp = []
- for face in faces:
- confidence = float(face[ssd_labels.confidence])
- x, y, w, h = map(int, face[margins])
- detected_face = img[y : y + h, x : x + w]
-
- left_eye, right_eye = opencv_module.find_eyes(detected_face)
-
- # eyes were found in the detected face rather than the full image,
- # so the detected face's coordinates must be added back
- if left_eye is not None:
- left_eye = x + int(left_eye[0]), y + int(left_eye[1])
- if right_eye is not None:
- right_eye = x + int(right_eye[0]), y + int(right_eye[1])
-
- facial_area = FacialAreaRegion(
- x=x,
- y=y,
- w=w,
- h=h,
- left_eye=left_eye,
- right_eye=right_eye,
- confidence=confidence,
- )
- resp.append(facial_area)
- return resp
diff --git a/deepface/models/face_detection/Yolo.py b/deepface/models/face_detection/Yolo.py
deleted file mode 100644
index a4f5a46..0000000
--- a/deepface/models/face_detection/Yolo.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# built-in dependencies
-from typing import Any, List
-
-# 3rd party dependencies
-import numpy as np
-
-# project dependencies
-from deepface.models.Detector import Detector, FacialAreaRegion
-from deepface.commons import weight_utils
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# Model's weights paths
-PATH = ".deepface/weights/yolov8n-face.pt"
-
-# Google Drive URL from repo (https://github.com/derronqi/yolov8-face) ~6MB
-WEIGHT_URL = "https://drive.google.com/uc?id=1qcr9DbgsX3ryrz2uU8w4Xm3cOrRywXqb"
-
-
-class YoloClient(Detector):
- def __init__(self):
- self.model = self.build_model()
-
- def build_model(self) -> Any:
- """
- Build a yolo detector model
- Returns:
- model (Any)
- """
-
- # Import the optional Ultralytics YOLO model
- try:
- from ultralytics import YOLO
- except ModuleNotFoundError as e:
- raise ImportError(
- "Yolo is an optional detector, ensure the library is installed. "
- "Please install using 'pip install ultralytics'"
- ) from e
-
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="yolov8n-face.pt", source_url=WEIGHT_URL
- )
-
- # Return face_detector
- return YOLO(weight_file)
-
- def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
- """
- Detect and align face with yolo
-
- Args:
- img (np.ndarray): pre-loaded image as numpy array
-
- Returns:
- results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
- """
- resp = []
-
- # Detect faces
- results = self.model.predict(img, verbose=False, show=False, conf=0.25)[0]
-
- # For each face, extract the bounding box, the landmarks and confidence
- for result in results:
-
- if result.boxes is None or result.keypoints is None:
- continue
-
- # Extract the bounding box and the confidence
- x, y, w, h = result.boxes.xywh.tolist()[0]
- confidence = result.boxes.conf.tolist()[0]
-
- # right_eye_conf = result.keypoints.conf[0][0]
- # left_eye_conf = result.keypoints.conf[0][1]
- right_eye = result.keypoints.xy[0][0].tolist()
- left_eye = result.keypoints.xy[0][1].tolist()
-
- # eyes are list of float, need to cast them tuple of int
- left_eye = tuple(int(i) for i in left_eye)
- right_eye = tuple(int(i) for i in right_eye)
-
- x, y, w, h = int(x - w / 2), int(y - h / 2), int(w), int(h)
- facial_area = FacialAreaRegion(
- x=x,
- y=y,
- w=w,
- h=h,
- left_eye=left_eye,
- right_eye=right_eye,
- confidence=confidence,
- )
- resp.append(facial_area)
-
- return resp
diff --git a/deepface/models/face_detection/YuNet.py b/deepface/models/face_detection/YuNet.py
deleted file mode 100644
index 398aed2..0000000
--- a/deepface/models/face_detection/YuNet.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# built-in dependencies
-import os
-from typing import Any, List
-
-# 3rd party dependencies
-import cv2
-import numpy as np
-
-# project dependencies
-from deepface.commons import weight_utils
-from deepface.models.Detector import Detector, FacialAreaRegion
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-
-class YuNetClient(Detector):
- def __init__(self):
- self.model = self.build_model()
-
- def build_model(self) -> Any:
- """
- Build a yunet detector model
- Returns:
- model (Any)
- """
-
- opencv_version = cv2.__version__.split(".")
- if len(opencv_version) < 2:
- raise ValueError(
- f"OpenCv's version must have major and minor values but it is {opencv_version}"
- )
-
- opencv_version_major = int(opencv_version[0])
- opencv_version_minor = int(opencv_version[1])
-
- if opencv_version_major < 4 or (opencv_version_major == 4 and opencv_version_minor < 8):
- # min requirement: https://github.com/opencv/opencv_zoo/issues/172
- raise ValueError(f"YuNet requires opencv-python >= 4.8 but you have {cv2.__version__}")
-
- # pylint: disable=C0301
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="face_detection_yunet_2023mar.onnx",
- source_url="https://github.com/opencv/opencv_zoo/raw/main/models/face_detection_yunet/face_detection_yunet_2023mar.onnx",
- )
-
- try:
- face_detector = cv2.FaceDetectorYN_create(weight_file, "", (0, 0))
- except Exception as err:
- raise ValueError(
- "Exception while calling opencv.FaceDetectorYN_create module."
- + "This is an optional dependency."
- + "You can install it as pip install opencv-contrib-python."
- ) from err
- return face_detector
-
- def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
- """
- Detect and align face with yunet
-
- Args:
- img (np.ndarray): pre-loaded image as numpy array
-
- Returns:
- results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
- """
- # FaceDetector.detect_faces does not support a score_threshold parameter.
- # It can instead be set via an environment variable.
- score_threshold = float(os.environ.get("yunet_score_threshold", "0.9"))
- resp = []
- faces = []
- height, width = img.shape[0], img.shape[1]
- # resize image if it is too large (Yunet fails to detect faces on large input sometimes)
- # I picked 640 as a threshold because it is the default value of max_size in Yunet.
- resized = False
- r = 1 # resize factor
- if height > 640 or width > 640:
- r = 640.0 / max(height, width)
- img = cv2.resize(img, (int(width * r), int(height * r)))
- height, width = img.shape[0], img.shape[1]
- resized = True
- self.model.setInputSize((width, height))
- self.model.setScoreThreshold(score_threshold)
- _, faces = self.model.detect(img)
- if faces is None:
- return resp
- for face in faces:
- # pylint: disable=W0105
- """
- The detection output faces is a two-dimension array of type CV_32F,
- whose rows are the detected face instances, columns are the location
- of a face and 5 facial landmarks.
- The format of each row is as follows:
- x1, y1, w, h, x_re, y_re, x_le, y_le, x_nt, y_nt,
- x_rcm, y_rcm, x_lcm, y_lcm,
- where x1, y1, w, h are the top-left coordinates, width and height of
- the face bounding box,
- {x, y}_{re, le, nt, rcm, lcm} stands for the coordinates of right eye,
- left eye, nose tip, the right corner and left corner of the mouth respectively.
- """
- (x, y, w, h, x_le, y_le, x_re, y_re) = list(map(int, face[:8]))
-
- # YuNet returns negative coordinates if it thinks part of the detected face
- # is outside the frame.
- x = max(x, 0)
- y = max(y, 0)
- if resized:
- x, y, w, h = int(x / r), int(y / r), int(w / r), int(h / r)
- x_re, y_re, x_le, y_le = (
- int(x_re / r),
- int(y_re / r),
- int(x_le / r),
- int(y_le / r),
- )
- confidence = float(face[-1])
-
- facial_area = FacialAreaRegion(
- x=x,
- y=y,
- w=w,
- h=h,
- confidence=confidence,
- left_eye=(x_re, y_re),
- right_eye=(x_le, y_le),
- )
- resp.append(facial_area)
- return resp
diff --git a/deepface/models/face_detection/__init__.py b/deepface/models/face_detection/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/deepface/models/facial_recognition/ArcFace.py b/deepface/models/facial_recognition/ArcFace.py
deleted file mode 100644
index 596192f..0000000
--- a/deepface/models/facial_recognition/ArcFace.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# project dependencies
-from deepface.commons import package_utils, weight_utils
-from deepface.models.FacialRecognition import FacialRecognition
-
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# pylint: disable=unsubscriptable-object
-
-# --------------------------------
-# dependency configuration
-
-tf_version = package_utils.get_tf_major_version()
-
-if tf_version == 1:
- from keras.models import Model
- from keras.engine import training
- from keras.layers import (
- ZeroPadding2D,
- Input,
- Conv2D,
- BatchNormalization,
- PReLU,
- Add,
- Dropout,
- Flatten,
- Dense,
- )
-else:
- from tensorflow.keras.models import Model
- from tensorflow.python.keras.engine import training
- from tensorflow.keras.layers import (
- ZeroPadding2D,
- Input,
- Conv2D,
- BatchNormalization,
- PReLU,
- Add,
- Dropout,
- Flatten,
- Dense,
- )
-
-# pylint: disable=too-few-public-methods
-class ArcFaceClient(FacialRecognition):
- """
- ArcFace model class
- """
-
- def __init__(self):
- self.model = load_model()
- self.model_name = "ArcFace"
- self.input_shape = (112, 112)
- self.output_shape = 512
-
-
-def load_model(
- url="https://github.com/serengil/deepface_models/releases/download/v1.0/arcface_weights.h5",
-) -> Model:
- """
- Construct ArcFace model, download its weights and load
- Returns:
- model (Model)
- """
- base_model = ResNet34()
- inputs = base_model.inputs[0]
- arcface_model = base_model.outputs[0]
- arcface_model = BatchNormalization(momentum=0.9, epsilon=2e-5)(arcface_model)
- arcface_model = Dropout(0.4)(arcface_model)
- arcface_model = Flatten()(arcface_model)
- arcface_model = Dense(512, activation=None, use_bias=True, kernel_initializer="glorot_normal")(
- arcface_model
- )
- embedding = BatchNormalization(momentum=0.9, epsilon=2e-5, name="embedding", scale=True)(
- arcface_model
- )
- model = Model(inputs, embedding, name=base_model.name)
-
- # ---------------------------------------
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="arcface_weights.h5", source_url=url
- )
-
- model = weight_utils.load_model_weights(model=model, weight_file=weight_file)
- # ---------------------------------------
-
- return model
-
-
-def ResNet34() -> Model:
- """
- ResNet34 model
- Returns:
- model (Model)
- """
- img_input = Input(shape=(112, 112, 3))
-
- x = ZeroPadding2D(padding=1, name="conv1_pad")(img_input)
- x = Conv2D(
- 64, 3, strides=1, use_bias=False, kernel_initializer="glorot_normal", name="conv1_conv"
- )(x)
- x = BatchNormalization(axis=3, epsilon=2e-5, momentum=0.9, name="conv1_bn")(x)
- x = PReLU(shared_axes=[1, 2], name="conv1_prelu")(x)
- x = stack_fn(x)
-
- model = training.Model(img_input, x, name="ResNet34")
-
- return model
-
-
-def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
- bn_axis = 3
-
- if conv_shortcut:
- shortcut = Conv2D(
- filters,
- 1,
- strides=stride,
- use_bias=False,
- kernel_initializer="glorot_normal",
- name=name + "_0_conv",
- )(x)
- shortcut = BatchNormalization(
- axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_0_bn"
- )(shortcut)
- else:
- shortcut = x
-
- x = BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_1_bn")(x)
- x = ZeroPadding2D(padding=1, name=name + "_1_pad")(x)
- x = Conv2D(
- filters,
- 3,
- strides=1,
- kernel_initializer="glorot_normal",
- use_bias=False,
- name=name + "_1_conv",
- )(x)
- x = BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_2_bn")(x)
- x = PReLU(shared_axes=[1, 2], name=name + "_1_prelu")(x)
-
- x = ZeroPadding2D(padding=1, name=name + "_2_pad")(x)
- x = Conv2D(
- filters,
- kernel_size,
- strides=stride,
- kernel_initializer="glorot_normal",
- use_bias=False,
- name=name + "_2_conv",
- )(x)
- x = BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_3_bn")(x)
-
- x = Add(name=name + "_add")([shortcut, x])
- return x
-
-
-def stack1(x, filters, blocks, stride1=2, name=None):
- x = block1(x, filters, stride=stride1, name=name + "_block1")
- for i in range(2, blocks + 1):
- x = block1(x, filters, conv_shortcut=False, name=name + "_block" + str(i))
- return x
-
-
-def stack_fn(x):
- x = stack1(x, 64, 3, name="conv2")
- x = stack1(x, 128, 4, name="conv3")
- x = stack1(x, 256, 6, name="conv4")
- return stack1(x, 512, 3, name="conv5")
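The removed ArcFace module builds a ResNet34 backbone, stacks a 512-dimensional embedding head on top of it, and loads the pretrained `arcface_weights.h5` release. Callers rarely touched `ArcFaceClient` directly; a typical route was the high-level API, sketched below with a placeholder image path:

```python
from deepface import DeepFace

# "img.jpg" is a placeholder; point it at any local face photo.
embedding_objs = DeepFace.represent(img_path="img.jpg", model_name="ArcFace")

# One entry per detected face; ArcFace embeddings are 512-dimensional.
print(len(embedding_objs[0]["embedding"]))
```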
diff --git a/deepface/models/facial_recognition/DeepID.py b/deepface/models/facial_recognition/DeepID.py
deleted file mode 100644
index ea03b4e..0000000
--- a/deepface/models/facial_recognition/DeepID.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# project dependencies
-from deepface.commons import package_utils, weight_utils
-from deepface.models.FacialRecognition import FacialRecognition
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-tf_version = package_utils.get_tf_major_version()
-
-if tf_version == 1:
- from keras.models import Model
- from keras.layers import (
- Conv2D,
- Activation,
- Input,
- Add,
- MaxPooling2D,
- Flatten,
- Dense,
- Dropout,
- )
-else:
- from tensorflow.keras.models import Model
- from tensorflow.keras.layers import (
- Conv2D,
- Activation,
- Input,
- Add,
- MaxPooling2D,
- Flatten,
- Dense,
- Dropout,
- )
-
-# pylint: disable=line-too-long
-
-
-# -------------------------------------
-
-# pylint: disable=too-few-public-methods
-class DeepIdClient(FacialRecognition):
- """
- DeepId model class
- """
-
- def __init__(self):
- self.model = load_model()
- self.model_name = "DeepId"
- self.input_shape = (47, 55)
- self.output_shape = 160
-
-
-def load_model(
- url="https://github.com/serengil/deepface_models/releases/download/v1.0/deepid_keras_weights.h5",
-) -> Model:
- """
-    Construct the DeepId model, download its weights, and load them
- """
-
- myInput = Input(shape=(55, 47, 3))
-
- x = Conv2D(20, (4, 4), name="Conv1", activation="relu", input_shape=(55, 47, 3))(myInput)
- x = MaxPooling2D(pool_size=2, strides=2, name="Pool1")(x)
- x = Dropout(rate=0.99, name="D1")(x)
-
- x = Conv2D(40, (3, 3), name="Conv2", activation="relu")(x)
- x = MaxPooling2D(pool_size=2, strides=2, name="Pool2")(x)
- x = Dropout(rate=0.99, name="D2")(x)
-
- x = Conv2D(60, (3, 3), name="Conv3", activation="relu")(x)
- x = MaxPooling2D(pool_size=2, strides=2, name="Pool3")(x)
- x = Dropout(rate=0.99, name="D3")(x)
-
- x1 = Flatten()(x)
- fc11 = Dense(160, name="fc11")(x1)
-
- x2 = Conv2D(80, (2, 2), name="Conv4", activation="relu")(x)
- x2 = Flatten()(x2)
- fc12 = Dense(160, name="fc12")(x2)
-
- y = Add()([fc11, fc12])
- y = Activation("relu", name="deepid")(y)
-
- model = Model(inputs=[myInput], outputs=y)
-
- # ---------------------------------
-
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="deepid_keras_weights.h5", source_url=url
- )
-
- model = weight_utils.load_model_weights(
- model=model, weight_file=weight_file
- )
-
- return model
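DeepID above is a much smaller network: 55x47 inputs, a handful of convolution blocks, and a 160-dimensional embedding formed by adding two fully connected branches. A hedged sketch of comparing two faces with it through the public API (both image paths are placeholders):

```python
from deepface import DeepFace

# Placeholder paths; use any two local face photos.
result = DeepFace.verify(img1_path="img1.jpg", img2_path="img2.jpg", model_name="DeepID")

print(result["verified"], result["distance"], result["threshold"])
```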
diff --git a/deepface/models/facial_recognition/Dlib.py b/deepface/models/facial_recognition/Dlib.py
deleted file mode 100644
index 3d50521..0000000
--- a/deepface/models/facial_recognition/Dlib.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# built-in dependencies
-from typing import List
-
-# 3rd party dependencies
-import numpy as np
-
-# project dependencies
-from deepface.commons import weight_utils
-from deepface.models.FacialRecognition import FacialRecognition
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# pylint: disable=too-few-public-methods
-
-
-class DlibClient(FacialRecognition):
- """
- Dlib model class
- """
-
- def __init__(self):
- self.model = DlibResNet()
- self.model_name = "Dlib"
- self.input_shape = (150, 150)
- self.output_shape = 128
-
- def forward(self, img: np.ndarray) -> List[float]:
- """
- Find embeddings with Dlib model.
-        This model requires overriding the forward method
-        because it is not a Keras model.
-        Args:
-            img (np.ndarray): pre-loaded image in BGR
-        Returns:
- embeddings (list): multi-dimensional vector
- """
- # return self.model.predict(img)[0].tolist()
-
- # extract_faces returns 4 dimensional images
- if len(img.shape) == 4:
- img = img[0]
-
-        # convert from BGR to RGB, which is what dlib expects
-        img = img[:, :, ::-1]
-
- # img is in scale of [0, 1] but expected [0, 255]
- if img.max() <= 1:
- img = img * 255
-
- img = img.astype(np.uint8)
-
- img_representation = self.model.model.compute_face_descriptor(img)
- img_representation = np.array(img_representation)
- img_representation = np.expand_dims(img_representation, axis=0)
- return img_representation[0].tolist()
-
-
-class DlibResNet:
- def __init__(self):
-
- # This is not a must dependency. Don't import it in the global level.
- try:
- import dlib
- except ModuleNotFoundError as e:
- raise ImportError(
- "Dlib is an optional dependency, ensure the library is installed."
- "Please install using 'pip install dlib' "
- ) from e
-
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="dlib_face_recognition_resnet_model_v1.dat",
- source_url="http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2",
- compress_type="bz2",
- )
-
- self.model = dlib.face_recognition_model_v1(weight_file)
-
- # return None # classes must return None
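Because Dlib's face descriptor is not a Keras model, the deleted `forward` method above does its own preprocessing: drop the batch axis, flip BGR to RGB, rescale [0, 1] pixels to [0, 255], and cast to uint8 before calling `compute_face_descriptor`. A standalone sketch of just that preprocessing, with a random array standing in for the batch that `extract_faces` would hand over:

```python
import numpy as np

# Hypothetical single-image batch in BGR, scaled to [0, 1], shaped like the
# crops that extract_faces produces for the Dlib client above.
img = np.random.rand(1, 150, 150, 3).astype(np.float32)

if img.ndim == 4:            # drop the batch axis
    img = img[0]
img = img[:, :, ::-1]        # BGR -> RGB
if img.max() <= 1:           # dlib expects 8-bit pixel values
    img = img * 255
img = img.astype(np.uint8)

print(img.shape, img.dtype)  # (150, 150, 3) uint8
```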
diff --git a/deepface/models/facial_recognition/Facenet.py b/deepface/models/facial_recognition/Facenet.py
deleted file mode 100644
index b1ad37c..0000000
--- a/deepface/models/facial_recognition/Facenet.py
+++ /dev/null
@@ -1,1696 +0,0 @@
-# project dependencies
-from deepface.commons import package_utils, weight_utils
-from deepface.models.FacialRecognition import FacialRecognition
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# --------------------------------
-# dependency configuration
-
-tf_version = package_utils.get_tf_major_version()
-
-if tf_version == 1:
- from keras.models import Model
- from keras.layers import Activation
- from keras.layers import BatchNormalization
- from keras.layers import Concatenate
- from keras.layers import Conv2D
- from keras.layers import Dense
- from keras.layers import Dropout
- from keras.layers import GlobalAveragePooling2D
- from keras.layers import Input
- from keras.layers import Lambda
- from keras.layers import MaxPooling2D
- from keras.layers import add
- from keras import backend as K
-else:
- from tensorflow.keras.models import Model
- from tensorflow.keras.layers import Activation
- from tensorflow.keras.layers import BatchNormalization
- from tensorflow.keras.layers import Concatenate
- from tensorflow.keras.layers import Conv2D
- from tensorflow.keras.layers import Dense
- from tensorflow.keras.layers import Dropout
- from tensorflow.keras.layers import GlobalAveragePooling2D
- from tensorflow.keras.layers import Input
- from tensorflow.keras.layers import Lambda
- from tensorflow.keras.layers import MaxPooling2D
- from tensorflow.keras.layers import add
- from tensorflow.keras import backend as K
-
-# --------------------------------
-
-# pylint: disable=too-few-public-methods
-class FaceNet128dClient(FacialRecognition):
- """
- FaceNet-128d model class
- """
-
- def __init__(self):
- self.model = load_facenet128d_model()
- self.model_name = "FaceNet-128d"
- self.input_shape = (160, 160)
- self.output_shape = 128
-
-
-class FaceNet512dClient(FacialRecognition):
- """
-    FaceNet-512d model class
- """
-
- def __init__(self):
- self.model = load_facenet512d_model()
- self.model_name = "FaceNet-512d"
- self.input_shape = (160, 160)
- self.output_shape = 512
-
-
-def scaling(x, scale):
- return x * scale
-
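The small `scaling` helper above is reused throughout the file: every Inception-ResNet block below computes a residual branch, scales it down via a `Lambda` layer (0.17 for Block35, 0.1 for Block17, 0.2 for Block8), and adds it back onto the trunk before the ReLU. A toy, self-contained sketch of that recurring pattern, with invented layer names and a 1x1 convolution standing in for the full branch stack:

```python
from tensorflow.keras import Model
from tensorflow.keras.layers import Activation, Conv2D, Input, Lambda, add

# Hypothetical trunk tensor standing in for one 35x35x256 feature map.
inputs = Input(shape=(35, 35, 256))

# Residual branch: a single 1x1 conv here stands in for the real Inception branches.
residual = Conv2D(256, 1, padding="same", name="toy_residual_1x1")(inputs)

# Scale the residual before merging, mirroring the Lambda(scaling, ...) calls below.
scaled = Lambda(lambda t: t * 0.17, name="toy_residual_scaling")(residual)

outputs = Activation("relu", name="toy_block_activation")(add([inputs, scaled]))
model = Model(inputs, outputs, name="toy_block35")
model.summary()
```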
-
-def InceptionResNetV1(dimension: int = 128) -> Model:
- """
-    InceptionResNetV1 model, heavily inspired by
-    github.com/davidsandberg/facenet/blob/master/src/models/inception_resnet_v1.py
-    As noted in the readme of Sandberg's repo, the pre-trained models use Inception-ResNet v1.
-    The training process is documented at
-    sefiks.com/2018/09/03/face-recognition-with-facenet-in-keras/
-
- Args:
- dimension (int): number of dimensions in the embedding layer
- Returns:
- model (Model)
- """
-
- inputs = Input(shape=(160, 160, 3))
- x = Conv2D(32, 3, strides=2, padding="valid", use_bias=False, name="Conv2d_1a_3x3")(inputs)
- x = BatchNormalization(
- axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_1a_3x3_BatchNorm"
- )(x)
- x = Activation("relu", name="Conv2d_1a_3x3_Activation")(x)
- x = Conv2D(32, 3, strides=1, padding="valid", use_bias=False, name="Conv2d_2a_3x3")(x)
- x = BatchNormalization(
- axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_2a_3x3_BatchNorm"
- )(x)
- x = Activation("relu", name="Conv2d_2a_3x3_Activation")(x)
- x = Conv2D(64, 3, strides=1, padding="same", use_bias=False, name="Conv2d_2b_3x3")(x)
- x = BatchNormalization(
- axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_2b_3x3_BatchNorm"
- )(x)
- x = Activation("relu", name="Conv2d_2b_3x3_Activation")(x)
- x = MaxPooling2D(3, strides=2, name="MaxPool_3a_3x3")(x)
- x = Conv2D(80, 1, strides=1, padding="valid", use_bias=False, name="Conv2d_3b_1x1")(x)
- x = BatchNormalization(
- axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_3b_1x1_BatchNorm"
- )(x)
- x = Activation("relu", name="Conv2d_3b_1x1_Activation")(x)
- x = Conv2D(192, 3, strides=1, padding="valid", use_bias=False, name="Conv2d_4a_3x3")(x)
- x = BatchNormalization(
- axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_4a_3x3_BatchNorm"
- )(x)
- x = Activation("relu", name="Conv2d_4a_3x3_Activation")(x)
- x = Conv2D(256, 3, strides=2, padding="valid", use_bias=False, name="Conv2d_4b_3x3")(x)
- x = BatchNormalization(
- axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_4b_3x3_BatchNorm"
- )(x)
- x = Activation("relu", name="Conv2d_4b_3x3_Activation")(x)
-
- # 5x Block35 (Inception-ResNet-A block):
- branch_0 = Conv2D(
- 32, 1, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_1_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block35_1_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 32, 1, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_1_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_1_Branch_1_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block35_1_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 32, 3, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_1_Conv2d_0b_3x3"
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_1_Branch_1_Conv2d_0b_3x3_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block35_1_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
- branch_2 = Conv2D(
- 32, 1, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_2_Conv2d_0a_1x1"
- )(x)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_1_Branch_2_Conv2d_0a_1x1_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Block35_1_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
- branch_2 = Conv2D(
- 32, 3, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_2_Conv2d_0b_3x3"
- )(branch_2)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_1_Branch_2_Conv2d_0b_3x3_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Block35_1_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
- branch_2 = Conv2D(
- 32, 3, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_2_Conv2d_0c_3x3"
- )(branch_2)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_1_Branch_2_Conv2d_0c_3x3_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Block35_1_Branch_2_Conv2d_0c_3x3_Activation")(branch_2)
- branches = [branch_0, branch_1, branch_2]
- mixed = Concatenate(axis=3, name="Block35_1_Concatenate")(branches)
- up = Conv2D(256, 1, strides=1, padding="same", use_bias=True, name="Block35_1_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(up)
- x = add([x, up])
- x = Activation("relu", name="Block35_1_Activation")(x)
-
- branch_0 = Conv2D(
- 32, 1, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_2_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block35_2_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 32, 1, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_1_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_2_Branch_1_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block35_2_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 32, 3, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_1_Conv2d_0b_3x3"
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_2_Branch_1_Conv2d_0b_3x3_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block35_2_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
- branch_2 = Conv2D(
- 32, 1, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_2_Conv2d_0a_1x1"
- )(x)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_2_Branch_2_Conv2d_0a_1x1_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Block35_2_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
- branch_2 = Conv2D(
- 32, 3, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_2_Conv2d_0b_3x3"
- )(branch_2)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_2_Branch_2_Conv2d_0b_3x3_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Block35_2_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
- branch_2 = Conv2D(
- 32, 3, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_2_Conv2d_0c_3x3"
- )(branch_2)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_2_Branch_2_Conv2d_0c_3x3_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Block35_2_Branch_2_Conv2d_0c_3x3_Activation")(branch_2)
- branches = [branch_0, branch_1, branch_2]
- mixed = Concatenate(axis=3, name="Block35_2_Concatenate")(branches)
- up = Conv2D(256, 1, strides=1, padding="same", use_bias=True, name="Block35_2_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(up)
- x = add([x, up])
- x = Activation("relu", name="Block35_2_Activation")(x)
-
- branch_0 = Conv2D(
- 32, 1, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_3_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block35_3_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 32, 1, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_1_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_3_Branch_1_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block35_3_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 32, 3, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_1_Conv2d_0b_3x3"
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_3_Branch_1_Conv2d_0b_3x3_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block35_3_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
- branch_2 = Conv2D(
- 32, 1, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_2_Conv2d_0a_1x1"
- )(x)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_3_Branch_2_Conv2d_0a_1x1_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Block35_3_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
- branch_2 = Conv2D(
- 32, 3, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_2_Conv2d_0b_3x3"
- )(branch_2)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_3_Branch_2_Conv2d_0b_3x3_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Block35_3_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
- branch_2 = Conv2D(
- 32, 3, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_2_Conv2d_0c_3x3"
- )(branch_2)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_3_Branch_2_Conv2d_0c_3x3_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Block35_3_Branch_2_Conv2d_0c_3x3_Activation")(branch_2)
- branches = [branch_0, branch_1, branch_2]
- mixed = Concatenate(axis=3, name="Block35_3_Concatenate")(branches)
- up = Conv2D(256, 1, strides=1, padding="same", use_bias=True, name="Block35_3_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(up)
- x = add([x, up])
- x = Activation("relu", name="Block35_3_Activation")(x)
-
- branch_0 = Conv2D(
- 32, 1, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_4_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block35_4_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 32, 1, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_1_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_4_Branch_1_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block35_4_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 32, 3, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_1_Conv2d_0b_3x3"
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_4_Branch_1_Conv2d_0b_3x3_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block35_4_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
- branch_2 = Conv2D(
- 32, 1, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_2_Conv2d_0a_1x1"
- )(x)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_4_Branch_2_Conv2d_0a_1x1_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Block35_4_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
- branch_2 = Conv2D(
- 32, 3, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_2_Conv2d_0b_3x3"
- )(branch_2)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_4_Branch_2_Conv2d_0b_3x3_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Block35_4_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
- branch_2 = Conv2D(
- 32, 3, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_2_Conv2d_0c_3x3"
- )(branch_2)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_4_Branch_2_Conv2d_0c_3x3_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Block35_4_Branch_2_Conv2d_0c_3x3_Activation")(branch_2)
- branches = [branch_0, branch_1, branch_2]
- mixed = Concatenate(axis=3, name="Block35_4_Concatenate")(branches)
- up = Conv2D(256, 1, strides=1, padding="same", use_bias=True, name="Block35_4_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(up)
- x = add([x, up])
- x = Activation("relu", name="Block35_4_Activation")(x)
-
- branch_0 = Conv2D(
- 32, 1, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_5_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block35_5_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 32, 1, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_1_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_5_Branch_1_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block35_5_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 32, 3, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_1_Conv2d_0b_3x3"
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_5_Branch_1_Conv2d_0b_3x3_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block35_5_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
- branch_2 = Conv2D(
- 32, 1, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_2_Conv2d_0a_1x1"
- )(x)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_5_Branch_2_Conv2d_0a_1x1_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Block35_5_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
- branch_2 = Conv2D(
- 32, 3, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_2_Conv2d_0b_3x3"
- )(branch_2)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_5_Branch_2_Conv2d_0b_3x3_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Block35_5_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
- branch_2 = Conv2D(
- 32, 3, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_2_Conv2d_0c_3x3"
- )(branch_2)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block35_5_Branch_2_Conv2d_0c_3x3_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Block35_5_Branch_2_Conv2d_0c_3x3_Activation")(branch_2)
- branches = [branch_0, branch_1, branch_2]
- mixed = Concatenate(axis=3, name="Block35_5_Concatenate")(branches)
- up = Conv2D(256, 1, strides=1, padding="same", use_bias=True, name="Block35_5_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(up)
- x = add([x, up])
- x = Activation("relu", name="Block35_5_Activation")(x)
-
- # Mixed 6a (Reduction-A block):
- branch_0 = Conv2D(
- 384, 3, strides=2, padding="valid", use_bias=False, name="Mixed_6a_Branch_0_Conv2d_1a_3x3"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Mixed_6a_Branch_0_Conv2d_1a_3x3_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Mixed_6a_Branch_0_Conv2d_1a_3x3_Activation")(branch_0)
- branch_1 = Conv2D(
- 192, 1, strides=1, padding="same", use_bias=False, name="Mixed_6a_Branch_1_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Mixed_6a_Branch_1_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Mixed_6a_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 192, 3, strides=1, padding="same", use_bias=False, name="Mixed_6a_Branch_1_Conv2d_0b_3x3"
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Mixed_6a_Branch_1_Conv2d_0b_3x3_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Mixed_6a_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
- branch_1 = Conv2D(
- 256, 3, strides=2, padding="valid", use_bias=False, name="Mixed_6a_Branch_1_Conv2d_1a_3x3"
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Mixed_6a_Branch_1_Conv2d_1a_3x3_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Mixed_6a_Branch_1_Conv2d_1a_3x3_Activation")(branch_1)
- branch_pool = MaxPooling2D(
- 3, strides=2, padding="valid", name="Mixed_6a_Branch_2_MaxPool_1a_3x3"
- )(x)
- branches = [branch_0, branch_1, branch_pool]
- x = Concatenate(axis=3, name="Mixed_6a")(branches)
-
- # 10x Block17 (Inception-ResNet-B block):
- branch_0 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_1_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_1_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block17_1_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_1_Branch_1_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_1_Branch_1_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_1_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [1, 7],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_1_Branch_1_Conv2d_0b_1x7",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_1_Branch_1_Conv2d_0b_1x7_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_1_Branch_1_Conv2d_0b_1x7_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [7, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_1_Branch_1_Conv2d_0c_7x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_1_Branch_1_Conv2d_0c_7x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_1_Branch_1_Conv2d_0c_7x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block17_1_Concatenate")(branches)
- up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_1_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
- x = add([x, up])
- x = Activation("relu", name="Block17_1_Activation")(x)
-
- branch_0 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_2_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_2_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block17_2_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_2_Branch_2_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_2_Branch_2_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_2_Branch_2_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [1, 7],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_2_Branch_2_Conv2d_0b_1x7",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_2_Branch_2_Conv2d_0b_1x7_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_2_Branch_2_Conv2d_0b_1x7_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [7, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_2_Branch_2_Conv2d_0c_7x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_2_Branch_2_Conv2d_0c_7x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_2_Branch_2_Conv2d_0c_7x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block17_2_Concatenate")(branches)
- up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_2_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
- x = add([x, up])
- x = Activation("relu", name="Block17_2_Activation")(x)
-
- branch_0 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_3_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_3_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block17_3_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_3_Branch_3_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_3_Branch_3_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_3_Branch_3_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [1, 7],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_3_Branch_3_Conv2d_0b_1x7",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_3_Branch_3_Conv2d_0b_1x7_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_3_Branch_3_Conv2d_0b_1x7_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [7, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_3_Branch_3_Conv2d_0c_7x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_3_Branch_3_Conv2d_0c_7x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_3_Branch_3_Conv2d_0c_7x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block17_3_Concatenate")(branches)
- up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_3_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
- x = add([x, up])
- x = Activation("relu", name="Block17_3_Activation")(x)
-
- branch_0 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_4_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_4_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block17_4_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_4_Branch_4_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_4_Branch_4_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_4_Branch_4_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [1, 7],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_4_Branch_4_Conv2d_0b_1x7",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_4_Branch_4_Conv2d_0b_1x7_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_4_Branch_4_Conv2d_0b_1x7_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [7, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_4_Branch_4_Conv2d_0c_7x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_4_Branch_4_Conv2d_0c_7x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_4_Branch_4_Conv2d_0c_7x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block17_4_Concatenate")(branches)
- up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_4_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
- x = add([x, up])
- x = Activation("relu", name="Block17_4_Activation")(x)
-
- branch_0 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_5_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_5_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block17_5_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_5_Branch_5_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_5_Branch_5_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_5_Branch_5_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [1, 7],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_5_Branch_5_Conv2d_0b_1x7",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_5_Branch_5_Conv2d_0b_1x7_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_5_Branch_5_Conv2d_0b_1x7_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [7, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_5_Branch_5_Conv2d_0c_7x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_5_Branch_5_Conv2d_0c_7x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_5_Branch_5_Conv2d_0c_7x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block17_5_Concatenate")(branches)
- up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_5_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
- x = add([x, up])
- x = Activation("relu", name="Block17_5_Activation")(x)
-
- branch_0 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_6_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_6_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block17_6_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_6_Branch_6_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_6_Branch_6_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_6_Branch_6_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [1, 7],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_6_Branch_6_Conv2d_0b_1x7",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_6_Branch_6_Conv2d_0b_1x7_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_6_Branch_6_Conv2d_0b_1x7_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [7, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_6_Branch_6_Conv2d_0c_7x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_6_Branch_6_Conv2d_0c_7x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_6_Branch_6_Conv2d_0c_7x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block17_6_Concatenate")(branches)
- up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_6_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
- x = add([x, up])
- x = Activation("relu", name="Block17_6_Activation")(x)
-
- branch_0 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_7_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_7_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block17_7_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_7_Branch_7_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_7_Branch_7_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_7_Branch_7_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [1, 7],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_7_Branch_7_Conv2d_0b_1x7",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_7_Branch_7_Conv2d_0b_1x7_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_7_Branch_7_Conv2d_0b_1x7_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [7, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_7_Branch_7_Conv2d_0c_7x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_7_Branch_7_Conv2d_0c_7x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_7_Branch_7_Conv2d_0c_7x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block17_7_Concatenate")(branches)
- up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_7_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
- x = add([x, up])
- x = Activation("relu", name="Block17_7_Activation")(x)
-
- branch_0 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_8_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_8_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block17_8_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_8_Branch_8_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_8_Branch_8_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_8_Branch_8_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [1, 7],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_8_Branch_8_Conv2d_0b_1x7",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_8_Branch_8_Conv2d_0b_1x7_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_8_Branch_8_Conv2d_0b_1x7_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [7, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_8_Branch_8_Conv2d_0c_7x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_8_Branch_8_Conv2d_0c_7x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_8_Branch_8_Conv2d_0c_7x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block17_8_Concatenate")(branches)
- up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_8_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
- x = add([x, up])
- x = Activation("relu", name="Block17_8_Activation")(x)
-
- branch_0 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_9_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_9_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block17_9_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_9_Branch_9_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_9_Branch_9_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_9_Branch_9_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [1, 7],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_9_Branch_9_Conv2d_0b_1x7",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_9_Branch_9_Conv2d_0b_1x7_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_9_Branch_9_Conv2d_0b_1x7_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [7, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_9_Branch_9_Conv2d_0c_7x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_9_Branch_9_Conv2d_0c_7x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_9_Branch_9_Conv2d_0c_7x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block17_9_Concatenate")(branches)
- up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_9_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
- x = add([x, up])
- x = Activation("relu", name="Block17_9_Activation")(x)
-
- branch_0 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_10_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_10_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block17_10_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 128, 1, strides=1, padding="same", use_bias=False, name="Block17_10_Branch_10_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_10_Branch_10_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_10_Branch_10_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [1, 7],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_10_Branch_10_Conv2d_0b_1x7",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_10_Branch_10_Conv2d_0b_1x7_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_10_Branch_10_Conv2d_0b_1x7_Activation")(branch_1)
- branch_1 = Conv2D(
- 128,
- [7, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block17_10_Branch_10_Conv2d_0c_7x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block17_10_Branch_10_Conv2d_0c_7x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block17_10_Branch_10_Conv2d_0c_7x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block17_10_Concatenate")(branches)
- up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_10_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
- x = add([x, up])
- x = Activation("relu", name="Block17_10_Activation")(x)
-
- # Mixed 7a (Reduction-B block): 8 x 8 x 2080
- branch_0 = Conv2D(
- 256, 1, strides=1, padding="same", use_bias=False, name="Mixed_7a_Branch_0_Conv2d_0a_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Mixed_7a_Branch_0_Conv2d_0a_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Mixed_7a_Branch_0_Conv2d_0a_1x1_Activation")(branch_0)
- branch_0 = Conv2D(
- 384, 3, strides=2, padding="valid", use_bias=False, name="Mixed_7a_Branch_0_Conv2d_1a_3x3"
- )(branch_0)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Mixed_7a_Branch_0_Conv2d_1a_3x3_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Mixed_7a_Branch_0_Conv2d_1a_3x3_Activation")(branch_0)
- branch_1 = Conv2D(
- 256, 1, strides=1, padding="same", use_bias=False, name="Mixed_7a_Branch_1_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Mixed_7a_Branch_1_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Mixed_7a_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 256, 3, strides=2, padding="valid", use_bias=False, name="Mixed_7a_Branch_1_Conv2d_1a_3x3"
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Mixed_7a_Branch_1_Conv2d_1a_3x3_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Mixed_7a_Branch_1_Conv2d_1a_3x3_Activation")(branch_1)
- branch_2 = Conv2D(
- 256, 1, strides=1, padding="same", use_bias=False, name="Mixed_7a_Branch_2_Conv2d_0a_1x1"
- )(x)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Mixed_7a_Branch_2_Conv2d_0a_1x1_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Mixed_7a_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
- branch_2 = Conv2D(
- 256, 3, strides=1, padding="same", use_bias=False, name="Mixed_7a_Branch_2_Conv2d_0b_3x3"
- )(branch_2)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Mixed_7a_Branch_2_Conv2d_0b_3x3_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Mixed_7a_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
- branch_2 = Conv2D(
- 256, 3, strides=2, padding="valid", use_bias=False, name="Mixed_7a_Branch_2_Conv2d_1a_3x3"
- )(branch_2)
- branch_2 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Mixed_7a_Branch_2_Conv2d_1a_3x3_BatchNorm",
- )(branch_2)
- branch_2 = Activation("relu", name="Mixed_7a_Branch_2_Conv2d_1a_3x3_Activation")(branch_2)
- branch_pool = MaxPooling2D(
- 3, strides=2, padding="valid", name="Mixed_7a_Branch_3_MaxPool_1a_3x3"
- )(x)
- branches = [branch_0, branch_1, branch_2, branch_pool]
- x = Concatenate(axis=3, name="Mixed_7a")(branches)
-
- # 5x Block8 (Inception-ResNet-C block):
-
- branch_0 = Conv2D(
- 192, 1, strides=1, padding="same", use_bias=False, name="Block8_1_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_1_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block8_1_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 192, 1, strides=1, padding="same", use_bias=False, name="Block8_1_Branch_1_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_1_Branch_1_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_1_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 192,
- [1, 3],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block8_1_Branch_1_Conv2d_0b_1x3",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_1_Branch_1_Conv2d_0b_1x3_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_1_Branch_1_Conv2d_0b_1x3_Activation")(branch_1)
- branch_1 = Conv2D(
- 192,
- [3, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block8_1_Branch_1_Conv2d_0c_3x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_1_Branch_1_Conv2d_0c_3x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_1_Branch_1_Conv2d_0c_3x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block8_1_Concatenate")(branches)
- up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_1_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
- x = add([x, up])
- x = Activation("relu", name="Block8_1_Activation")(x)
-
- branch_0 = Conv2D(
- 192, 1, strides=1, padding="same", use_bias=False, name="Block8_2_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_2_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block8_2_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 192, 1, strides=1, padding="same", use_bias=False, name="Block8_2_Branch_2_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_2_Branch_2_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_2_Branch_2_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 192,
- [1, 3],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block8_2_Branch_2_Conv2d_0b_1x3",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_2_Branch_2_Conv2d_0b_1x3_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_2_Branch_2_Conv2d_0b_1x3_Activation")(branch_1)
- branch_1 = Conv2D(
- 192,
- [3, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block8_2_Branch_2_Conv2d_0c_3x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_2_Branch_2_Conv2d_0c_3x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_2_Branch_2_Conv2d_0c_3x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block8_2_Concatenate")(branches)
- up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_2_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
- x = add([x, up])
- x = Activation("relu", name="Block8_2_Activation")(x)
-
- branch_0 = Conv2D(
- 192, 1, strides=1, padding="same", use_bias=False, name="Block8_3_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_3_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block8_3_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 192, 1, strides=1, padding="same", use_bias=False, name="Block8_3_Branch_3_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_3_Branch_3_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_3_Branch_3_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 192,
- [1, 3],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block8_3_Branch_3_Conv2d_0b_1x3",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_3_Branch_3_Conv2d_0b_1x3_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_3_Branch_3_Conv2d_0b_1x3_Activation")(branch_1)
- branch_1 = Conv2D(
- 192,
- [3, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block8_3_Branch_3_Conv2d_0c_3x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_3_Branch_3_Conv2d_0c_3x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_3_Branch_3_Conv2d_0c_3x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block8_3_Concatenate")(branches)
- up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_3_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
- x = add([x, up])
- x = Activation("relu", name="Block8_3_Activation")(x)
-
- branch_0 = Conv2D(
- 192, 1, strides=1, padding="same", use_bias=False, name="Block8_4_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_4_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block8_4_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 192, 1, strides=1, padding="same", use_bias=False, name="Block8_4_Branch_4_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_4_Branch_4_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_4_Branch_4_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 192,
- [1, 3],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block8_4_Branch_4_Conv2d_0b_1x3",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_4_Branch_4_Conv2d_0b_1x3_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_4_Branch_4_Conv2d_0b_1x3_Activation")(branch_1)
- branch_1 = Conv2D(
- 192,
- [3, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block8_4_Branch_4_Conv2d_0c_3x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_4_Branch_4_Conv2d_0c_3x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_4_Branch_4_Conv2d_0c_3x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block8_4_Concatenate")(branches)
- up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_4_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
- x = add([x, up])
- x = Activation("relu", name="Block8_4_Activation")(x)
-
- branch_0 = Conv2D(
- 192, 1, strides=1, padding="same", use_bias=False, name="Block8_5_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_5_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block8_5_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 192, 1, strides=1, padding="same", use_bias=False, name="Block8_5_Branch_5_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_5_Branch_5_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_5_Branch_5_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 192,
- [1, 3],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block8_5_Branch_5_Conv2d_0b_1x3",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_5_Branch_5_Conv2d_0b_1x3_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_5_Branch_5_Conv2d_0b_1x3_Activation")(branch_1)
- branch_1 = Conv2D(
- 192,
- [3, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block8_5_Branch_5_Conv2d_0c_3x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_5_Branch_5_Conv2d_0c_3x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_5_Branch_5_Conv2d_0c_3x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block8_5_Concatenate")(branches)
- up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_5_Conv2d_1x1")(
- mixed
- )
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
- x = add([x, up])
- x = Activation("relu", name="Block8_5_Activation")(x)
-
- branch_0 = Conv2D(
- 192, 1, strides=1, padding="same", use_bias=False, name="Block8_6_Branch_0_Conv2d_1x1"
- )(x)
- branch_0 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_6_Branch_0_Conv2d_1x1_BatchNorm",
- )(branch_0)
- branch_0 = Activation("relu", name="Block8_6_Branch_0_Conv2d_1x1_Activation")(branch_0)
- branch_1 = Conv2D(
- 192, 1, strides=1, padding="same", use_bias=False, name="Block8_6_Branch_1_Conv2d_0a_1x1"
- )(x)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_6_Branch_1_Conv2d_0a_1x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_6_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
- branch_1 = Conv2D(
- 192,
- [1, 3],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block8_6_Branch_1_Conv2d_0b_1x3",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_6_Branch_1_Conv2d_0b_1x3_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_6_Branch_1_Conv2d_0b_1x3_Activation")(branch_1)
- branch_1 = Conv2D(
- 192,
- [3, 1],
- strides=1,
- padding="same",
- use_bias=False,
- name="Block8_6_Branch_1_Conv2d_0c_3x1",
- )(branch_1)
- branch_1 = BatchNormalization(
- axis=3,
- momentum=0.995,
- epsilon=0.001,
- scale=False,
- name="Block8_6_Branch_1_Conv2d_0c_3x1_BatchNorm",
- )(branch_1)
- branch_1 = Activation("relu", name="Block8_6_Branch_1_Conv2d_0c_3x1_Activation")(branch_1)
- branches = [branch_0, branch_1]
- mixed = Concatenate(axis=3, name="Block8_6_Concatenate")(branches)
- up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_6_Conv2d_1x1")(
- mixed
- )
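-    # the final Block8 uses scale 1 and is not followed by a ReLU after the residual addition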
- up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 1})(up)
- x = add([x, up])
-
- # Classification block
- x = GlobalAveragePooling2D(name="AvgPool")(x)
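-    # keras Dropout expects the drop rate; 1.0 - 0.8 converts a keep probability of 0.8 into a drop rate of 0.2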
- x = Dropout(1.0 - 0.8, name="Dropout")(x)
- # Bottleneck
- x = Dense(dimension, use_bias=False, name="Bottleneck")(x)
- x = BatchNormalization(momentum=0.995, epsilon=0.001, scale=False, name="Bottleneck_BatchNorm")(
- x
- )
-
- # Create model
- model = Model(inputs, x, name="inception_resnet_v1")
-
- return model
-
-
-def load_facenet128d_model(
- url="https://github.com/serengil/deepface_models/releases/download/v1.0/facenet_weights.h5",
-) -> Model:
- """
-    Construct the FaceNet-128d model, download its weights and load them
-    Args:
-        url (str): URL of the pre-trained weight file
- Returns:
- model (Model)
- """
- model = InceptionResNetV1()
-
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="facenet_weights.h5", source_url=url
- )
- model = weight_utils.load_model_weights(
- model=model, weight_file=weight_file
- )
-
- return model
-
-
-def load_facenet512d_model(
- url="https://github.com/serengil/deepface_models/releases/download/v1.0/facenet512_weights.h5",
-) -> Model:
- """
- Construct FaceNet-512d model, download its weights and load
- Returns:
- model (Model)
- """
-
- model = InceptionResNetV1(dimension=512)
-
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="facenet512_weights.h5", source_url=url
- )
- model = weight_utils.load_model_weights(
- model=model, weight_file=weight_file
- )
-
- return model
diff --git a/deepface/models/facial_recognition/FbDeepFace.py b/deepface/models/facial_recognition/FbDeepFace.py
deleted file mode 100644
index fb41d62..0000000
--- a/deepface/models/facial_recognition/FbDeepFace.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# project dependencies
-from deepface.commons import package_utils, weight_utils
-from deepface.models.FacialRecognition import FacialRecognition
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# --------------------------------
-# dependency configuration
-
-tf_major = package_utils.get_tf_major_version()
-tf_minor = package_utils.get_tf_minor_version()
-
-if tf_major == 1:
- from keras.models import Model, Sequential
- from keras.layers import (
- Convolution2D,
- MaxPooling2D,
- Flatten,
- Dense,
- Dropout,
- )
-else:
- from tensorflow.keras.models import Model, Sequential
- from tensorflow.keras.layers import (
- Convolution2D,
- MaxPooling2D,
- Flatten,
- Dense,
- Dropout,
- )
-
-
-# -------------------------------------
-# pylint: disable=line-too-long, too-few-public-methods
-class DeepFaceClient(FacialRecognition):
- """
- Fb's DeepFace model class
- """
-
- def __init__(self):
- # DeepFace requires tf 2.12 or less
- if tf_major == 2 and tf_minor > 12:
- # Ref: https://github.com/serengil/deepface/pull/1079
- raise ValueError(
- "DeepFace model requires LocallyConnected2D but it is no longer supported"
- f" after tf 2.12 but you have {tf_major}.{tf_minor}. You need to downgrade your tf."
- )
-
- self.model = load_model()
- self.model_name = "DeepFace"
- self.input_shape = (152, 152)
- self.output_shape = 4096
-
-
-def load_model(
- url="https://github.com/swghosh/DeepFace/releases/download/weights-vggface2-2d-aligned/VGGFace2_DeepFace_weights_val-0.9034.h5.zip",
-) -> Model:
- """
- Construct DeepFace model, download its weights and load
- """
-    # checks for this dependency are done in the client's __init__
-    # importing it at module level would trigger the import during library initialization
- if tf_major == 1:
- from keras.layers import LocallyConnected2D
- else:
- from tensorflow.keras.layers import LocallyConnected2D
-
- base_model = Sequential()
- base_model.add(
- Convolution2D(32, (11, 11), activation="relu", name="C1", input_shape=(152, 152, 3))
- )
- base_model.add(MaxPooling2D(pool_size=3, strides=2, padding="same", name="M2"))
- base_model.add(Convolution2D(16, (9, 9), activation="relu", name="C3"))
- base_model.add(LocallyConnected2D(16, (9, 9), activation="relu", name="L4"))
- base_model.add(LocallyConnected2D(16, (7, 7), strides=2, activation="relu", name="L5"))
- base_model.add(LocallyConnected2D(16, (5, 5), activation="relu", name="L6"))
- base_model.add(Flatten(name="F0"))
- base_model.add(Dense(4096, activation="relu", name="F7"))
- base_model.add(Dropout(rate=0.5, name="D0"))
- base_model.add(Dense(8631, activation="softmax", name="F8"))
-
- # ---------------------------------
-
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="VGGFace2_DeepFace_weights_val-0.9034.h5", source_url=url, compress_type="zip"
- )
-
- base_model = weight_utils.load_model_weights(model=base_model, weight_file=weight_file)
-
- # drop F8 and D0. F7 is the representation layer.
- deepface_model = Model(inputs=base_model.layers[0].input, outputs=base_model.layers[-3].output)
-
- return deepface_model
diff --git a/deepface/models/facial_recognition/GhostFaceNet.py b/deepface/models/facial_recognition/GhostFaceNet.py
deleted file mode 100644
index 37bd728..0000000
--- a/deepface/models/facial_recognition/GhostFaceNet.py
+++ /dev/null
@@ -1,306 +0,0 @@
-# 3rd party dependencies
-import tensorflow as tf
-
-# project dependencies
-from deepface.commons import package_utils, weight_utils
-from deepface.models.FacialRecognition import FacialRecognition
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-tf_major = package_utils.get_tf_major_version()
-if tf_major == 1:
- import keras
- from keras import backend as K
- from keras.models import Model
- from keras.layers import (
- Activation,
- Add,
- BatchNormalization,
- Concatenate,
- Conv2D,
- DepthwiseConv2D,
- GlobalAveragePooling2D,
- Input,
- Reshape,
- Multiply,
- ReLU,
- PReLU,
- )
-else:
- from tensorflow import keras
- from tensorflow.keras import backend as K
- from tensorflow.keras.models import Model
- from tensorflow.keras.layers import (
- Activation,
- Add,
- BatchNormalization,
- Concatenate,
- Conv2D,
- DepthwiseConv2D,
- GlobalAveragePooling2D,
- Input,
- Reshape,
- Multiply,
- ReLU,
- PReLU,
- )
-
-
-# pylint: disable=line-too-long, too-few-public-methods, no-else-return, unsubscriptable-object, comparison-with-callable
-PRETRAINED_WEIGHTS = "https://github.com/HamadYA/GhostFaceNets/releases/download/v1.2/GhostFaceNet_W1.3_S1_ArcFace.h5"
-
-
-class GhostFaceNetClient(FacialRecognition):
- """
- GhostFaceNet model (GhostFaceNetV1 backbone)
- Repo: https://github.com/HamadYA/GhostFaceNets
- Pre-trained weights: https://github.com/HamadYA/GhostFaceNets/releases/tag/v1.2
- GhostFaceNet_W1.3_S1_ArcFace.h5 ~ 16.5MB
-    The author reports that this backbone with these pre-trained weights achieves 99.7667% accuracy on LFW
- """
-
- def __init__(self):
- self.model_name = "GhostFaceNet"
- self.input_shape = (112, 112)
- self.output_shape = 512
- self.model = load_model()
-
-
-def load_model():
- model = GhostFaceNetV1()
-
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="ghostfacenet_v1.h5", source_url=PRETRAINED_WEIGHTS
- )
-
- model = weight_utils.load_model_weights(
- model=model, weight_file=weight_file
- )
-
- return model
-
-
-def GhostFaceNetV1() -> Model:
- """
- Build GhostFaceNetV1 model. Refactored from
- github.com/HamadYA/GhostFaceNets/blob/main/backbones/ghost_model.py
- Returns:
- model (Model)
- """
- inputs = Input(shape=(112, 112, 3))
-
- out_channel = 20
-
- nn = Conv2D(
- out_channel,
- (3, 3),
- strides=1,
- padding="same",
- use_bias=False,
- kernel_initializer=keras.initializers.VarianceScaling(
- scale=2.0, mode="fan_out", distribution="truncated_normal"
- ),
- )(inputs)
-
- nn = BatchNormalization(axis=-1)(nn)
- nn = Activation("relu")(nn)
-
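-    # per-bottleneck configuration for the 16 ghost bottleneck blocks: depthwise kernel size,
-    # expansion width, output width, stride and squeeze-and-excite reduction (0 disables the SE block)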
- dwkernels = [3, 3, 3, 5, 5, 3, 3, 3, 3, 3, 3, 5, 5, 5, 5, 5]
- exps = [20, 64, 92, 92, 156, 312, 260, 240, 240, 624, 872, 872, 1248, 1248, 1248, 664]
- outs = [20, 32, 32, 52, 52, 104, 104, 104, 104, 144, 144, 208, 208, 208, 208, 208]
- strides_set = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1]
- reductions = [0, 0, 0, 24, 40, 0, 0, 0, 0, 156, 220, 220, 0, 312, 0, 168]
-
- pre_out = out_channel
- for dwk, stride, exp, out, reduction in zip(dwkernels, strides_set, exps, outs, reductions):
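-        # use a projection shortcut (depthwise + pointwise conv) when the output width or spatial size changes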
- shortcut = not (out == pre_out and stride == 1)
- nn = ghost_bottleneck(nn, dwk, stride, exp, out, reduction, shortcut)
- pre_out = out
-
- nn = Conv2D(
- 664,
- (1, 1),
- strides=(1, 1),
- padding="valid",
- use_bias=False,
- kernel_initializer=keras.initializers.VarianceScaling(
- scale=2.0, mode="fan_out", distribution="truncated_normal"
- ),
- )(nn)
- nn = BatchNormalization(axis=-1)(nn)
- nn = Activation("relu")(nn)
-
- xx = Model(inputs=inputs, outputs=nn, name="GhostFaceNetV1")
-
- # post modelling
- inputs = xx.inputs[0]
- nn = xx.outputs[0]
-
- nn = keras.layers.DepthwiseConv2D(nn.shape[1], use_bias=False, name="GDC_dw")(nn)
- nn = keras.layers.BatchNormalization(momentum=0.99, epsilon=0.001, name="GDC_batchnorm")(nn)
- nn = keras.layers.Conv2D(
- 512, 1, use_bias=True, kernel_initializer="glorot_normal", name="GDC_conv"
- )(nn)
- nn = keras.layers.Flatten(name="GDC_flatten")(nn)
-
- embedding = keras.layers.BatchNormalization(
- momentum=0.99, epsilon=0.001, scale=True, name="pre_embedding"
- )(nn)
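-    # dtype="float32" keeps the embedding output in float32, e.g. under mixed-precision policies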
- embedding_fp32 = keras.layers.Activation("linear", dtype="float32", name="embedding")(embedding)
-
- model = keras.models.Model(inputs, embedding_fp32, name=xx.name)
- model = replace_relu_with_prelu(model=model)
- return model
-
-
-def se_module(inputs, reduction):
- """
- Refactored from github.com/HamadYA/GhostFaceNets/blob/main/backbones/ghost_model.py
- """
- # get the channel axis
- channel_axis = 1 if K.image_data_format() == "channels_first" else -1
- # filters = channel axis shape
- filters = inputs.shape[channel_axis]
-
- # from None x H x W x C to None x C
- se = GlobalAveragePooling2D()(inputs)
-
-    # Reshape None x C to None x 1 x 1 x C
-    se = Reshape((1, 1, filters))(se)
-
-    # Squeeze down to `reduction` channels. The size will be 1 x 1 x reduction
- se = Conv2D(
- reduction,
- kernel_size=1,
- use_bias=True,
- kernel_initializer=keras.initializers.VarianceScaling(
- scale=2.0, mode="fan_out", distribution="truncated_normal"
- ),
- )(se)
- se = Activation("relu")(se)
-
- # Excitation using C filters. The size will be 1 x 1 x C
- se = Conv2D(
- filters,
- kernel_size=1,
- use_bias=True,
- kernel_initializer=keras.initializers.VarianceScaling(
- scale=2.0, mode="fan_out", distribution="truncated_normal"
- ),
- )(se)
- se = Activation("hard_sigmoid")(se)
-
- return Multiply()([inputs, se])
-
-
-def ghost_module(inputs, out, convkernel=1, dwkernel=3, add_activation=True):
- """
- Refactored from github.com/HamadYA/GhostFaceNets/blob/main/backbones/ghost_model.py
- """
- conv_out_channel = out // 2
- cc = Conv2D(
- conv_out_channel,
- convkernel,
- use_bias=False,
- strides=(1, 1),
- padding="same",
- kernel_initializer=keras.initializers.VarianceScaling(
- scale=2.0, mode="fan_out", distribution="truncated_normal"
- ),
- )(inputs)
- cc = BatchNormalization(axis=-1)(cc)
- if add_activation:
- cc = Activation("relu")(cc)
-
- nn = DepthwiseConv2D(
- dwkernel,
- 1,
- padding="same",
- use_bias=False,
- depthwise_initializer=keras.initializers.VarianceScaling(
- scale=2.0, mode="fan_out", distribution="truncated_normal"
- ),
- )(cc)
- nn = BatchNormalization(axis=-1)(nn)
- if add_activation:
- nn = Activation("relu")(nn)
- return Concatenate()([cc, nn])
-
-
-def ghost_bottleneck(inputs, dwkernel, strides, exp, out, reduction, shortcut=True):
- """
- Refactored from github.com/HamadYA/GhostFaceNets/blob/main/backbones/ghost_model.py
- """
- nn = ghost_module(inputs, exp, add_activation=True)
- if strides > 1:
- # Extra depth conv if strides higher than 1
- nn = DepthwiseConv2D(
- dwkernel,
- strides,
- padding="same",
- use_bias=False,
- depthwise_initializer=keras.initializers.VarianceScaling(
- scale=2.0, mode="fan_out", distribution="truncated_normal"
- ),
- )(nn)
- nn = BatchNormalization(axis=-1)(nn)
-
- if reduction > 0:
- # Squeeze and excite
- nn = se_module(nn, reduction)
-
- # Point-wise linear projection
- nn = ghost_module(nn, out, add_activation=False) # ghost2 = GhostModule(exp, out, relu=False)
-
- if shortcut:
- xx = DepthwiseConv2D(
- dwkernel,
- strides,
- padding="same",
- use_bias=False,
- depthwise_initializer=keras.initializers.VarianceScaling(
- scale=2.0, mode="fan_out", distribution="truncated_normal"
- ),
- )(inputs)
- xx = BatchNormalization(axis=-1)(xx)
- xx = Conv2D(
- out,
- (1, 1),
- strides=(1, 1),
- padding="valid",
- use_bias=False,
- kernel_initializer=keras.initializers.VarianceScaling(
- scale=2.0, mode="fan_out", distribution="truncated_normal"
- ),
- )(xx)
- xx = BatchNormalization(axis=-1)(xx)
- else:
- xx = inputs
- return Add()([xx, nn])
-
-
-def replace_relu_with_prelu(model) -> Model:
- """
- Replaces relu activation function in the built model with prelu.
- Refactored from github.com/HamadYA/GhostFaceNets/blob/main/backbones/ghost_model.py
- Args:
- model (Model): built model with relu activation functions
-    Returns:
- model (Model): built model with prelu activation functions
- """
-
- def convert_relu(layer):
- if isinstance(layer, ReLU) or (
- isinstance(layer, Activation) and layer.activation == keras.activations.relu
- ):
- layer_name = layer.name.replace("_relu", "_prelu")
- return PReLU(
- shared_axes=[1, 2],
- alpha_initializer=tf.initializers.Constant(0.25),
- name=layer_name,
- )
- return layer
-
- input_tensors = keras.layers.Input(model.input_shape[1:])
- return keras.models.clone_model(model, input_tensors=input_tensors, clone_function=convert_relu)
diff --git a/deepface/models/facial_recognition/OpenFace.py b/deepface/models/facial_recognition/OpenFace.py
deleted file mode 100644
index c9c1b7a..0000000
--- a/deepface/models/facial_recognition/OpenFace.py
+++ /dev/null
@@ -1,394 +0,0 @@
-# 3rd party dependencies
-import tensorflow as tf
-
-# project dependencies
-from deepface.commons import package_utils, weight_utils
-from deepface.models.FacialRecognition import FacialRecognition
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-tf_version = package_utils.get_tf_major_version()
-if tf_version == 1:
- from keras.models import Model
- from keras.layers import Conv2D, ZeroPadding2D, Input, concatenate
- from keras.layers import Dense, Activation, Lambda, Flatten, BatchNormalization
- from keras.layers import MaxPooling2D, AveragePooling2D
- from keras import backend as K
-else:
- from tensorflow.keras.models import Model
- from tensorflow.keras.layers import Conv2D, ZeroPadding2D, Input, concatenate
- from tensorflow.keras.layers import Dense, Activation, Lambda, Flatten, BatchNormalization
- from tensorflow.keras.layers import MaxPooling2D, AveragePooling2D
- from tensorflow.keras import backend as K
-
-# pylint: disable=unnecessary-lambda
-
-# ---------------------------------------
-
-# pylint: disable=too-few-public-methods
-class OpenFaceClient(FacialRecognition):
- """
- OpenFace model class
- """
-
- def __init__(self):
- self.model = load_model()
- self.model_name = "OpenFace"
- self.input_shape = (96, 96)
- self.output_shape = 128
-
-
-def load_model(
- url="https://github.com/serengil/deepface_models/releases/download/v1.0/openface_weights.h5",
-) -> Model:
- """
-    Construct the OpenFace model, download its weights and load them
- Returns:
- model (Model)
- """
- myInput = Input(shape=(96, 96, 3))
-
- x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(myInput)
- x = Conv2D(64, (7, 7), strides=(2, 2), name="conv1")(x)
- x = BatchNormalization(axis=3, epsilon=0.00001, name="bn1")(x)
- x = Activation("relu")(x)
- x = ZeroPadding2D(padding=(1, 1))(x)
- x = MaxPooling2D(pool_size=3, strides=2)(x)
- x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name="lrn_1")(x)
- x = Conv2D(64, (1, 1), name="conv2")(x)
- x = BatchNormalization(axis=3, epsilon=0.00001, name="bn2")(x)
- x = Activation("relu")(x)
- x = ZeroPadding2D(padding=(1, 1))(x)
- x = Conv2D(192, (3, 3), name="conv3")(x)
- x = BatchNormalization(axis=3, epsilon=0.00001, name="bn3")(x)
- x = Activation("relu")(x)
- x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name="lrn_2")(x) # x is equal added
- x = ZeroPadding2D(padding=(1, 1))(x)
- x = MaxPooling2D(pool_size=3, strides=2)(x)
-
- # Inception3a
- inception_3a_3x3 = Conv2D(96, (1, 1), name="inception_3a_3x3_conv1")(x)
- inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_3x3_bn1")(
- inception_3a_3x3
- )
- inception_3a_3x3 = Activation("relu")(inception_3a_3x3)
- inception_3a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3a_3x3)
- inception_3a_3x3 = Conv2D(128, (3, 3), name="inception_3a_3x3_conv2")(inception_3a_3x3)
- inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_3x3_bn2")(
- inception_3a_3x3
- )
- inception_3a_3x3 = Activation("relu")(inception_3a_3x3)
-
- inception_3a_5x5 = Conv2D(16, (1, 1), name="inception_3a_5x5_conv1")(x)
- inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_5x5_bn1")(
- inception_3a_5x5
- )
- inception_3a_5x5 = Activation("relu")(inception_3a_5x5)
- inception_3a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3a_5x5)
- inception_3a_5x5 = Conv2D(32, (5, 5), name="inception_3a_5x5_conv2")(inception_3a_5x5)
- inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_5x5_bn2")(
- inception_3a_5x5
- )
- inception_3a_5x5 = Activation("relu")(inception_3a_5x5)
-
- inception_3a_pool = MaxPooling2D(pool_size=3, strides=2)(x)
- inception_3a_pool = Conv2D(32, (1, 1), name="inception_3a_pool_conv")(inception_3a_pool)
- inception_3a_pool = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_pool_bn")(
- inception_3a_pool
- )
- inception_3a_pool = Activation("relu")(inception_3a_pool)
- inception_3a_pool = ZeroPadding2D(padding=((3, 4), (3, 4)))(inception_3a_pool)
-
- inception_3a_1x1 = Conv2D(64, (1, 1), name="inception_3a_1x1_conv")(x)
- inception_3a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_1x1_bn")(
- inception_3a_1x1
- )
- inception_3a_1x1 = Activation("relu")(inception_3a_1x1)
-
- inception_3a = concatenate(
- [inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1], axis=3
- )
-
- # Inception3b
- inception_3b_3x3 = Conv2D(96, (1, 1), name="inception_3b_3x3_conv1")(inception_3a)
- inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_3x3_bn1")(
- inception_3b_3x3
- )
- inception_3b_3x3 = Activation("relu")(inception_3b_3x3)
- inception_3b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3b_3x3)
- inception_3b_3x3 = Conv2D(128, (3, 3), name="inception_3b_3x3_conv2")(inception_3b_3x3)
- inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_3x3_bn2")(
- inception_3b_3x3
- )
- inception_3b_3x3 = Activation("relu")(inception_3b_3x3)
-
- inception_3b_5x5 = Conv2D(32, (1, 1), name="inception_3b_5x5_conv1")(inception_3a)
- inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_5x5_bn1")(
- inception_3b_5x5
- )
- inception_3b_5x5 = Activation("relu")(inception_3b_5x5)
- inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
- inception_3b_5x5 = Conv2D(64, (5, 5), name="inception_3b_5x5_conv2")(inception_3b_5x5)
- inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_5x5_bn2")(
- inception_3b_5x5
- )
- inception_3b_5x5 = Activation("relu")(inception_3b_5x5)
-
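-    # L2 pooling implemented manually: square, 3x3 average pooling (divides by 9),
-    # multiply by 9 to recover the sum of squares, then take the square root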
- inception_3b_pool = Lambda(lambda x: x**2, name="power2_3b")(inception_3a)
- inception_3b_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_3b_pool)
- inception_3b_pool = Lambda(lambda x: x * 9, name="mult9_3b")(inception_3b_pool)
- inception_3b_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_3b")(inception_3b_pool)
- inception_3b_pool = Conv2D(64, (1, 1), name="inception_3b_pool_conv")(inception_3b_pool)
- inception_3b_pool = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_pool_bn")(
- inception_3b_pool
- )
- inception_3b_pool = Activation("relu")(inception_3b_pool)
- inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)
-
- inception_3b_1x1 = Conv2D(64, (1, 1), name="inception_3b_1x1_conv")(inception_3a)
- inception_3b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_1x1_bn")(
- inception_3b_1x1
- )
- inception_3b_1x1 = Activation("relu")(inception_3b_1x1)
-
- inception_3b = concatenate(
- [inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1], axis=3
- )
-
- # Inception3c
- inception_3c_3x3 = Conv2D(128, (1, 1), strides=(1, 1), name="inception_3c_3x3_conv1")(
- inception_3b
- )
- inception_3c_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3c_3x3_bn1")(
- inception_3c_3x3
- )
- inception_3c_3x3 = Activation("relu")(inception_3c_3x3)
- inception_3c_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3c_3x3)
- inception_3c_3x3 = Conv2D(256, (3, 3), strides=(2, 2), name="inception_3c_3x3_conv" + "2")(
- inception_3c_3x3
- )
- inception_3c_3x3 = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_3c_3x3_bn" + "2"
- )(inception_3c_3x3)
- inception_3c_3x3 = Activation("relu")(inception_3c_3x3)
-
- inception_3c_5x5 = Conv2D(32, (1, 1), strides=(1, 1), name="inception_3c_5x5_conv1")(
- inception_3b
- )
- inception_3c_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3c_5x5_bn1")(
- inception_3c_5x5
- )
- inception_3c_5x5 = Activation("relu")(inception_3c_5x5)
- inception_3c_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3c_5x5)
- inception_3c_5x5 = Conv2D(64, (5, 5), strides=(2, 2), name="inception_3c_5x5_conv" + "2")(
- inception_3c_5x5
- )
- inception_3c_5x5 = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_3c_5x5_bn" + "2"
- )(inception_3c_5x5)
- inception_3c_5x5 = Activation("relu")(inception_3c_5x5)
-
- inception_3c_pool = MaxPooling2D(pool_size=3, strides=2)(inception_3b)
- inception_3c_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_3c_pool)
-
- inception_3c = concatenate([inception_3c_3x3, inception_3c_5x5, inception_3c_pool], axis=3)
-
- # inception 4a
- inception_4a_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name="inception_4a_3x3_conv" + "1")(
- inception_3c
- )
- inception_4a_3x3 = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_4a_3x3_bn" + "1"
- )(inception_4a_3x3)
- inception_4a_3x3 = Activation("relu")(inception_4a_3x3)
- inception_4a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4a_3x3)
- inception_4a_3x3 = Conv2D(192, (3, 3), strides=(1, 1), name="inception_4a_3x3_conv" + "2")(
- inception_4a_3x3
- )
- inception_4a_3x3 = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_4a_3x3_bn" + "2"
- )(inception_4a_3x3)
- inception_4a_3x3 = Activation("relu")(inception_4a_3x3)
-
- inception_4a_5x5 = Conv2D(32, (1, 1), strides=(1, 1), name="inception_4a_5x5_conv1")(
- inception_3c
- )
- inception_4a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_4a_5x5_bn1")(
- inception_4a_5x5
- )
- inception_4a_5x5 = Activation("relu")(inception_4a_5x5)
- inception_4a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4a_5x5)
- inception_4a_5x5 = Conv2D(64, (5, 5), strides=(1, 1), name="inception_4a_5x5_conv" + "2")(
- inception_4a_5x5
- )
- inception_4a_5x5 = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_4a_5x5_bn" + "2"
- )(inception_4a_5x5)
- inception_4a_5x5 = Activation("relu")(inception_4a_5x5)
-
- inception_4a_pool = Lambda(lambda x: x**2, name="power2_4a")(inception_3c)
- inception_4a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_4a_pool)
- inception_4a_pool = Lambda(lambda x: x * 9, name="mult9_4a")(inception_4a_pool)
- inception_4a_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_4a")(inception_4a_pool)
-
- inception_4a_pool = Conv2D(128, (1, 1), strides=(1, 1), name="inception_4a_pool_conv" + "")(
- inception_4a_pool
- )
- inception_4a_pool = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_4a_pool_bn" + ""
- )(inception_4a_pool)
- inception_4a_pool = Activation("relu")(inception_4a_pool)
- inception_4a_pool = ZeroPadding2D(padding=(2, 2))(inception_4a_pool)
-
- inception_4a_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name="inception_4a_1x1_conv" + "")(
- inception_3c
- )
- inception_4a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_4a_1x1_bn" + "")(
- inception_4a_1x1
- )
- inception_4a_1x1 = Activation("relu")(inception_4a_1x1)
-
- inception_4a = concatenate(
- [inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1], axis=3
- )
-
- # inception4e
- inception_4e_3x3 = Conv2D(160, (1, 1), strides=(1, 1), name="inception_4e_3x3_conv" + "1")(
- inception_4a
- )
- inception_4e_3x3 = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_4e_3x3_bn" + "1"
- )(inception_4e_3x3)
- inception_4e_3x3 = Activation("relu")(inception_4e_3x3)
- inception_4e_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4e_3x3)
- inception_4e_3x3 = Conv2D(256, (3, 3), strides=(2, 2), name="inception_4e_3x3_conv" + "2")(
- inception_4e_3x3
- )
- inception_4e_3x3 = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_4e_3x3_bn" + "2"
- )(inception_4e_3x3)
- inception_4e_3x3 = Activation("relu")(inception_4e_3x3)
-
- inception_4e_5x5 = Conv2D(64, (1, 1), strides=(1, 1), name="inception_4e_5x5_conv" + "1")(
- inception_4a
- )
- inception_4e_5x5 = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_4e_5x5_bn" + "1"
- )(inception_4e_5x5)
- inception_4e_5x5 = Activation("relu")(inception_4e_5x5)
- inception_4e_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4e_5x5)
- inception_4e_5x5 = Conv2D(128, (5, 5), strides=(2, 2), name="inception_4e_5x5_conv" + "2")(
- inception_4e_5x5
- )
- inception_4e_5x5 = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_4e_5x5_bn" + "2"
- )(inception_4e_5x5)
- inception_4e_5x5 = Activation("relu")(inception_4e_5x5)
-
- inception_4e_pool = MaxPooling2D(pool_size=3, strides=2)(inception_4a)
- inception_4e_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_4e_pool)
-
- inception_4e = concatenate([inception_4e_3x3, inception_4e_5x5, inception_4e_pool], axis=3)
-
- # inception5a
- inception_5a_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5a_3x3_conv" + "1")(
- inception_4e
- )
- inception_5a_3x3 = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_5a_3x3_bn" + "1"
- )(inception_5a_3x3)
- inception_5a_3x3 = Activation("relu")(inception_5a_3x3)
- inception_5a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5a_3x3)
- inception_5a_3x3 = Conv2D(384, (3, 3), strides=(1, 1), name="inception_5a_3x3_conv" + "2")(
- inception_5a_3x3
- )
- inception_5a_3x3 = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_5a_3x3_bn" + "2"
- )(inception_5a_3x3)
- inception_5a_3x3 = Activation("relu")(inception_5a_3x3)
-
- inception_5a_pool = Lambda(lambda x: x**2, name="power2_5a")(inception_4e)
- inception_5a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_5a_pool)
- inception_5a_pool = Lambda(lambda x: x * 9, name="mult9_5a")(inception_5a_pool)
- inception_5a_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_5a")(inception_5a_pool)
-
- inception_5a_pool = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5a_pool_conv" + "")(
- inception_5a_pool
- )
- inception_5a_pool = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_5a_pool_bn" + ""
- )(inception_5a_pool)
- inception_5a_pool = Activation("relu")(inception_5a_pool)
- inception_5a_pool = ZeroPadding2D(padding=(1, 1))(inception_5a_pool)
-
- inception_5a_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name="inception_5a_1x1_conv" + "")(
- inception_4e
- )
- inception_5a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_5a_1x1_bn" + "")(
- inception_5a_1x1
- )
- inception_5a_1x1 = Activation("relu")(inception_5a_1x1)
-
- inception_5a = concatenate([inception_5a_3x3, inception_5a_pool, inception_5a_1x1], axis=3)
-
- # inception_5b
- inception_5b_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5b_3x3_conv" + "1")(
- inception_5a
- )
- inception_5b_3x3 = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_5b_3x3_bn" + "1"
- )(inception_5b_3x3)
- inception_5b_3x3 = Activation("relu")(inception_5b_3x3)
- inception_5b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5b_3x3)
- inception_5b_3x3 = Conv2D(384, (3, 3), strides=(1, 1), name="inception_5b_3x3_conv" + "2")(
- inception_5b_3x3
- )
- inception_5b_3x3 = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_5b_3x3_bn" + "2"
- )(inception_5b_3x3)
- inception_5b_3x3 = Activation("relu")(inception_5b_3x3)
-
- inception_5b_pool = MaxPooling2D(pool_size=3, strides=2)(inception_5a)
-
- inception_5b_pool = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5b_pool_conv" + "")(
- inception_5b_pool
- )
- inception_5b_pool = BatchNormalization(
- axis=3, epsilon=0.00001, name="inception_5b_pool_bn" + ""
- )(inception_5b_pool)
- inception_5b_pool = Activation("relu")(inception_5b_pool)
-
- inception_5b_pool = ZeroPadding2D(padding=(1, 1))(inception_5b_pool)
-
- inception_5b_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name="inception_5b_1x1_conv" + "")(
- inception_5a
- )
- inception_5b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_5b_1x1_bn" + "")(
- inception_5b_1x1
- )
- inception_5b_1x1 = Activation("relu")(inception_5b_1x1)
-
- inception_5b = concatenate([inception_5b_3x3, inception_5b_pool, inception_5b_1x1], axis=3)
-
- av_pool = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(inception_5b)
- reshape_layer = Flatten()(av_pool)
- dense_layer = Dense(128, name="dense_layer")(reshape_layer)
- norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=1), name="norm_layer")(dense_layer)
-
- # Final Model
- model = Model(inputs=[myInput], outputs=norm_layer)
-
- # -----------------------------------
-
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="openface_weights.h5", source_url=url
- )
-
- model = weight_utils.load_model_weights(
- model=model, weight_file=weight_file
- )
-
- # -----------------------------------
-
- return model
diff --git a/deepface/models/facial_recognition/SFace.py b/deepface/models/facial_recognition/SFace.py
deleted file mode 100644
index 0f1d421..0000000
--- a/deepface/models/facial_recognition/SFace.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# built-in dependencies
-from typing import Any, List
-
-# 3rd party dependencies
-import numpy as np
-import cv2 as cv
-
-# project dependencies
-from deepface.commons import weight_utils
-from deepface.models.FacialRecognition import FacialRecognition
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# pylint: disable=line-too-long, too-few-public-methods
-
-
-class SFaceClient(FacialRecognition):
- """
- SFace model class
- """
-
- def __init__(self):
- self.model = load_model()
- self.model_name = "SFace"
- self.input_shape = (112, 112)
- self.output_shape = 128
-
- def forward(self, img: np.ndarray) -> List[float]:
- """
- Find embeddings with SFace model
- This model necessitates the override of the forward method
- because it is not a keras model.
- Args:
- img (np.ndarray): pre-loaded image in BGR
-        Returns:
- embeddings (list): multi-dimensional vector
- """
- # return self.model.predict(img)[0].tolist()
-
- # revert the image to original format and preprocess using the model
- input_blob = (img[0] * 255).astype(np.uint8)
-
- embeddings = self.model.model.feature(input_blob)
-
- return embeddings[0].tolist()
-
-
-def load_model(
- url="https://github.com/opencv/opencv_zoo/raw/main/models/face_recognition_sface/face_recognition_sface_2021dec.onnx",
-) -> Any:
- """
- Construct SFace model, download its weights and load
- """
-
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="face_recognition_sface_2021dec.onnx", source_url=url
- )
-
- model = SFaceWrapper(model_path=weight_file)
-
- return model
-
-
-class SFaceWrapper:
- def __init__(self, model_path):
- """
-        SFace wrapper covering model construction, layer info and prediction
- """
- try:
- self.model = cv.FaceRecognizerSF.create(
- model=model_path, config="", backend_id=0, target_id=0
- )
- except Exception as err:
- raise ValueError(
- "Exception while calling opencv.FaceRecognizerSF module."
- + "This is an optional dependency."
- + "You can install it as pip install opencv-contrib-python."
- ) from err
diff --git a/deepface/models/facial_recognition/VGGFace.py b/deepface/models/facial_recognition/VGGFace.py
deleted file mode 100644
index 56c8a54..0000000
--- a/deepface/models/facial_recognition/VGGFace.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# built-in dependencies
-from typing import List
-
-# 3rd party dependencies
-import numpy as np
-
-# project dependencies
-from deepface.commons import package_utils, weight_utils
-from deepface.modules import verification
-from deepface.models.FacialRecognition import FacialRecognition
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# ---------------------------------------
-
-tf_version = package_utils.get_tf_major_version()
-if tf_version == 1:
- from keras.models import Model, Sequential
- from keras.layers import (
- Convolution2D,
- ZeroPadding2D,
- MaxPooling2D,
- Flatten,
- Dropout,
- Activation,
- )
-else:
- from tensorflow.keras.models import Model, Sequential
- from tensorflow.keras.layers import (
- Convolution2D,
- ZeroPadding2D,
- MaxPooling2D,
- Flatten,
- Dropout,
- Activation,
- )
-
-# ---------------------------------------
-
-# pylint: disable=too-few-public-methods
-class VggFaceClient(FacialRecognition):
- """
- VGG-Face model class
- """
-
- def __init__(self):
- self.model = load_model()
- self.model_name = "VGG-Face"
- self.input_shape = (224, 224)
- self.output_shape = 4096
-
- def forward(self, img: np.ndarray) -> List[float]:
- """
- Generates embeddings using the VGG-Face model.
- This method incorporates an additional normalization layer,
- necessitating the override of the forward method.
-
- Args:
- img (np.ndarray): pre-loaded image in BGR
-        Returns:
- embeddings (list): multi-dimensional vector
- """
- # model.predict causes memory issue when it is called in a for loop
- # embedding = model.predict(img, verbose=0)[0].tolist()
-
-        # having a normalization layer in the descriptor causes trouble for some gpu users (e.g. issues 957, 966)
-        # instead we now calculate it in the traditional way rather than with the keras backend
- embedding = self.model(img, training=False).numpy()[0].tolist()
- embedding = verification.l2_normalize(embedding)
- return embedding.tolist()
-
-
-def base_model() -> Sequential:
- """
-    Base VGG-Face model used for classification - not for finding embeddings
-    Returns:
-        model (Sequential): model trained to classify 2622 identities
- """
- model = Sequential()
- model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
- model.add(Convolution2D(64, (3, 3), activation="relu"))
- model.add(ZeroPadding2D((1, 1)))
- model.add(Convolution2D(64, (3, 3), activation="relu"))
- model.add(MaxPooling2D((2, 2), strides=(2, 2)))
-
- model.add(ZeroPadding2D((1, 1)))
- model.add(Convolution2D(128, (3, 3), activation="relu"))
- model.add(ZeroPadding2D((1, 1)))
- model.add(Convolution2D(128, (3, 3), activation="relu"))
- model.add(MaxPooling2D((2, 2), strides=(2, 2)))
-
- model.add(ZeroPadding2D((1, 1)))
- model.add(Convolution2D(256, (3, 3), activation="relu"))
- model.add(ZeroPadding2D((1, 1)))
- model.add(Convolution2D(256, (3, 3), activation="relu"))
- model.add(ZeroPadding2D((1, 1)))
- model.add(Convolution2D(256, (3, 3), activation="relu"))
- model.add(MaxPooling2D((2, 2), strides=(2, 2)))
-
- model.add(ZeroPadding2D((1, 1)))
- model.add(Convolution2D(512, (3, 3), activation="relu"))
- model.add(ZeroPadding2D((1, 1)))
- model.add(Convolution2D(512, (3, 3), activation="relu"))
- model.add(ZeroPadding2D((1, 1)))
- model.add(Convolution2D(512, (3, 3), activation="relu"))
- model.add(MaxPooling2D((2, 2), strides=(2, 2)))
-
- model.add(ZeroPadding2D((1, 1)))
- model.add(Convolution2D(512, (3, 3), activation="relu"))
- model.add(ZeroPadding2D((1, 1)))
- model.add(Convolution2D(512, (3, 3), activation="relu"))
- model.add(ZeroPadding2D((1, 1)))
- model.add(Convolution2D(512, (3, 3), activation="relu"))
- model.add(MaxPooling2D((2, 2), strides=(2, 2)))
-
- model.add(Convolution2D(4096, (7, 7), activation="relu"))
- model.add(Dropout(0.5))
- model.add(Convolution2D(4096, (1, 1), activation="relu"))
- model.add(Dropout(0.5))
- model.add(Convolution2D(2622, (1, 1)))
- model.add(Flatten())
- model.add(Activation("softmax"))
-
- return model
-
-
-def load_model(
- url="https://github.com/serengil/deepface_models/releases/download/v1.0/vgg_face_weights.h5",
-) -> Model:
- """
- Final VGG-Face model being used for finding embeddings
- Returns:
- model (Model): returning 4096 dimensional vectors
- """
-
- model = base_model()
-
- weight_file = weight_utils.download_weights_if_necessary(
- file_name="vgg_face_weights.h5", source_url=url
- )
-
- model = weight_utils.load_model_weights(
- model=model, weight_file=weight_file
- )
-
- # 2622d dimensional model
- # vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
-
-    # the 4096 dimensional model offers a 6% to 14% increase in accuracy!
- # - softmax causes underfitting
- # - added normalization layer to avoid underfitting with euclidean
- # as described here: https://github.com/serengil/deepface/issues/944
- base_model_output = Sequential()
- base_model_output = Flatten()(model.layers[-5].output)
-    # keras backend's l2 normalization layer causes trouble for some gpu users (e.g. issues 957, 966)
- # base_model_output = Lambda(lambda x: K.l2_normalize(x, axis=1), name="norm_layer")(
- # base_model_output
- # )
- vgg_face_descriptor = Model(inputs=model.input, outputs=base_model_output)
-
- return vgg_face_descriptor
diff --git a/deepface/models/facial_recognition/__init__.py b/deepface/models/facial_recognition/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/deepface/models/spoofing/FasNet.py b/deepface/models/spoofing/FasNet.py
deleted file mode 100644
index 5eb6f92..0000000
--- a/deepface/models/spoofing/FasNet.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# built-in dependencies
-from typing import Union
-
-# 3rd party dependencies
-import cv2
-import numpy as np
-
-# project dependencies
-from deepface.commons import weight_utils
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# pylint: disable=line-too-long, too-few-public-methods, nested-min-max
-class Fasnet:
- """
- Mini Face Anti Spoofing Net Library from repo: github.com/minivision-ai/Silent-Face-Anti-Spoofing
-
- Minivision's Silent-Face-Anti-Spoofing Repo licensed under Apache License 2.0
- Ref: github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/src/model_lib/MiniFASNet.py
- """
-
- def __init__(self):
-        # pytorch is an optional dependency; enforce that it is installed if this class is imported
- try:
- import torch
- except Exception as err:
- raise ValueError(
- "You must install torch with `pip install pytorch` command to use face anti spoofing module"
- ) from err
-
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
- self.device = device
-
- # download pre-trained models if not installed yet
- first_model_weight_file = weight_utils.download_weights_if_necessary(
- file_name="2.7_80x80_MiniFASNetV2.pth",
- source_url="https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/raw/master/resources/anti_spoof_models/2.7_80x80_MiniFASNetV2.pth",
- )
-
- second_model_weight_file = weight_utils.download_weights_if_necessary(
- file_name="4_0_0_80x80_MiniFASNetV1SE.pth",
- source_url="https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/raw/master/resources/anti_spoof_models/4_0_0_80x80_MiniFASNetV1SE.pth",
- )
-
- # guarantees Fasnet imported and torch installed
- from deepface.models.spoofing import FasNetBackbone
-
-        # Fasnet uses 2 distinct models to predict, then sums their predictions
-        # to make a final decision
-
- first_model = FasNetBackbone.MiniFASNetV2(conv6_kernel=(5, 5)).to(device)
- second_model = FasNetBackbone.MiniFASNetV1SE(conv6_kernel=(5, 5)).to(device)
-
- # load model weight for first model
- state_dict = torch.load(first_model_weight_file, map_location=device)
- keys = iter(state_dict)
- first_layer_name = keys.__next__()
-
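-        # weights trained with torch.nn.DataParallel are saved with a "module." prefix;
-        # strip it so the keys match the plain model's state dict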
- if first_layer_name.find("module.") >= 0:
- from collections import OrderedDict
-
- new_state_dict = OrderedDict()
- for key, value in state_dict.items():
- name_key = key[7:]
- new_state_dict[name_key] = value
- first_model.load_state_dict(new_state_dict)
- else:
- first_model.load_state_dict(state_dict)
-
- # load model weight for second model
- state_dict = torch.load(second_model_weight_file, map_location=device)
- keys = iter(state_dict)
- first_layer_name = keys.__next__()
-
- if first_layer_name.find("module.") >= 0:
- from collections import OrderedDict
-
- new_state_dict = OrderedDict()
- for key, value in state_dict.items():
- name_key = key[7:]
- new_state_dict[name_key] = value
- second_model.load_state_dict(new_state_dict)
- else:
- second_model.load_state_dict(state_dict)
-
- # evaluate models
- _ = first_model.eval()
- _ = second_model.eval()
-
- self.first_model = first_model
- self.second_model = second_model
-
- def analyze(self, img: np.ndarray, facial_area: Union[list, tuple]):
- """
-        Analyze whether a given face image is spoofed or not
-        Args:
-            img (np.ndarray): pre-loaded image
-            facial_area (list or tuple): facial rectangle area coordinates with x, y, w, h respectively
- Returns:
- result (tuple): a result tuple consisting of is_real and score
- """
- import torch
- import torch.nn.functional as F
-
- x, y, w, h = facial_area
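-        # crop the face at two different scales (2.7x and 4x the detected box), one crop per model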
- first_img = crop(img, (x, y, w, h), 2.7, 80, 80)
- second_img = crop(img, (x, y, w, h), 4, 80, 80)
-
- test_transform = Compose(
- [
- ToTensor(),
- ]
- )
-
- first_img = test_transform(first_img)
- first_img = first_img.unsqueeze(0).to(self.device)
-
- second_img = test_transform(second_img)
- second_img = second_img.unsqueeze(0).to(self.device)
-
- with torch.no_grad():
- first_result = self.first_model.forward(first_img)
- first_result = F.softmax(first_result).cpu().numpy()
-
- second_result = self.second_model.forward(second_img)
- second_result = F.softmax(second_result).cpu().numpy()
-
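-        # sum the softmax outputs of both models; class index 1 corresponds to a real face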
- prediction = np.zeros((1, 3))
- prediction += first_result
- prediction += second_result
-
- label = np.argmax(prediction)
- is_real = True if label == 1 else False # pylint: disable=simplifiable-if-expression
- score = prediction[0][label] / 2
-
- return is_real, score
-
-
-# subsidiary classes and functions
-
-
-def to_tensor(pic):
- """Convert a ``numpy.ndarray`` to tensor.
-
- See ``ToTensor`` for more details.
-
- Args:
- pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
-
- Returns:
- Tensor: Converted image.
- """
- import torch
-
- # handle numpy array
- # IR image channel=1: modify by lzc --> 20190730
- if pic.ndim == 2:
- pic = pic.reshape((pic.shape[0], pic.shape[1], 1))
-
- img = torch.from_numpy(pic.transpose((2, 0, 1)))
- # backward compatibility
- # return img.float().div(255) modify by zkx
- return img.float()
-
-
-class Compose:
- def __init__(self, transforms):
- self.transforms = transforms
-
- def __call__(self, img):
- for t in self.transforms:
- img = t(img)
- return img
-
-
-class ToTensor:
- def __call__(self, pic):
- return to_tensor(pic)
-
-
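-# scale the detected bounding box around its center, clamp it to the image borders,
-# and return the new (left, top, right, bottom) corner coordinates as integers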
-def _get_new_box(src_w, src_h, bbox, scale):
- x = bbox[0]
- y = bbox[1]
- box_w = bbox[2]
- box_h = bbox[3]
- scale = min((src_h - 1) / box_h, min((src_w - 1) / box_w, scale))
- new_width = box_w * scale
- new_height = box_h * scale
- center_x, center_y = box_w / 2 + x, box_h / 2 + y
- left_top_x = center_x - new_width / 2
- left_top_y = center_y - new_height / 2
- right_bottom_x = center_x + new_width / 2
- right_bottom_y = center_y + new_height / 2
- if left_top_x < 0:
- right_bottom_x -= left_top_x
- left_top_x = 0
- if left_top_y < 0:
- right_bottom_y -= left_top_y
- left_top_y = 0
- if right_bottom_x > src_w - 1:
- left_top_x -= right_bottom_x - src_w + 1
- right_bottom_x = src_w - 1
- if right_bottom_y > src_h - 1:
- left_top_y -= right_bottom_y - src_h + 1
- right_bottom_y = src_h - 1
- return int(left_top_x), int(left_top_y), int(right_bottom_x), int(right_bottom_y)
-
-
-def crop(org_img, bbox, scale, out_w, out_h):
- src_h, src_w, _ = np.shape(org_img)
- left_top_x, left_top_y, right_bottom_x, right_bottom_y = _get_new_box(src_w, src_h, bbox, scale)
- img = org_img[left_top_y : right_bottom_y + 1, left_top_x : right_bottom_x + 1]
- dst_img = cv2.resize(img, (out_w, out_h))
- return dst_img
diff --git a/deepface/models/spoofing/FasNetBackbone.py b/deepface/models/spoofing/FasNetBackbone.py
deleted file mode 100644
index abfb6ce..0000000
--- a/deepface/models/spoofing/FasNetBackbone.py
+++ /dev/null
@@ -1,524 +0,0 @@
-# These classes are copied from Minivision's Silent-Face-Anti-Spoofing Repo
-# licensed under Apache License 2.0
-# Ref: github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/src/model_lib/MiniFASNet.py
-
-# 3rd party dependencies
-import torch
-from torch.nn import (
- Linear,
- Conv2d,
- BatchNorm1d,
- BatchNorm2d,
- PReLU,
- ReLU,
- Sigmoid,
- AdaptiveAvgPool2d,
- Sequential,
- Module,
-)
-
-# pylint: disable=super-with-arguments, too-many-instance-attributes, unused-argument, redefined-builtin, too-few-public-methods
-
-keep_dict = {
- "1.8M": [
- 32,
- 32,
- 103,
- 103,
- 64,
- 13,
- 13,
- 64,
- 26,
- 26,
- 64,
- 13,
- 13,
- 64,
- 52,
- 52,
- 64,
- 231,
- 231,
- 128,
- 154,
- 154,
- 128,
- 52,
- 52,
- 128,
- 26,
- 26,
- 128,
- 52,
- 52,
- 128,
- 26,
- 26,
- 128,
- 26,
- 26,
- 128,
- 308,
- 308,
- 128,
- 26,
- 26,
- 128,
- 26,
- 26,
- 128,
- 512,
- 512,
- ],
- "1.8M_": [
- 32,
- 32,
- 103,
- 103,
- 64,
- 13,
- 13,
- 64,
- 13,
- 13,
- 64,
- 13,
- 13,
- 64,
- 13,
- 13,
- 64,
- 231,
- 231,
- 128,
- 231,
- 231,
- 128,
- 52,
- 52,
- 128,
- 26,
- 26,
- 128,
- 77,
- 77,
- 128,
- 26,
- 26,
- 128,
- 26,
- 26,
- 128,
- 308,
- 308,
- 128,
- 26,
- 26,
- 128,
- 26,
- 26,
- 128,
- 512,
- 512,
- ],
-}
-
-
-def MiniFASNetV2(embedding_size=128, conv6_kernel=(7, 7), drop_p=0.2, num_classes=3, img_channel=3):
- return MiniFASNet(
- keep_dict["1.8M_"], embedding_size, conv6_kernel, drop_p, num_classes, img_channel
- )
-
-
-def MiniFASNetV1SE(
- embedding_size=128, conv6_kernel=(7, 7), drop_p=0.75, num_classes=3, img_channel=3
-):
- return MiniFASNetSE(
- keep_dict["1.8M"], embedding_size, conv6_kernel, drop_p, num_classes, img_channel
- )
-
-
-class Flatten(Module):
- def forward(self, input):
- return input.view(input.size(0), -1)
-
-
-class Conv_block(Module):
- def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
- super(Conv_block, self).__init__()
- self.conv = Conv2d(
- in_c,
- out_c,
- kernel_size=kernel,
- groups=groups,
- stride=stride,
- padding=padding,
- bias=False,
- )
- self.bn = BatchNorm2d(out_c)
- self.prelu = PReLU(out_c)
-
- def forward(self, x):
- x = self.conv(x)
- x = self.bn(x)
- x = self.prelu(x)
- return x
-
-
-class Linear_block(Module):
- def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
- super(Linear_block, self).__init__()
- self.conv = Conv2d(
- in_c,
- out_channels=out_c,
- kernel_size=kernel,
- groups=groups,
- stride=stride,
- padding=padding,
- bias=False,
- )
- self.bn = BatchNorm2d(out_c)
-
- def forward(self, x):
- x = self.conv(x)
- x = self.bn(x)
- return x
-
-
-class Depth_Wise(Module):
- def __init__(
- self, c1, c2, c3, residual=False, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=1
- ):
- super(Depth_Wise, self).__init__()
- c1_in, c1_out = c1
- c2_in, c2_out = c2
- c3_in, c3_out = c3
- self.conv = Conv_block(c1_in, out_c=c1_out, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
- self.conv_dw = Conv_block(
- c2_in, c2_out, groups=c2_in, kernel=kernel, padding=padding, stride=stride
- )
- self.project = Linear_block(c3_in, c3_out, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
- self.residual = residual
-
- def forward(self, x):
- if self.residual:
- short_cut = x
- x = self.conv(x)
- x = self.conv_dw(x)
- x = self.project(x)
- if self.residual:
- output = short_cut + x
- else:
- output = x
- return output
-
-
-class Depth_Wise_SE(Module):
- def __init__(
- self,
- c1,
- c2,
- c3,
- residual=False,
- kernel=(3, 3),
- stride=(2, 2),
- padding=(1, 1),
- groups=1,
- se_reduct=8,
- ):
- super(Depth_Wise_SE, self).__init__()
- c1_in, c1_out = c1
- c2_in, c2_out = c2
- c3_in, c3_out = c3
- self.conv = Conv_block(c1_in, out_c=c1_out, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
- self.conv_dw = Conv_block(
- c2_in, c2_out, groups=c2_in, kernel=kernel, padding=padding, stride=stride
- )
- self.project = Linear_block(c3_in, c3_out, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
- self.residual = residual
- self.se_module = SEModule(c3_out, se_reduct)
-
- def forward(self, x):
- if self.residual:
- short_cut = x
- x = self.conv(x)
- x = self.conv_dw(x)
- x = self.project(x)
- if self.residual:
- x = self.se_module(x)
- output = short_cut + x
- else:
- output = x
- return output
-
-
-class SEModule(Module):
- def __init__(self, channels, reduction):
- super(SEModule, self).__init__()
- self.avg_pool = AdaptiveAvgPool2d(1)
- self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
- self.bn1 = BatchNorm2d(channels // reduction)
- self.relu = ReLU(inplace=True)
- self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
- self.bn2 = BatchNorm2d(channels)
- self.sigmoid = Sigmoid()
-
- def forward(self, x):
- module_input = x
- x = self.avg_pool(x)
- x = self.fc1(x)
- x = self.bn1(x)
- x = self.relu(x)
- x = self.fc2(x)
- x = self.bn2(x)
- x = self.sigmoid(x)
- return module_input * x
-
-
-class Residual(Module):
- def __init__(self, c1, c2, c3, num_block, groups, kernel=(3, 3), stride=(1, 1), padding=(1, 1)):
- super(Residual, self).__init__()
- modules = []
- for i in range(num_block):
- c1_tuple = c1[i]
- c2_tuple = c2[i]
- c3_tuple = c3[i]
- modules.append(
- Depth_Wise(
- c1_tuple,
- c2_tuple,
- c3_tuple,
- residual=True,
- kernel=kernel,
- padding=padding,
- stride=stride,
- groups=groups,
- )
- )
- self.model = Sequential(*modules)
-
- def forward(self, x):
- return self.model(x)
-
-
-class ResidualSE(Module):
- def __init__(
- self,
- c1,
- c2,
- c3,
- num_block,
- groups,
- kernel=(3, 3),
- stride=(1, 1),
- padding=(1, 1),
- se_reduct=4,
- ):
- super(ResidualSE, self).__init__()
- modules = []
- for i in range(num_block):
- c1_tuple = c1[i]
- c2_tuple = c2[i]
- c3_tuple = c3[i]
- if i == num_block - 1:
- modules.append(
- Depth_Wise_SE(
- c1_tuple,
- c2_tuple,
- c3_tuple,
- residual=True,
- kernel=kernel,
- padding=padding,
- stride=stride,
- groups=groups,
- se_reduct=se_reduct,
- )
- )
- else:
- modules.append(
- Depth_Wise(
- c1_tuple,
- c2_tuple,
- c3_tuple,
- residual=True,
- kernel=kernel,
- padding=padding,
- stride=stride,
- groups=groups,
- )
- )
- self.model = Sequential(*modules)
-
- def forward(self, x):
- return self.model(x)
-
-
-class MiniFASNet(Module):
- def __init__(
- self, keep, embedding_size, conv6_kernel=(7, 7), drop_p=0.0, num_classes=3, img_channel=3
- ):
- super(MiniFASNet, self).__init__()
- self.embedding_size = embedding_size
-
- self.conv1 = Conv_block(img_channel, keep[0], kernel=(3, 3), stride=(2, 2), padding=(1, 1))
- self.conv2_dw = Conv_block(
- keep[0], keep[1], kernel=(3, 3), stride=(1, 1), padding=(1, 1), groups=keep[1]
- )
-
- c1 = [(keep[1], keep[2])]
- c2 = [(keep[2], keep[3])]
- c3 = [(keep[3], keep[4])]
-
- self.conv_23 = Depth_Wise(
- c1[0], c2[0], c3[0], kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=keep[3]
- )
-
- c1 = [(keep[4], keep[5]), (keep[7], keep[8]), (keep[10], keep[11]), (keep[13], keep[14])]
- c2 = [(keep[5], keep[6]), (keep[8], keep[9]), (keep[11], keep[12]), (keep[14], keep[15])]
- c3 = [(keep[6], keep[7]), (keep[9], keep[10]), (keep[12], keep[13]), (keep[15], keep[16])]
-
- self.conv_3 = Residual(
- c1, c2, c3, num_block=4, groups=keep[4], kernel=(3, 3), stride=(1, 1), padding=(1, 1)
- )
-
- c1 = [(keep[16], keep[17])]
- c2 = [(keep[17], keep[18])]
- c3 = [(keep[18], keep[19])]
-
- self.conv_34 = Depth_Wise(
- c1[0], c2[0], c3[0], kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=keep[19]
- )
-
- c1 = [
- (keep[19], keep[20]),
- (keep[22], keep[23]),
- (keep[25], keep[26]),
- (keep[28], keep[29]),
- (keep[31], keep[32]),
- (keep[34], keep[35]),
- ]
- c2 = [
- (keep[20], keep[21]),
- (keep[23], keep[24]),
- (keep[26], keep[27]),
- (keep[29], keep[30]),
- (keep[32], keep[33]),
- (keep[35], keep[36]),
- ]
- c3 = [
- (keep[21], keep[22]),
- (keep[24], keep[25]),
- (keep[27], keep[28]),
- (keep[30], keep[31]),
- (keep[33], keep[34]),
- (keep[36], keep[37]),
- ]
-
- self.conv_4 = Residual(
- c1, c2, c3, num_block=6, groups=keep[19], kernel=(3, 3), stride=(1, 1), padding=(1, 1)
- )
-
- c1 = [(keep[37], keep[38])]
- c2 = [(keep[38], keep[39])]
- c3 = [(keep[39], keep[40])]
-
- self.conv_45 = Depth_Wise(
- c1[0], c2[0], c3[0], kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=keep[40]
- )
-
- c1 = [(keep[40], keep[41]), (keep[43], keep[44])]
- c2 = [(keep[41], keep[42]), (keep[44], keep[45])]
- c3 = [(keep[42], keep[43]), (keep[45], keep[46])]
-
- self.conv_5 = Residual(
- c1, c2, c3, num_block=2, groups=keep[40], kernel=(3, 3), stride=(1, 1), padding=(1, 1)
- )
- self.conv_6_sep = Conv_block(
- keep[46], keep[47], kernel=(1, 1), stride=(1, 1), padding=(0, 0)
- )
- self.conv_6_dw = Linear_block(
- keep[47], keep[48], groups=keep[48], kernel=conv6_kernel, stride=(1, 1), padding=(0, 0)
- )
- self.conv_6_flatten = Flatten()
- self.linear = Linear(512, embedding_size, bias=False)
- self.bn = BatchNorm1d(embedding_size)
- self.drop = torch.nn.Dropout(p=drop_p)
- self.prob = Linear(embedding_size, num_classes, bias=False)
-
- def forward(self, x):
- out = self.conv1(x)
- out = self.conv2_dw(out)
- out = self.conv_23(out)
- out = self.conv_3(out)
- out = self.conv_34(out)
- out = self.conv_4(out)
- out = self.conv_45(out)
- out = self.conv_5(out)
- out = self.conv_6_sep(out)
- out = self.conv_6_dw(out)
- out = self.conv_6_flatten(out)
- if self.embedding_size != 512:
- out = self.linear(out)
- out = self.bn(out)
- out = self.drop(out)
- out = self.prob(out)
- return out
-
-
-class MiniFASNetSE(MiniFASNet):
- def __init__(
- self, keep, embedding_size, conv6_kernel=(7, 7), drop_p=0.75, num_classes=4, img_channel=3
- ):
- super(MiniFASNetSE, self).__init__(
- keep=keep,
- embedding_size=embedding_size,
- conv6_kernel=conv6_kernel,
- drop_p=drop_p,
- num_classes=num_classes,
- img_channel=img_channel,
- )
-
- c1 = [(keep[4], keep[5]), (keep[7], keep[8]), (keep[10], keep[11]), (keep[13], keep[14])]
- c2 = [(keep[5], keep[6]), (keep[8], keep[9]), (keep[11], keep[12]), (keep[14], keep[15])]
- c3 = [(keep[6], keep[7]), (keep[9], keep[10]), (keep[12], keep[13]), (keep[15], keep[16])]
-
- self.conv_3 = ResidualSE(
- c1, c2, c3, num_block=4, groups=keep[4], kernel=(3, 3), stride=(1, 1), padding=(1, 1)
- )
-
- c1 = [
- (keep[19], keep[20]),
- (keep[22], keep[23]),
- (keep[25], keep[26]),
- (keep[28], keep[29]),
- (keep[31], keep[32]),
- (keep[34], keep[35]),
- ]
- c2 = [
- (keep[20], keep[21]),
- (keep[23], keep[24]),
- (keep[26], keep[27]),
- (keep[29], keep[30]),
- (keep[32], keep[33]),
- (keep[35], keep[36]),
- ]
- c3 = [
- (keep[21], keep[22]),
- (keep[24], keep[25]),
- (keep[27], keep[28]),
- (keep[30], keep[31]),
- (keep[33], keep[34]),
- (keep[36], keep[37]),
- ]
-
- self.conv_4 = ResidualSE(
- c1, c2, c3, num_block=6, groups=keep[19], kernel=(3, 3), stride=(1, 1), padding=(1, 1)
- )
-
- c1 = [(keep[40], keep[41]), (keep[43], keep[44])]
- c2 = [(keep[41], keep[42]), (keep[44], keep[45])]
- c3 = [(keep[42], keep[43]), (keep[45], keep[46])]
- self.conv_5 = ResidualSE(
- c1, c2, c3, num_block=2, groups=keep[40], kernel=(3, 3), stride=(1, 1), padding=(1, 1)
- )
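The MiniFASNet variants above are the backbones of the anti-spoofing feature. A minimal sketch of how they are reached in practice (the image path and face box are hypothetical; the `Fasnet` wrapper and its `analyze` call are the ones used by `extract_faces` further below):

```python
import cv2
from deepface.modules import modeling

# build (and cache) the Fasnet anti-spoofing client, which wraps the
# MiniFASNet / MiniFASNetSE backbones defined above
antispoof_model = modeling.build_model(task="spoofing", model_name="Fasnet")

img = cv2.imread("img.jpg")        # hypothetical BGR image
facial_area = (10, 20, 150, 150)   # hypothetical x, y, w, h of a detected face

# returns a boolean decision plus a confidence-like score
is_real, antispoof_score = antispoof_model.analyze(img=img, facial_area=facial_area)
print(is_real, antispoof_score)
```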
diff --git a/deepface/models/spoofing/__init__.py b/deepface/models/spoofing/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/deepface/modules/__init__.py b/deepface/modules/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/deepface/modules/demography.py b/deepface/modules/demography.py
deleted file mode 100644
index b68314b..0000000
--- a/deepface/modules/demography.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# built-in dependencies
-from typing import Any, Dict, List, Union
-
-# 3rd party dependencies
-import numpy as np
-from tqdm import tqdm
-
-# project dependencies
-from deepface.modules import modeling, detection, preprocessing
-from deepface.models.demography import Gender, Race, Emotion
-
-
-def analyze(
- img_path: Union[str, np.ndarray],
- actions: Union[tuple, list] = ("emotion", "age", "gender", "race"),
- enforce_detection: bool = True,
- detector_backend: str = "opencv",
- align: bool = True,
- expand_percentage: int = 0,
- silent: bool = False,
- anti_spoofing: bool = False,
-) -> List[Dict[str, Any]]:
- """
- Analyze facial attributes such as age, gender, emotion, and race in the provided image.
-
- Args:
- img_path (str or np.ndarray): The exact path to the image, a numpy array in BGR format,
- or a base64 encoded image. If the source image contains multiple faces, the result will
- include information for each detected face.
-
- actions (tuple): Attributes to analyze. The default is ('emotion', 'age', 'gender', 'race').
- You can exclude some of these attributes from the analysis if needed.
-
- enforce_detection (boolean): If no face is detected in an image, raise an exception.
- Set to False to avoid the exception for low-resolution images (default is True).
-
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
- (default is opencv).
-
- align (boolean): Perform alignment based on the eye positions (default is True).
-
- expand_percentage (int): expand detected facial area with a percentage (default is 0).
-
- silent (boolean): Suppress or allow some log messages for a quieter analysis process
- (default is False).
-
- anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
-
- Returns:
- results (List[Dict[str, Any]]): A list of dictionaries, where each dictionary represents
- the analysis results for a detected face.
-
- Each dictionary in the list contains the following keys:
-
- - 'region' (dict): Represents the rectangular region of the detected face in the image.
- - 'x': x-coordinate of the top-left corner of the face.
- - 'y': y-coordinate of the top-left corner of the face.
- - 'w': Width of the detected face region.
- - 'h': Height of the detected face region.
-
- - 'age' (float): Estimated age of the detected face.
-
- - 'face_confidence' (float): Confidence score for the detected face.
- Indicates the reliability of the face detection.
-
- - 'dominant_gender' (str): The dominant gender in the detected face.
- Either "Man" or "Woman."
-
- - 'gender' (dict): Confidence scores for each gender category.
- - 'Man': Confidence score for the male gender.
- - 'Woman': Confidence score for the female gender.
-
- - 'dominant_emotion' (str): The dominant emotion in the detected face.
- Possible values include "sad," "angry," "surprise," "fear," "happy,"
- "disgust," and "neutral."
-
- - 'emotion' (dict): Confidence scores for each emotion category.
- - 'sad': Confidence score for sadness.
- - 'angry': Confidence score for anger.
- - 'surprise': Confidence score for surprise.
- - 'fear': Confidence score for fear.
- - 'happy': Confidence score for happiness.
- - 'disgust': Confidence score for disgust.
- - 'neutral': Confidence score for neutrality.
-
- - 'dominant_race' (str): The dominant race in the detected face.
- Possible values include "indian," "asian," "latino hispanic,"
- "black," "middle eastern," and "white."
-
- - 'race' (dict): Confidence scores for each race category.
- - 'indian': Confidence score for Indian ethnicity.
- - 'asian': Confidence score for Asian ethnicity.
- - 'latino hispanic': Confidence score for Latino/Hispanic ethnicity.
- - 'black': Confidence score for Black ethnicity.
- - 'middle eastern': Confidence score for Middle Eastern ethnicity.
- - 'white': Confidence score for White ethnicity.
- """
-
- # if a single action is passed as a bare string (e.g. actions="age"), wrap it in a tuple
- if isinstance(actions, str):
- actions = (actions,)
-
- # check if actions is not an iterable or empty.
- if not hasattr(actions, "__getitem__") or not actions:
- raise ValueError("`actions` must be a list of strings.")
-
- actions = list(actions)
-
- # For each action, check if it is valid
- for action in actions:
- if action not in ("emotion", "age", "gender", "race"):
- raise ValueError(
- f"Invalid action passed ({repr(action)})). "
- "Valid actions are `emotion`, `age`, `gender`, `race`."
- )
- # ---------------------------------
- resp_objects = []
-
- img_objs = detection.extract_faces(
- img_path=img_path,
- detector_backend=detector_backend,
- enforce_detection=enforce_detection,
- grayscale=False,
- align=align,
- expand_percentage=expand_percentage,
- anti_spoofing=anti_spoofing,
- )
-
- for img_obj in img_objs:
- if anti_spoofing is True and img_obj.get("is_real", True) is False:
- raise ValueError("Spoof detected in the given image.")
-
- img_content = img_obj["face"]
- img_region = img_obj["facial_area"]
- img_confidence = img_obj["confidence"]
- if img_content.shape[0] == 0 or img_content.shape[1] == 0:
- continue
-
- # rgb to bgr
- img_content = img_content[:, :, ::-1]
-
- # resize input image
- img_content = preprocessing.resize_image(img=img_content, target_size=(224, 224))
-
- obj = {}
- # facial attribute analysis
- pbar = tqdm(
- range(0, len(actions)),
- desc="Finding actions",
- disable=silent if len(actions) > 1 else True,
- )
- for index in pbar:
- action = actions[index]
- pbar.set_description(f"Action: {action}")
-
- if action == "emotion":
- emotion_predictions = modeling.build_model(
- task="facial_attribute", model_name="Emotion"
- ).predict(img_content)
- sum_of_predictions = emotion_predictions.sum()
-
- obj["emotion"] = {}
- for i, emotion_label in enumerate(Emotion.labels):
- emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
- obj["emotion"][emotion_label] = emotion_prediction
-
- obj["dominant_emotion"] = Emotion.labels[np.argmax(emotion_predictions)]
-
- elif action == "age":
- apparent_age = modeling.build_model(
- task="facial_attribute", model_name="Age"
- ).predict(img_content)
- # cast to int to avoid "object of type 'float32' is not JSON serializable" errors
- obj["age"] = int(apparent_age)
-
- elif action == "gender":
- gender_predictions = modeling.build_model(
- task="facial_attribute", model_name="Gender"
- ).predict(img_content)
- obj["gender"] = {}
- for i, gender_label in enumerate(Gender.labels):
- gender_prediction = 100 * gender_predictions[i]
- obj["gender"][gender_label] = gender_prediction
-
- obj["dominant_gender"] = Gender.labels[np.argmax(gender_predictions)]
-
- elif action == "race":
- race_predictions = modeling.build_model(
- task="facial_attribute", model_name="Race"
- ).predict(img_content)
- sum_of_predictions = race_predictions.sum()
-
- obj["race"] = {}
- for i, race_label in enumerate(Race.labels):
- race_prediction = 100 * race_predictions[i] / sum_of_predictions
- obj["race"][race_label] = race_prediction
-
- obj["dominant_race"] = Race.labels[np.argmax(race_predictions)]
-
- # -----------------------------
- # mention facial areas
- obj["region"] = img_region
- # include image confidence
- obj["face_confidence"] = img_confidence
-
- resp_objects.append(obj)
-
- return resp_objects
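The `analyze` function above returns one dictionary per detected face, as described in its docstring. A minimal sketch of calling it through the public `DeepFace.analyze` wrapper (the image path and parameter values are hypothetical):

```python
from deepface import DeepFace

# analyze every detected face in a local image for the four supported attributes
results = DeepFace.analyze(
    img_path="img.jpg",
    actions=("emotion", "age", "gender", "race"),
    detector_backend="opencv",
    enforce_detection=False,  # do not raise if no face is found
)

for face in results:
    region = face["region"]  # x, y, w, h of the detected face
    print(region, face["age"], face["dominant_gender"], face["dominant_emotion"], face["dominant_race"])
```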
diff --git a/deepface/modules/detection.py b/deepface/modules/detection.py
deleted file mode 100644
index 46165a9..0000000
--- a/deepface/modules/detection.py
+++ /dev/null
@@ -1,410 +0,0 @@
-# built-in dependencies
-from typing import Any, Dict, List, Tuple, Union, Optional
-
-# 3rd party dependencies
-from heapq import nlargest
-import numpy as np
-import cv2
-
-# project dependencies
-from deepface.modules import modeling
-from deepface.models.Detector import Detector, DetectedFace, FacialAreaRegion
-from deepface.commons import image_utils
-
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# pylint: disable=no-else-raise
-
-
-def extract_faces(
- img_path: Union[str, np.ndarray],
- detector_backend: str = "opencv",
- enforce_detection: bool = True,
- align: bool = True,
- expand_percentage: int = 0,
- grayscale: bool = False,
- color_face: str = "rgb",
- normalize_face: bool = True,
- anti_spoofing: bool = False,
- max_faces: Optional[int] = None,
-) -> List[Dict[str, Any]]:
- """
- Extract faces from a given image
-
- Args:
- img_path (str or np.ndarray): Path to the first image. Accepts exact image path
- as a string, numpy array (BGR), or base64 encoded images.
-
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
- (default is opencv)
-
- enforce_detection (boolean): If no face is detected in an image, raise an exception.
- Default is True. Set to False to avoid the exception for low-resolution images.
-
- align (bool): Flag to enable face alignment (default is True).
-
- expand_percentage (int): expand detected facial area with a percentage.
-
- grayscale (boolean): (Deprecated) Flag to convert the output face image to grayscale
- (default is False).
-
- color_face (string): Color to return face image output. Options: 'rgb', 'bgr' or 'gray'
- (default is 'rgb').
-
- normalize_face (boolean): Flag to enable normalization (divide by 255) of the output
- face image (default is True).
-
- anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
-
- Returns:
- results (List[Dict[str, Any]]): A list of dictionaries, where each dictionary contains:
-
- - "face" (np.ndarray): The detected face as a NumPy array in RGB format.
-
- - "facial_area" (Dict[str, Any]): The detected face's regions as a dictionary containing:
- - keys 'x', 'y', 'w', 'h' with int values
- - keys 'left_eye', 'right_eye' with a tuple of 2 ints as values.
- left eye and right eye are eyes on the left and right respectively with respect
- to the person itself instead of observer.
-
- - "confidence" (float): The confidence score associated with the detected face.
-
- - "is_real" (boolean): antispoofing analyze result. this key is just available in the
- result only if anti_spoofing is set to True in input arguments.
-
- - "antispoof_score" (float): score of antispoofing analyze result. this key is
- just available in the result only if anti_spoofing is set to True in input arguments.
- """
-
- resp_objs = []
-
- # img might be path, base64 or numpy array. Convert it to numpy whatever it is.
- img, img_name = image_utils.load_image(img_path)
-
- if img is None:
- raise ValueError(f"Exception while loading {img_name}")
-
- height, width, _ = img.shape
-
- base_region = FacialAreaRegion(x=0, y=0, w=width, h=height, confidence=0)
-
- if detector_backend == "skip":
- face_objs = [DetectedFace(img=img, facial_area=base_region, confidence=0)]
- else:
- face_objs = detect_faces(
- detector_backend=detector_backend,
- img=img,
- align=align,
- expand_percentage=expand_percentage,
- max_faces=max_faces,
- )
-
- # in case of no face found
- if len(face_objs) == 0 and enforce_detection is True:
- if img_name is not None:
- raise ValueError(
- f"Face could not be detected in {img_name}."
- "Please confirm that the picture is a face photo "
- "or consider to set enforce_detection param to False."
- )
- else:
- raise ValueError(
- "Face could not be detected. Please confirm that the picture is a face photo "
- "or consider to set enforce_detection param to False."
- )
-
- if len(face_objs) == 0 and enforce_detection is False:
- face_objs = [DetectedFace(img=img, facial_area=base_region, confidence=0)]
-
- for face_obj in face_objs:
- current_img = face_obj.img
- current_region = face_obj.facial_area
-
- if current_img.shape[0] == 0 or current_img.shape[1] == 0:
- continue
-
- if grayscale is True:
- logger.warn("Parameter grayscale is deprecated. Use color_face instead.")
- current_img = cv2.cvtColor(current_img, cv2.COLOR_BGR2GRAY)
- else:
- if color_face == "rgb":
- current_img = current_img[:, :, ::-1]
- elif color_face == "bgr":
- pass # image is in BGR
- elif color_face == "gray":
- current_img = cv2.cvtColor(current_img, cv2.COLOR_BGR2GRAY)
- else:
- raise ValueError(f"The color_face can be rgb, bgr or gray, but it is {color_face}.")
-
- if normalize_face:
- current_img = current_img / 255 # normalize input in [0, 1]
-
- # cast to int for flask, and do final checks for borders
- x = max(0, int(current_region.x))
- y = max(0, int(current_region.y))
- w = min(width - x - 1, int(current_region.w))
- h = min(height - y - 1, int(current_region.h))
-
- resp_obj = {
- "face": current_img,
- "facial_area": {
- "x": x,
- "y": y,
- "w": w,
- "h": h,
- "left_eye": current_region.left_eye,
- "right_eye": current_region.right_eye,
- },
- "confidence": round(float(current_region.confidence or 0), 2),
- }
-
- if anti_spoofing is True:
- antispoof_model = modeling.build_model(task="spoofing", model_name="Fasnet")
- is_real, antispoof_score = antispoof_model.analyze(img=img, facial_area=(x, y, w, h))
- resp_obj["is_real"] = is_real
- resp_obj["antispoof_score"] = antispoof_score
-
- resp_objs.append(resp_obj)
-
- if len(resp_objs) == 0 and enforce_detection is True:
- raise ValueError(
- f"Exception while extracting faces from {img_name}. "
- "Consider setting the enforce_detection arg to False."
- )
-
- return resp_objs
-
-
-def detect_faces(
- detector_backend: str,
- img: np.ndarray,
- align: bool = True,
- expand_percentage: int = 0,
- max_faces: Optional[int] = None,
-) -> List[DetectedFace]:
- """
- Detect face(s) from a given image
- Args:
- detector_backend (str): detector name
-
- img (np.ndarray): pre-loaded image
-
- align (bool): enable or disable alignment after detection
-
- expand_percentage (int): expand detected facial area with a percentage (default is 0).
-
- max_faces (Optional[int]): keep at most this many largest faces (default is None, keep all).
-
- Returns:
- results (List[DetectedFace]): A list of DetectedFace objects
- where each object contains:
-
- - img (np.ndarray): The detected face as a NumPy array.
-
- - facial_area (FacialAreaRegion): The facial area region represented as x, y, w, h,
- left_eye and right eye. left eye and right eye are eyes on the left and right
- with respect to the person instead of observer.
-
- - confidence (float): The confidence score associated with the detected face.
- """
- height, width, _ = img.shape
- face_detector: Detector = modeling.build_model(
- task="face_detector", model_name=detector_backend
- )
-
- # validate expand percentage score
- if expand_percentage < 0:
- logger.warn(
- f"Expand percentage cannot be negative but you set it to {expand_percentage}."
- "Overwritten it to 0."
- )
- expand_percentage = 0
-
- # If faces are close to the upper boundary, alignment may move them outside the image.
- # Add a black border around the image to avoid this.
- height_border = int(0.5 * height)
- width_border = int(0.5 * width)
- if align is True:
- img = cv2.copyMakeBorder(
- img,
- height_border,
- height_border,
- width_border,
- width_border,
- cv2.BORDER_CONSTANT,
- value=[0, 0, 0], # Color of the border (black)
- )
-
- # find facial areas of given image
- facial_areas = face_detector.detect_faces(img)
-
- if max_faces is not None and max_faces < len(facial_areas):
- facial_areas = nlargest(
- max_faces, facial_areas, key=lambda facial_area: facial_area.w * facial_area.h
- )
-
- return [
- expand_and_align_face(
- facial_area=facial_area,
- img=img,
- align=align,
- expand_percentage=expand_percentage,
- width_border=width_border,
- height_border=height_border,
- )
- for facial_area in facial_areas
- ]
-
-
-def expand_and_align_face(
- facial_area: FacialAreaRegion,
- img: np.ndarray,
- align: bool,
- expand_percentage: int,
- width_border: int,
- height_border: int,
-) -> DetectedFace:
- x = facial_area.x
- y = facial_area.y
- w = facial_area.w
- h = facial_area.h
- left_eye = facial_area.left_eye
- right_eye = facial_area.right_eye
- confidence = facial_area.confidence
-
- if expand_percentage > 0:
- # Expand the facial region height and width by the provided percentage
- # ensuring that the expanded region stays within img.shape limits
- expanded_w = w + int(w * expand_percentage / 100)
- expanded_h = h + int(h * expand_percentage / 100)
-
- x = max(0, x - int((expanded_w - w) / 2))
- y = max(0, y - int((expanded_h - h) / 2))
- w = min(img.shape[1] - x, expanded_w)
- h = min(img.shape[0] - y, expanded_h)
-
- # extract detected face unaligned
- detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]
- # align original image, then find projection of detected face area after alignment
- if align is True: # and left_eye is not None and right_eye is not None:
- aligned_img, angle = align_img_wrt_eyes(img=img, left_eye=left_eye, right_eye=right_eye)
-
- rotated_x1, rotated_y1, rotated_x2, rotated_y2 = project_facial_area(
- facial_area=(x, y, x + w, y + h), angle=angle, size=(img.shape[0], img.shape[1])
- )
- detected_face = aligned_img[
- int(rotated_y1) : int(rotated_y2), int(rotated_x1) : int(rotated_x2)
- ]
-
- # restore x, y, le and re before border added
- x = x - width_border
- y = y - height_border
- # w and h will not change
- if left_eye is not None:
- left_eye = (left_eye[0] - width_border, left_eye[1] - height_border)
- if right_eye is not None:
- right_eye = (right_eye[0] - width_border, right_eye[1] - height_border)
-
- return DetectedFace(
- img=detected_face,
- facial_area=FacialAreaRegion(
- x=x, y=y, h=h, w=w, confidence=confidence, left_eye=left_eye, right_eye=right_eye
- ),
- confidence=confidence,
- )
-
-
-def align_img_wrt_eyes(
- img: np.ndarray,
- left_eye: Union[list, tuple],
- right_eye: Union[list, tuple],
-) -> Tuple[np.ndarray, float]:
- """
- Align a given image horizontally with respect to the left and right eye locations
- Args:
- img (np.ndarray): pre-loaded image with detected face
- left_eye (list or tuple): coordinates of left eye with respect to the person itself
- right_eye(list or tuple): coordinates of right eye with respect to the person itself
- Returns:
- img (np.ndarray): aligned facial image
- angle (float): rotation angle (in degrees) that was applied to align the eyes
- """
- # if eye could not be detected for the given image, return image itself
- if left_eye is None or right_eye is None:
- return img, 0
-
- # sometimes detected images unexpectedly come with zero dimensions
- if img.shape[0] == 0 or img.shape[1] == 0:
- return img, 0
-
- angle = float(np.degrees(np.arctan2(left_eye[1] - right_eye[1], left_eye[0] - right_eye[0])))
-
- (h, w) = img.shape[:2]
- center = (w // 2, h // 2)
- M = cv2.getRotationMatrix2D(center, angle, 1.0)
- img = cv2.warpAffine(
- img, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=(0, 0, 0)
- )
-
- return img, angle
-
-
-def project_facial_area(
- facial_area: Tuple[int, int, int, int], angle: float, size: Tuple[int, int]
-) -> Tuple[int, int, int, int]:
- """
- Update pre-calculated facial area coordinates after image itself
- rotated with respect to the eyes.
- Inspired by the work of @UmutDeniz26 - github.com/serengil/retinaface/pull/80
-
- Args:
- facial_area (tuple of int): Representing the (x1, y1, x2, y2) of the facial area.
- x2 is equal to x1 + w1, and y2 is equal to y1 + h1
- angle (float): Angle of rotation in degrees. Its sign determines the direction of rotation.
- Note that angles > 360 degrees are normalized to the range [0, 360).
- size (tuple of int): Tuple representing the size of the image (height, width).
-
- Returns:
- rotated_coordinates (tuple of int): Representing the new coordinates
- (x1, y1, x2, y2) or (x1, y1, x1+w1, y1+h1) of the rotated facial area.
- """
-
- # Normalize the angle so we don't have to
- # worry about rotations greater than 360 degrees.
- # We workaround the quirky behavior of the modulo operator
- # for negative angle values.
- direction = 1 if angle >= 0 else -1
- angle = abs(angle) % 360
- if angle == 0:
- return facial_area
-
- # Angle in radians
- angle = angle * np.pi / 180
-
- height, width = size
-
- # Translate the facial area to the center of the image
- x = (facial_area[0] + facial_area[2]) / 2 - width / 2
- y = (facial_area[1] + facial_area[3]) / 2 - height / 2
-
- # Rotate the facial area
- x_new = x * np.cos(angle) + y * direction * np.sin(angle)
- y_new = -x * direction * np.sin(angle) + y * np.cos(angle)
-
- # Translate the facial area back to the original position
- x_new = x_new + width / 2
- y_new = y_new + height / 2
-
- # Calculate projected coordinates after alignment
- x1 = x_new - (facial_area[2] - facial_area[0]) / 2
- y1 = y_new - (facial_area[3] - facial_area[1]) / 2
- x2 = x_new + (facial_area[2] - facial_area[0]) / 2
- y2 = y_new + (facial_area[3] - facial_area[1]) / 2
-
- # validate projected coordinates are in image's boundaries
- x1 = max(int(x1), 0)
- y1 = max(int(y1), 0)
- x2 = min(int(x2), width)
- y2 = min(int(y2), height)
-
- return (x1, y1, x2, y2)
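`extract_faces` is the detection entry point used by the other modules: it loads the image, runs the chosen detector, optionally expands and aligns each facial area, and returns normalized crops. A minimal sketch through the public `DeepFace.extract_faces` wrapper (paths and parameter values are hypothetical):

```python
from deepface import DeepFace

face_objs = DeepFace.extract_faces(
    img_path="img.jpg",
    detector_backend="opencv",
    align=True,             # rotate so that the eyes are horizontal
    expand_percentage=10,   # grow each facial area by 10% before cropping
    anti_spoofing=True,     # adds is_real / antispoof_score to each result
)

for obj in face_objs:
    area = obj["facial_area"]
    print(area["x"], area["y"], area["w"], area["h"], obj["confidence"], obj.get("is_real"))
```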
diff --git a/deepface/modules/modeling.py b/deepface/modules/modeling.py
deleted file mode 100644
index c097c92..0000000
--- a/deepface/modules/modeling.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# built-in dependencies
-from typing import Any
-
-# project dependencies
-from deepface.models.facial_recognition import (
- VGGFace,
- OpenFace,
- FbDeepFace,
- DeepID,
- ArcFace,
- SFace,
- Dlib,
- Facenet,
- GhostFaceNet,
-)
-from deepface.models.face_detection import (
- FastMtCnn,
- MediaPipe,
- MtCnn,
- OpenCv,
- Dlib as DlibDetector,
- RetinaFace,
- Ssd,
- Yolo,
- YuNet,
- CenterFace,
-)
-from deepface.models.demography import Age, Gender, Race, Emotion
-from deepface.models.spoofing import FasNet
-
-
-def build_model(task: str, model_name: str) -> Any:
- """
- This function builds a pre-trained model and caches it, so repeated calls return the same instance
- Parameters:
- task (str): facial_recognition, facial_attribute, face_detector, spoofing
- model_name (str): model identifier
- - VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib,
- ArcFace, SFace, GhostFaceNet for face recognition
- - Age, Gender, Emotion, Race for facial attributes
- - opencv, mtcnn, ssd, dlib, retinaface, mediapipe, yolov8, yunet,
- fastmtcnn or centerface for face detectors
- - Fasnet for spoofing
- Returns:
- built model class
- """
-
- # singleton design pattern
- global cached_models
-
- models = {
- "facial_recognition": {
- "VGG-Face": VGGFace.VggFaceClient,
- "OpenFace": OpenFace.OpenFaceClient,
- "Facenet": Facenet.FaceNet128dClient,
- "Facenet512": Facenet.FaceNet512dClient,
- "DeepFace": FbDeepFace.DeepFaceClient,
- "DeepID": DeepID.DeepIdClient,
- "Dlib": Dlib.DlibClient,
- "ArcFace": ArcFace.ArcFaceClient,
- "SFace": SFace.SFaceClient,
- "GhostFaceNet": GhostFaceNet.GhostFaceNetClient,
- },
- "spoofing": {
- "Fasnet": FasNet.Fasnet,
- },
- "facial_attribute": {
- "Emotion": Emotion.EmotionClient,
- "Age": Age.ApparentAgeClient,
- "Gender": Gender.GenderClient,
- "Race": Race.RaceClient,
- },
- "face_detector": {
- "opencv": OpenCv.OpenCvClient,
- "mtcnn": MtCnn.MtCnnClient,
- "ssd": Ssd.SsdClient,
- "dlib": DlibDetector.DlibClient,
- "retinaface": RetinaFace.RetinaFaceClient,
- "mediapipe": MediaPipe.MediaPipeClient,
- "yolov8": Yolo.YoloClient,
- "yunet": YuNet.YuNetClient,
- "fastmtcnn": FastMtCnn.FastMtCnnClient,
- "centerface": CenterFace.CenterFaceClient,
- },
- }
-
- if models.get(task) is None:
- raise ValueError(f"unimplemented task - {task}")
-
- if not "cached_models" in globals():
- cached_models = {current_task: {} for current_task in models.keys()}
-
- if cached_models[task].get(model_name) is None:
- model = models[task].get(model_name)
- if model:
- cached_models[task][model_name] = model()
- else:
- raise ValueError(f"Invalid model_name passed - {task}/{model_name}")
-
- return cached_models[task][model_name]
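`build_model` keeps one instance per (task, model_name) pair in a module-level `cached_models` dictionary, so repeated calls are cheap. A minimal sketch of that behavior (model names are taken from the mapping above):

```python
from deepface.modules import modeling

# the first call constructs the client and stores it; the second call reuses it
detector_a = modeling.build_model(task="face_detector", model_name="opencv")
detector_b = modeling.build_model(task="face_detector", model_name="opencv")
assert detector_a is detector_b  # singleton-style caching per (task, model_name)

# an unknown task or model name raises a ValueError
try:
    modeling.build_model(task="face_detector", model_name="no-such-detector")
except ValueError as err:
    print(err)
```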
diff --git a/deepface/modules/preprocessing.py b/deepface/modules/preprocessing.py
deleted file mode 100644
index 459adba..0000000
--- a/deepface/modules/preprocessing.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# built-in dependencies
-from typing import Tuple
-
-# 3rd party
-import numpy as np
-import cv2
-
-# project dependencies
-from deepface.commons import package_utils
-
-
-tf_major_version = package_utils.get_tf_major_version()
-if tf_major_version == 1:
- from keras.preprocessing import image
-elif tf_major_version == 2:
- from tensorflow.keras.preprocessing import image
-
-
-def normalize_input(img: np.ndarray, normalization: str = "base") -> np.ndarray:
- """Normalize input image.
-
- Args:
- img (numpy array): the input image.
- normalization (str, optional): the normalization technique. Defaults to "base",
- for no normalization.
-
- Returns:
- numpy array: the normalized image.
- """
-
- # issue 131 reports that some normalization techniques improve the accuracy
-
- if normalization == "base":
- return img
-
- # @trevorgribble and @davedgd contributed this feature
- # restore input in scale of [0, 255] because it was normalized in scale of
- # [0, 1] in preprocess_face
- img *= 255
-
- if normalization == "raw":
- pass # return just restored pixels
-
- elif normalization == "Facenet":
- mean, std = img.mean(), img.std()
- img = (img - mean) / std
-
- elif normalization == "Facenet2018":
- # simply / 127.5 - 1 (similar to facenet 2018 model preprocessing step as @iamrishab posted)
- img /= 127.5
- img -= 1
-
- elif normalization == "VGGFace":
- # mean subtraction based on VGGFace1 training data
- img[..., 0] -= 93.5940
- img[..., 1] -= 104.7624
- img[..., 2] -= 129.1863
-
- elif normalization == "VGGFace2":
- # mean subtraction based on VGGFace2 training data
- img[..., 0] -= 91.4953
- img[..., 1] -= 103.8827
- img[..., 2] -= 131.0912
-
- elif normalization == "ArcFace":
- # Reference study: The faces are cropped and resized to 112×112,
- # and each pixel (ranging between [0, 255]) in RGB images is normalised
- # by subtracting 127.5 and then dividing by 128.
- img -= 127.5
- img /= 128
- else:
- raise ValueError(f"unimplemented normalization type - {normalization}")
-
- return img
-
-
-def resize_image(img: np.ndarray, target_size: Tuple[int, int]) -> np.ndarray:
- """
- Resize an image to the expected input size of an ML model, padding with black pixels.
- Args:
- img (np.ndarray): pre-loaded image as numpy array
- target_size (tuple): input shape of ml model
- Returns:
- img (np.ndarray): resized input image
- """
- factor_0 = target_size[0] / img.shape[0]
- factor_1 = target_size[1] / img.shape[1]
- factor = min(factor_0, factor_1)
-
- dsize = (
- int(img.shape[1] * factor),
- int(img.shape[0] * factor),
- )
- img = cv2.resize(img, dsize)
-
- diff_0 = target_size[0] - img.shape[0]
- diff_1 = target_size[1] - img.shape[1]
-
- # Put the base image in the middle of the padded image
- img = np.pad(
- img,
- (
- (diff_0 // 2, diff_0 - diff_0 // 2),
- (diff_1 // 2, diff_1 - diff_1 // 2),
- (0, 0),
- ),
- "constant",
- )
-
- # double check: if the resized image still does not match the target size
- if img.shape[0:2] != target_size:
- img = cv2.resize(img, target_size)
-
- # make it 4-dimensional as ML models expect
- img = image.img_to_array(img)
- img = np.expand_dims(img, axis=0)
-
- if img.max() > 1:
- img = img.astype(np.float32) / 255.0
-
- return img
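`resize_image` scales by the smaller of the two resize factors so the aspect ratio is preserved, pads the remainder with black pixels, and adds a batch dimension. A small worked sketch with a hypothetical 100×200 input and a 224×224 model:

```python
import numpy as np
from deepface.modules import preprocessing

# hypothetical 100x200 (height x width) image destined for a 224x224 model input
img = np.zeros((100, 200, 3), dtype=np.uint8)

# factor = min(224/100, 224/200) = 1.12, so the image is resized to 112x224,
# then 112 rows of black padding are split above and below (56 each)
# before the batch dimension is added
resized = preprocessing.resize_image(img=img, target_size=(224, 224))
print(resized.shape)  # (1, 224, 224, 3)
```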
diff --git a/deepface/modules/recognition.py b/deepface/modules/recognition.py
deleted file mode 100644
index 799dfbc..0000000
--- a/deepface/modules/recognition.py
+++ /dev/null
@@ -1,417 +0,0 @@
-# built-in dependencies
-import os
-import pickle
-from typing import List, Union, Optional, Dict, Any, Set
-import time
-
-# 3rd party dependencies
-import numpy as np
-import pandas as pd
-from tqdm import tqdm
-
-# project dependencies
-from deepface.commons import image_utils
-from deepface.modules import representation, detection, verification
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-
-def find(
- img_path: Union[str, np.ndarray],
- db_path: str,
- model_name: str = "VGG-Face",
- distance_metric: str = "cosine",
- enforce_detection: bool = True,
- detector_backend: str = "opencv",
- align: bool = True,
- expand_percentage: int = 0,
- threshold: Optional[float] = None,
- normalization: str = "base",
- silent: bool = False,
- refresh_database: bool = True,
- anti_spoofing: bool = False,
-) -> List[pd.DataFrame]:
- """
- Identify individuals in a database
-
- Args:
- img_path (str or np.ndarray): The exact path to the image, a numpy array in BGR format,
- or a base64 encoded image. If the source image contains multiple faces, the result will
- include information for each detected face.
-
- db_path (string): Path to the folder containing image files. All detected faces
- in the database will be considered in the decision-making process.
-
- model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
- OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
-
- distance_metric (string): Metric for measuring similarity. Options: 'cosine',
- 'euclidean', 'euclidean_l2'.
-
- enforce_detection (boolean): If no face is detected in an image, raise an exception.
- Default is True. Set to False to avoid the exception for low-resolution images.
-
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'.
-
- align (boolean): Perform alignment based on the eye positions.
-
- expand_percentage (int): expand detected facial area with a percentage (default is 0).
-
- threshold (float): Specify a threshold to determine whether a pair represents the same
- person or different individuals. This threshold is used for comparing distances.
- If left unset, default pre-tuned threshold values will be applied based on the specified
- model name and distance metric (default is None).
-
- normalization (string): Normalize the input image before feeding it to the model.
- Default is base. Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace
-
- silent (boolean): Suppress or allow some log messages for a quieter analysis process.
-
- refresh_database (boolean): Synchronizes the images representation (pkl) file with the
- directory/db files, if set to false, it will ignore any file changes inside the db_path
- directory (default is True).
-
- anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
-
-
- Returns:
- results (List[pd.DataFrame]): A list of pandas dataframes. Each dataframe corresponds
- to the identity information for an individual detected in the source image.
- The DataFrame columns include:
-
- - 'identity': Identity label of the detected individual.
-
- - 'target_x', 'target_y', 'target_w', 'target_h': Bounding box coordinates of the
- target face in the database.
-
- - 'source_x', 'source_y', 'source_w', 'source_h': Bounding box coordinates of the
- detected face in the source image.
-
- - 'threshold': threshold used to decide whether a pair depicts the same person or different persons
-
- - 'distance': Similarity score between the faces based on the
- specified model and distance metric
- """
-
- tic = time.time()
-
- if not os.path.isdir(db_path):
- raise ValueError(f"Passed path {db_path} does not exist!")
-
- img, _ = image_utils.load_image(img_path)
- if img is None:
- raise ValueError(f"Passed image path {img_path} does not exist!")
-
- file_parts = [
- "ds",
- "model",
- model_name,
- "detector",
- detector_backend,
- "aligned" if align else "unaligned",
- "normalization",
- normalization,
- "expand",
- str(expand_percentage),
- ]
-
- file_name = "_".join(file_parts) + ".pkl"
- file_name = file_name.replace("-", "").lower()
-
- datastore_path = os.path.join(db_path, file_name)
- representations = []
-
- # required columns for representations
- df_cols = [
- "identity",
- "hash",
- "embedding",
- "target_x",
- "target_y",
- "target_w",
- "target_h",
- ]
-
- # Ensure the proper pickle file exists
- if not os.path.exists(datastore_path):
- with open(datastore_path, "wb") as f:
- pickle.dump([], f)
-
- # Load the representations from the pickle file
- with open(datastore_path, "rb") as f:
- representations = pickle.load(f)
-
- # check each item of representations list has required keys
- for i, current_representation in enumerate(representations):
- missing_keys = set(df_cols) - set(current_representation.keys())
- if len(missing_keys) > 0:
- raise ValueError(
- f"{i}-th item does not have some required keys - {missing_keys}."
- f"Consider to delete {datastore_path}"
- )
-
- # embedded images
- pickled_images = [representation["identity"] for representation in representations]
-
- # Get the list of images on storage
- storage_images = image_utils.list_images(path=db_path)
-
- if len(storage_images) == 0 and refresh_database is True:
- raise ValueError(f"No item found in {db_path}")
- if len(representations) == 0 and refresh_database is False:
- raise ValueError(f"Nothing is found in {datastore_path}")
-
- must_save_pickle = False
- new_images, old_images, replaced_images = set(), set(), set()
-
- if not refresh_database:
- logger.info(
- f"Could be some changes in {db_path} not tracked."
- "Set refresh_database to true to assure that any changes will be tracked."
- )
-
- # Enforce data consistency amongst on disk images and pickle file
- if refresh_database:
- new_images = set(storage_images) - set(pickled_images) # images added to storage
- old_images = set(pickled_images) - set(storage_images) # images removed from storage
-
- # detect replaced images
- for current_representation in representations:
- identity = current_representation["identity"]
- if identity in old_images:
- continue
- alpha_hash = current_representation["hash"]
- beta_hash = image_utils.find_image_hash(identity)
- if alpha_hash != beta_hash:
- logger.debug(f"Even though {identity} represented before, it's replaced later.")
- replaced_images.add(identity)
-
- if not silent and (len(new_images) > 0 or len(old_images) > 0 or len(replaced_images) > 0):
- logger.info(
- f"Found {len(new_images)} newly added image(s)"
- f", {len(old_images)} removed image(s)"
- f", {len(replaced_images)} replaced image(s)."
- )
-
- # append replaced images into both old and new images. these will be dropped and re-added.
- new_images.update(replaced_images)
- old_images.update(replaced_images)
-
- # remove old images first
- if len(old_images) > 0:
- representations = [rep for rep in representations if rep["identity"] not in old_images]
- must_save_pickle = True
-
- # find representations for new images
- if len(new_images) > 0:
- representations += __find_bulk_embeddings(
- employees=new_images,
- model_name=model_name,
- detector_backend=detector_backend,
- enforce_detection=enforce_detection,
- align=align,
- expand_percentage=expand_percentage,
- normalization=normalization,
- silent=silent,
- ) # add new images
- must_save_pickle = True
-
- if must_save_pickle:
- with open(datastore_path, "wb") as f:
- pickle.dump(representations, f)
- if not silent:
- logger.info(f"There are now {len(representations)} representations in {file_name}")
-
- # Should we have no representations bailout
- if len(representations) == 0:
- if not silent:
- toc = time.time()
- logger.info(f"find function duration {toc - tic} seconds")
- return []
-
- # ----------------------------
- # now, we got representations for facial database
- df = pd.DataFrame(representations)
-
- if silent is False:
- logger.info(f"Searching {img_path} in {df.shape[0]} length datastore")
-
- # img path might have more than one face
- source_objs = detection.extract_faces(
- img_path=img_path,
- detector_backend=detector_backend,
- grayscale=False,
- enforce_detection=enforce_detection,
- align=align,
- expand_percentage=expand_percentage,
- anti_spoofing=anti_spoofing,
- )
-
- resp_obj = []
-
- for source_obj in source_objs:
- if anti_spoofing is True and source_obj.get("is_real", True) is False:
- raise ValueError("Spoof detected in the given image.")
- source_img = source_obj["face"]
- source_region = source_obj["facial_area"]
- target_embedding_obj = representation.represent(
- img_path=source_img,
- model_name=model_name,
- enforce_detection=enforce_detection,
- detector_backend="skip",
- align=align,
- normalization=normalization,
- )
-
- target_representation = target_embedding_obj[0]["embedding"]
-
- result_df = df.copy() # df will be filtered in each img
- result_df["source_x"] = source_region["x"]
- result_df["source_y"] = source_region["y"]
- result_df["source_w"] = source_region["w"]
- result_df["source_h"] = source_region["h"]
-
- distances = []
- for _, instance in df.iterrows():
- source_representation = instance["embedding"]
- if source_representation is None:
- distances.append(float("inf")) # no representation for this image
- continue
-
- target_dims = len(list(target_representation))
- source_dims = len(list(source_representation))
- if target_dims != source_dims:
- raise ValueError(
- "Source and target embeddings must have same dimensions but "
- + f"{target_dims}:{source_dims}. Model structure may change"
- + " after pickle created. Delete the {file_name} and re-run."
- )
-
- distance = verification.find_distance(
- source_representation, target_representation, distance_metric
- )
-
- distances.append(distance)
-
- # ---------------------------
- target_threshold = threshold or verification.find_threshold(model_name, distance_metric)
-
- result_df["threshold"] = target_threshold
- result_df["distance"] = distances
-
- result_df = result_df.drop(columns=["embedding"])
- # pylint: disable=unsubscriptable-object
- result_df = result_df[result_df["distance"] <= target_threshold]
- result_df = result_df.sort_values(by=["distance"], ascending=True).reset_index(drop=True)
-
- resp_obj.append(result_df)
-
- # -----------------------------------
-
- if not silent:
- toc = time.time()
- logger.info(f"find function duration {toc - tic} seconds")
-
- return resp_obj
-
-
-def __find_bulk_embeddings(
- employees: Set[str],
- model_name: str = "VGG-Face",
- detector_backend: str = "opencv",
- enforce_detection: bool = True,
- align: bool = True,
- expand_percentage: int = 0,
- normalization: str = "base",
- silent: bool = False,
-) -> List[Dict["str", Any]]:
- """
- Find embeddings of a list of images
-
- Args:
- employees (list): list of exact image paths
-
- model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
- OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
-
- detector_backend (str): face detector model name
-
- enforce_detection (bool): set this to False if you
- want to proceed when you cannot detect any face
-
- align (bool): enable or disable alignment of image
- before feeding to facial recognition model
-
- expand_percentage (int): expand detected facial area with a
- percentage (default is 0).
-
- normalization (str): normalization technique
-
- silent (bool): enable or disable informative logging
- Returns:
- representations (list): pivot list of dict with
- image name, hash, embedding and detected face area's coordinates
- """
- representations = []
- for employee in tqdm(
- employees,
- desc="Finding representations",
- disable=silent,
- ):
- file_hash = image_utils.find_image_hash(employee)
-
- try:
- img_objs = detection.extract_faces(
- img_path=employee,
- detector_backend=detector_backend,
- grayscale=False,
- enforce_detection=enforce_detection,
- align=align,
- expand_percentage=expand_percentage,
- )
-
- except ValueError as err:
- logger.error(f"Exception while extracting faces from {employee}: {str(err)}")
- img_objs = []
-
- if len(img_objs) == 0:
- representations.append(
- {
- "identity": employee,
- "hash": file_hash,
- "embedding": None,
- "target_x": 0,
- "target_y": 0,
- "target_w": 0,
- "target_h": 0,
- }
- )
- else:
- for img_obj in img_objs:
- img_content = img_obj["face"]
- img_region = img_obj["facial_area"]
- embedding_obj = representation.represent(
- img_path=img_content,
- model_name=model_name,
- enforce_detection=enforce_detection,
- detector_backend="skip",
- align=align,
- normalization=normalization,
- )
-
- img_representation = embedding_obj[0]["embedding"]
- representations.append(
- {
- "identity": employee,
- "hash": file_hash,
- "embedding": img_representation,
- "target_x": img_region["x"],
- "target_y": img_region["y"],
- "target_w": img_region["w"],
- "target_h": img_region["h"],
- }
- )
-
- return representations
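`find` builds (or refreshes) a pickle of embeddings for every image under `db_path`, then compares the query face(s) against it. A minimal sketch through the public `DeepFace.find` wrapper (the query image and database folder are hypothetical; the first run creates a `ds_*.pkl` representations file inside the folder and later runs reuse it):

```python
from deepface import DeepFace

dfs = DeepFace.find(
    img_path="query.jpg",
    db_path="my_db",
    model_name="VGG-Face",
    distance_metric="cosine",
    enforce_detection=False,
)

# one dataframe per face found in the query image, already filtered by the
# model/metric threshold and sorted by ascending distance
for df in dfs:
    print(df[["identity", "distance", "threshold"]].head())
```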
diff --git a/deepface/modules/representation.py b/deepface/modules/representation.py
deleted file mode 100644
index a147640..0000000
--- a/deepface/modules/representation.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# built-in dependencies
-from typing import Any, Dict, List, Union, Optional
-
-# 3rd party dependencies
-import numpy as np
-
-# project dependencies
-from deepface.commons import image_utils
-from deepface.modules import modeling, detection, preprocessing
-from deepface.models.FacialRecognition import FacialRecognition
-
-
-def represent(
- img_path: Union[str, np.ndarray],
- model_name: str = "VGG-Face",
- enforce_detection: bool = True,
- detector_backend: str = "opencv",
- align: bool = True,
- expand_percentage: int = 0,
- normalization: str = "base",
- anti_spoofing: bool = False,
- max_faces: Optional[int] = None,
-) -> List[Dict[str, Any]]:
- """
- Represent facial images as multi-dimensional vector embeddings.
-
- Args:
- img_path (str or np.ndarray): The exact path to the image, a numpy array in BGR format,
- or a base64 encoded image. If the source image contains multiple faces, the result will
- include information for each detected face.
-
- model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
- OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet
-
- enforce_detection (boolean): If no face is detected in an image, raise an exception.
- Default is True. Set to False to avoid the exception for low-resolution images.
-
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'.
-
- align (boolean): Perform alignment based on the eye positions.
-
- expand_percentage (int): expand detected facial area with a percentage (default is 0).
-
- normalization (string): Normalize the input image before feeding it to the model.
- Default is base. Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace
-
- anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
-
- max_faces (int): Set a limit on the number of faces to be processed (default is None).
-
- Returns:
- results (List[Dict[str, Any]]): A list of dictionaries, each containing the
- following fields:
-
- - embedding (List[float]): Multidimensional vector representing facial features.
- The number of dimensions varies based on the reference model
- (e.g., FaceNet returns 128 dimensions, VGG-Face returns 4096 dimensions).
- - facial_area (dict): Detected facial area by face detection in dictionary format.
- Contains 'x' and 'y' as the left-corner point, and 'w' and 'h'
- as the width and height. If `detector_backend` is set to 'skip', it represents
- the full image area and is nonsensical.
- - face_confidence (float): Confidence score of face detection. If `detector_backend` is set
- to 'skip', the confidence will be 0 and is nonsensical.
- """
- resp_objs = []
-
- model: FacialRecognition = modeling.build_model(
- task="facial_recognition", model_name=model_name
- )
-
- # ---------------------------------
- # pre-processing has already been run in verification, so it can be skipped when coming from verify
- target_size = model.input_shape
- if detector_backend != "skip":
- img_objs = detection.extract_faces(
- img_path=img_path,
- detector_backend=detector_backend,
- grayscale=False,
- enforce_detection=enforce_detection,
- align=align,
- expand_percentage=expand_percentage,
- anti_spoofing=anti_spoofing,
- max_faces=max_faces,
- )
- else: # skip
- # Try to load the image; loading errors raise an exception internally
- img, _ = image_utils.load_image(img_path)
-
- if len(img.shape) != 3:
- raise ValueError(f"Input img must be 3 dimensional but it is {img.shape}")
-
- # make dummy region and confidence to keep compatibility with `extract_faces`
- img_objs = [
- {
- "face": img,
- "facial_area": {"x": 0, "y": 0, "w": img.shape[0], "h": img.shape[1]},
- "confidence": 0,
- }
- ]
- # ---------------------------------
-
- if max_faces is not None and max_faces < len(img_objs):
- # sort as largest facial areas come first
- img_objs = sorted(
- img_objs,
- key=lambda img_obj: img_obj["facial_area"]["w"] * img_obj["facial_area"]["h"],
- reverse=True,
- )
- # discard rest of the items
- img_objs = img_objs[0:max_faces]
-
- for img_obj in img_objs:
- if anti_spoofing is True and img_obj.get("is_real", True) is False:
- raise ValueError("Spoof detected in the given image.")
- img = img_obj["face"]
-
- # rgb to bgr
- img = img[:, :, ::-1]
-
- region = img_obj["facial_area"]
- confidence = img_obj["confidence"]
-
- # resize to expected shape of ml model
- img = preprocessing.resize_image(
- img=img,
- # thanks to DeepId (!)
- target_size=(target_size[1], target_size[0]),
- )
-
- # custom normalization
- img = preprocessing.normalize_input(img=img, normalization=normalization)
-
- embedding = model.forward(img)
-
- resp_objs.append(
- {
- "embedding": embedding,
- "facial_area": region,
- "face_confidence": confidence,
- }
- )
-
- return resp_objs
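`represent` turns each detected face into an embedding vector whose length depends on the chosen model. A minimal sketch through the public `DeepFace.represent` wrapper (the image path and parameter values are hypothetical):

```python
from deepface import DeepFace

embedding_objs = DeepFace.represent(
    img_path="img.jpg",
    model_name="Facenet",      # 128-dimensional embeddings
    detector_backend="opencv",
    normalization="Facenet",   # see preprocessing.normalize_input above
)

for obj in embedding_objs:
    print(len(obj["embedding"]), obj["facial_area"], obj["face_confidence"])
```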
diff --git a/deepface/modules/streaming.py b/deepface/modules/streaming.py
deleted file mode 100644
index c1a0363..0000000
--- a/deepface/modules/streaming.py
+++ /dev/null
@@ -1,1006 +0,0 @@
-# built-in dependencies
-import os
-import time
-from typing import List, Tuple, Optional
-import traceback
-
-# 3rd party dependencies
-import numpy as np
-import pandas as pd
-import cv2
-
-# project dependencies
-from deepface import DeepFace
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# dependency configuration
-os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
-
-
-IDENTIFIED_IMG_SIZE = 112
-TEXT_COLOR = (255, 255, 255)
-
-# pylint: disable=unused-variable
-def analysis(
- db_path: str,
- model_name="VGG-Face",
- detector_backend="opencv",
- distance_metric="cosine",
- enable_face_analysis=True,
- source=0,
- time_threshold=5,
- frame_threshold=5,
- anti_spoofing: bool = False,
-):
- """
- Run real time face recognition and facial attribute analysis
-
- Args:
- db_path (string): Path to the folder containing image files. All detected faces
- in the database will be considered in the decision-making process.
-
- model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
- OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
-
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
- (default is opencv).
-
- distance_metric (string): Metric for measuring similarity. Options: 'cosine',
- 'euclidean', 'euclidean_l2' (default is cosine).
-
- enable_face_analysis (bool): Flag to enable face analysis (default is True).
-
- source (Any): The source for the video stream (default is 0, which represents the
- default camera).
-
- time_threshold (int): The time threshold (in seconds) for face recognition (default is 5).
-
- frame_threshold (int): The frame threshold for face recognition (default is 5).
-
- anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
-
- Returns:
- None
- """
- # initialize models
- build_demography_models(enable_face_analysis=enable_face_analysis)
- build_facial_recognition_model(model_name=model_name)
- # call a dummy find function for db_path once to create embeddings before starting webcam
- _ = search_identity(
- detected_face=np.zeros([224, 224, 3]),
- db_path=db_path,
- detector_backend=detector_backend,
- distance_metric=distance_metric,
- model_name=model_name,
- )
-
- freezed_img = None
- freeze = False
- num_frames_with_faces = 0
- tic = time.time()
-
- cap = cv2.VideoCapture(source) # webcam
- while True:
- has_frame, img = cap.read()
- if not has_frame:
- break
-
- # we will draw overlays on img such as the identified facial image, age and gender,
- # so we keep the raw image itself for analysis
- raw_img = img.copy()
-
- faces_coordinates = []
- if freeze is False:
- faces_coordinates = grab_facial_areas(
- img=img, detector_backend=detector_backend, anti_spoofing=anti_spoofing
- )
-
- # we will pass img to the analysis modules (identity, demography) and add some illustrations,
- # so we will not be able to extract the detected face from img cleanly
- detected_faces = extract_facial_areas(img=img, faces_coordinates=faces_coordinates)
-
- img = highlight_facial_areas(img=img, faces_coordinates=faces_coordinates)
- img = countdown_to_freeze(
- img=img,
- faces_coordinates=faces_coordinates,
- frame_threshold=frame_threshold,
- num_frames_with_faces=num_frames_with_faces,
- )
-
- num_frames_with_faces = num_frames_with_faces + 1 if len(faces_coordinates) else 0
-
- freeze = num_frames_with_faces > 0 and num_frames_with_faces % frame_threshold == 0
- if freeze:
- # add analyze results into img - derive from raw_img
- img = highlight_facial_areas(
- img=raw_img, faces_coordinates=faces_coordinates, anti_spoofing=anti_spoofing
- )
-
- # age, gender and emotion analysis
- img = perform_demography_analysis(
- enable_face_analysis=enable_face_analysis,
- img=raw_img,
- faces_coordinates=faces_coordinates,
- detected_faces=detected_faces,
- )
- # facial recognition analysis
- img = perform_facial_recognition(
- img=img,
- faces_coordinates=faces_coordinates,
- detected_faces=detected_faces,
- db_path=db_path,
- detector_backend=detector_backend,
- distance_metric=distance_metric,
- model_name=model_name,
- )
-
- # freeze the img after analysis
- freezed_img = img.copy()
-
- # start counter for freezing
- tic = time.time()
- logger.info("freezed")
-
- elif freeze is True and time.time() - tic > time_threshold:
- freeze = False
- freezed_img = None
- # reset counter for freezing
- tic = time.time()
- logger.info("freeze released")
-
- freezed_img = countdown_to_release(img=freezed_img, tic=tic, time_threshold=time_threshold)
-
- cv2.imshow("img", img if freezed_img is None else freezed_img)
-
- if cv2.waitKey(1) & 0xFF == ord("q"): # press q to quit
- break
-
- # kill open cv things
- cap.release()
- cv2.destroyAllWindows()
-
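The `analysis` loop above drives the real-time demo: it counts consecutive frames containing faces, freezes the annotated frame for `time_threshold` seconds once `frame_threshold` is reached, and overlays identity and demography results. A minimal sketch of starting it directly (the reference-image folder is hypothetical; press q to quit):

```python
from deepface.modules import streaming

streaming.analysis(
    db_path="my_db",            # folder of reference images to recognize against
    model_name="VGG-Face",
    detector_backend="opencv",
    distance_metric="cosine",
    enable_face_analysis=True,  # also show age, gender and emotion
    source=0,                   # default webcam
    time_threshold=5,
    frame_threshold=5,
)
```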
-
-def build_facial_recognition_model(model_name: str) -> None:
- """
- Build facial recognition model
- Args:
- model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
- OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
- Returns:
- None
- """
- _ = DeepFace.build_model(task="facial_recognition", model_name=model_name)
- logger.info(f"{model_name} is built")
-
-
-def search_identity(
- detected_face: np.ndarray,
- db_path: str,
- model_name: str,
- detector_backend: str,
- distance_metric: str,
-) -> Tuple[Optional[str], Optional[np.ndarray]]:
- """
- Search an identity in facial database.
- Args:
- detected_face (np.ndarray): extracted individual facial image
- db_path (string): Path to the folder containing image files. All detected faces
- in the database will be considered in the decision-making process.
- model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
- OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
- (default is opencv).
- distance_metric (string): Metric for measuring similarity. Options: 'cosine',
- 'euclidean', 'euclidean_l2' (default is cosine).
- Returns:
- result (tuple): result consisting of following objects
- identified image path (str)
- identified image itself (np.ndarray)
- """
- target_path = None
- try:
- dfs = DeepFace.find(
- img_path=detected_face,
- db_path=db_path,
- model_name=model_name,
- detector_backend=detector_backend,
- distance_metric=distance_metric,
- enforce_detection=False,
- silent=True,
- )
- except ValueError as err:
- if f"No item found in {db_path}" in str(err):
- logger.warn(
- f"No item is found in {db_path}."
- "So, no facial recognition analysis will be performed."
- )
- dfs = []
- else:
- raise err
- if len(dfs) == 0:
- # you may consider to return unknown person's image here
- return None, None
-
- # detected face is coming from parent, safe to access 1st index
- df = dfs[0]
-
- if df.shape[0] == 0:
- return None, None
-
- candidate = df.iloc[0]
- target_path = candidate["identity"]
- logger.info(f"Hello, {target_path}")
-
- # load found identity image - extracted if possible
- target_objs = DeepFace.extract_faces(
- img_path=target_path,
- detector_backend=detector_backend,
- enforce_detection=False,
- align=True,
- )
-
- # extract facial area of the identified image if and only if it has one face
- # otherwise, show image as is
- if len(target_objs) == 1:
- # extract 1st item directly
- target_obj = target_objs[0]
- target_img = target_obj["face"]
- target_img *= 255
- target_img = target_img[:, :, ::-1]
- else:
- target_img = cv2.imread(target_path)
-
- # resize anyway
- target_img = cv2.resize(target_img, (IDENTIFIED_IMG_SIZE, IDENTIFIED_IMG_SIZE))
-
- return target_path.split("/")[-1], target_img
-
-
-def build_demography_models(enable_face_analysis: bool) -> None:
- """
- Build demography analysis models
- Args:
- enable_face_analysis (bool): Flag to enable face analysis (default is True).
- Returns:
- None
- """
- if enable_face_analysis is False:
- return
- DeepFace.build_model(task="facial_attribute", model_name="Age")
- logger.info("Age model is just built")
- DeepFace.build_model(task="facial_attribute", model_name="Gender")
- logger.info("Gender model is just built")
- DeepFace.build_model(task="facial_attribute", model_name="Emotion")
- logger.info("Emotion model is just built")
-
-
-def highlight_facial_areas(
- img: np.ndarray,
- faces_coordinates: List[Tuple[int, int, int, int, bool, float]],
- anti_spoofing: bool = False,
-) -> np.ndarray:
- """
- Highlight detected faces with rectangles in the given image
- Args:
- img (np.ndarray): image itself
- faces_coordinates (list): list of face coordinates as tuple with x, y, w and h
- also is_real and antispoof_score keys
- anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
- Returns:
- img (np.ndarray): image with highlighted facial areas
- """
- for x, y, w, h, is_real, antispoof_score in faces_coordinates:
- # highlight facial area with rectangle
-
- if anti_spoofing is False:
- color = (67, 67, 67)
- else:
- if is_real is True:
- color = (0, 255, 0)
- else:
- color = (0, 0, 255)
- cv2.rectangle(img, (x, y), (x + w, y + h), color, 1)
- return img
-
-
-def countdown_to_freeze(
- img: np.ndarray,
- faces_coordinates: List[Tuple[int, int, int, int, bool, float]],
- frame_threshold: int,
- num_frames_with_faces: int,
-) -> np.ndarray:
- """
- Highlight time to freeze in the image's facial areas
- Args:
- img (np.ndarray): image itself
- faces_coordinates (list): list of face coordinates as tuple with x, y, w and h
- frame_threshold (int): how many sequential frames with face(s) are required to freeze
- num_frames_with_faces (int): how many sequential frames with face(s) we have so far
- Returns:
- img (np.ndarray): image with counter values
- """
- for x, y, w, h, is_real, antispoof_score in faces_coordinates:
- cv2.putText(
- img,
- str(frame_threshold - (num_frames_with_faces % frame_threshold)),
- (int(x + w / 4), int(y + h / 1.5)),
- cv2.FONT_HERSHEY_SIMPLEX,
- 4,
- (255, 255, 255),
- 2,
- )
- return img
-
-
-def countdown_to_release(
- img: Optional[np.ndarray], tic: float, time_threshold: int
-) -> Optional[np.ndarray]:
- """
- Highlight time to release the freezing in the image top left area
- Args:
- img (np.ndarray): image itself
- tic (float): time specifying when freezing started
- time_threshold (int): freeze time threshold
- Returns:
- img (np.ndarray): image with time to release the freezing
- """
- # do not take any action if it is not frozen yet
- if img is None:
- return img
- toc = time.time()
- time_left = int(time_threshold - (toc - tic) + 1)
- cv2.rectangle(img, (10, 10), (90, 50), (67, 67, 67), -10)
- cv2.putText(
- img,
- str(time_left),
- (40, 40),
- cv2.FONT_HERSHEY_SIMPLEX,
- 1,
- (255, 255, 255),
- 1,
- )
- return img
-
-
-def grab_facial_areas(
- img: np.ndarray, detector_backend: str, threshold: int = 130, anti_spoofing: bool = False
-) -> List[Tuple[int, int, int, int, bool, float]]:
- """
- Find facial area coordinates in the given image
- Args:
- img (np.ndarray): image itself
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
- (default is opencv).
- threshold (int): minimum facial area width in pixels; smaller faces are discarded
- anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
- Returns:
- result (list): list of tuples with x, y, w, h, is_real and antispoof_score values
- """
- try:
- face_objs = DeepFace.extract_faces(
- img_path=img,
- detector_backend=detector_backend,
- # you may consider extracting with a larger expand_percentage value
- expand_percentage=0,
- anti_spoofing=anti_spoofing,
- )
- faces = [
- (
- face_obj["facial_area"]["x"],
- face_obj["facial_area"]["y"],
- face_obj["facial_area"]["w"],
- face_obj["facial_area"]["h"],
- face_obj.get("is_real", True),
- face_obj.get("antispoof_score", 0),
- )
- for face_obj in face_objs
- if face_obj["facial_area"]["w"] > threshold
- ]
- return faces
- except Exception:  # return an empty list if no face is detected
- return []
-
-
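As a side note, a hedged sketch of the face objects `DeepFace.extract_faces` returns, which `grab_facial_areas` filters by width; the image path below is a placeholder assumption.

```python
# Illustrative sketch only; "frame.jpg" is a placeholder path.
from deepface import DeepFace

face_objs = DeepFace.extract_faces(
    img_path="frame.jpg",
    detector_backend="opencv",
    expand_percentage=0,
    anti_spoofing=True,          # populates is_real / antispoof_score for each face
)

for face_obj in face_objs:
    area = face_obj["facial_area"]                    # x, y, w, h of the detected face
    if area["w"] > 130:                               # same width filter as above
        print(area["x"], area["y"], area["w"], area["h"])
        print(face_obj.get("is_real", True), face_obj.get("antispoof_score", 0))
```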
-def extract_facial_areas(
- img: np.ndarray, faces_coordinates: List[Tuple[int, int, int, int, bool, float]]
-) -> List[np.ndarray]:
- """
- Extract facial areas as numpy array from given image
- Args:
- img (np.ndarray): image itself
- faces_coordinates (list): list of facial area coordinates as tuples with
- x, y, w, h, is_real and antispoof_score values
- Returns:
- detected_faces (list): list of detected facial area images
- """
- detected_faces = []
- for x, y, w, h, is_real, antispoof_score in faces_coordinates:
- detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]
- detected_faces.append(detected_face)
- return detected_faces
-
-
-def perform_facial_recognition(
- img: np.ndarray,
- detected_faces: List[np.ndarray],
- faces_coordinates: List[Tuple[int, int, int, int, bool, float]],
- db_path: str,
- detector_backend: str,
- distance_metric: str,
- model_name: str,
-) -> np.ndarray:
- """
- Perform facial recognition
- Args:
- img (np.ndarray): image itself
- detected_faces (list): list of extracted detected face images as numpy arrays
- faces_coordinates (list): list of facial area coordinates as tuples with
- x, y, w, h, is_real and antispoof_score values
- db_path (string): Path to the folder containing image files. All detected faces
- in the database will be considered in the decision-making process.
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
- (default is opencv).
- distance_metric (string): Metric for measuring similarity. Options: 'cosine',
- 'euclidean', 'euclidean_l2' (default is cosine).
- model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
- OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
- Returns:
- img (np.ndarray): image with identified face information
- """
- for idx, (x, y, w, h, is_real, antispoof_score) in enumerate(faces_coordinates):
- detected_face = detected_faces[idx]
- target_label, target_img = search_identity(
- detected_face=detected_face,
- db_path=db_path,
- detector_backend=detector_backend,
- distance_metric=distance_metric,
- model_name=model_name,
- )
- if target_label is None:
- continue
-
- img = overlay_identified_face(
- img=img,
- target_img=target_img,
- label=target_label,
- x=x,
- y=y,
- w=w,
- h=h,
- )
-
- return img
-
-
-def perform_demography_analysis(
- enable_face_analysis: bool,
- img: np.ndarray,
- faces_coordinates: List[Tuple[int, int, int, int, bool, float]],
- detected_faces: List[np.ndarray],
-) -> np.ndarray:
- """
- Perform demography analysis on given image
- Args:
- enable_face_analysis (bool): Flag to enable face analysis.
- img (np.ndarray): image itself
- faces_coordinates (list): list of face coordinates as tuples with
- x, y, w, h, is_real and antispoof_score values
- detected_faces (list): list of extracted detected face images as numpy arrays
- Returns:
- img (np.ndarray): image with analyzed demography information
- """
- if enable_face_analysis is False:
- return img
- for idx, (x, y, w, h, is_real, antispoof_score) in enumerate(faces_coordinates):
- detected_face = detected_faces[idx]
- demographies = DeepFace.analyze(
- img_path=detected_face,
- actions=("age", "gender", "emotion"),
- detector_backend="skip",
- enforce_detection=False,
- silent=True,
- )
-
- if len(demographies) == 0:
- continue
-
- # safe to access 1st index because detector backend is skip
- demography = demographies[0]
-
- img = overlay_emotion(img=img, emotion_probas=demography["emotion"], x=x, y=y, w=w, h=h)
- img = overlay_age_gender(
- img=img,
- apparent_age=demography["age"],
- gender=demography["dominant_gender"][0:1], # M or W
- x=x,
- y=y,
- w=w,
- h=h,
- )
- return img
-
-
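A hedged, standalone sketch of the `DeepFace.analyze` call used above on an already-cropped face; `face.jpg` stands in for such a crop and is a placeholder assumption.

```python
# Illustrative sketch only; "face.jpg" stands in for an already-cropped face image.
import cv2
from deepface import DeepFace

detected_face = cv2.imread("face.jpg")

demographies = DeepFace.analyze(
    img_path=detected_face,
    actions=("age", "gender", "emotion"),
    detector_backend="skip",     # the face is pre-cropped, so skip re-detection
    enforce_detection=False,
    silent=True,
)

if demographies:
    demography = demographies[0]              # single face, the first result is enough
    print(demography["age"])                  # apparent age
    print(demography["dominant_gender"])      # e.g. "Man" or "Woman"
    print(demography["emotion"])              # per-emotion probabilities
```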
-def overlay_identified_face(
- img: np.ndarray,
- target_img: np.ndarray,
- label: str,
- x: int,
- y: int,
- w: int,
- h: int,
-) -> np.ndarray:
- """
- Overlay the identified face onto image itself
- Args:
- img (np.ndarray): image itself
- target_img (np.ndarray): identified face's image
- label (str): name of the identified face
- x (int): x coordinate of the face on the given image
- y (int): y coordinate of the face on the given image
- w (int): width of the facial area on the given image
- h (int): height of the facial area on the given image
- Returns:
- img (np.ndarray): image with overlaid identity
- """
- try:
- if y - IDENTIFIED_IMG_SIZE > 0 and x + w + IDENTIFIED_IMG_SIZE < img.shape[1]:
- # top right
- img[
- y - IDENTIFIED_IMG_SIZE : y,
- x + w : x + w + IDENTIFIED_IMG_SIZE,
- ] = target_img
-
- overlay = img.copy()
- opacity = 0.4
- cv2.rectangle(
- img,
- (x + w, y),
- (x + w + IDENTIFIED_IMG_SIZE, y + 20),
- (46, 200, 255),
- cv2.FILLED,
- )
- cv2.addWeighted(
- overlay,
- opacity,
- img,
- 1 - opacity,
- 0,
- img,
- )
-
- cv2.putText(
- img,
- label,
- (x + w, y + 10),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.5,
- TEXT_COLOR,
- 1,
- )
-
- # connect face and text
- cv2.line(
- img,
- (x + int(w / 2), y),
- (x + 3 * int(w / 4), y - int(IDENTIFIED_IMG_SIZE / 2)),
- (67, 67, 67),
- 1,
- )
- cv2.line(
- img,
- (x + 3 * int(w / 4), y - int(IDENTIFIED_IMG_SIZE / 2)),
- (x + w, y - int(IDENTIFIED_IMG_SIZE / 2)),
- (67, 67, 67),
- 1,
- )
-
- elif y + h + IDENTIFIED_IMG_SIZE < img.shape[0] and x - IDENTIFIED_IMG_SIZE > 0:
- # bottom left
- img[
- y + h : y + h + IDENTIFIED_IMG_SIZE,
- x - IDENTIFIED_IMG_SIZE : x,
- ] = target_img
-
- overlay = img.copy()
- opacity = 0.4
- cv2.rectangle(
- img,
- (x - IDENTIFIED_IMG_SIZE, y + h - 20),
- (x, y + h),
- (46, 200, 255),
- cv2.FILLED,
- )
- cv2.addWeighted(
- overlay,
- opacity,
- img,
- 1 - opacity,
- 0,
- img,
- )
-
- cv2.putText(
- img,
- label,
- (x - IDENTIFIED_IMG_SIZE, y + h - 10),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.5,
- TEXT_COLOR,
- 1,
- )
-
- # connect face and text
- cv2.line(
- img,
- (x + int(w / 2), y + h),
- (
- x + int(w / 2) - int(w / 4),
- y + h + int(IDENTIFIED_IMG_SIZE / 2),
- ),
- (67, 67, 67),
- 1,
- )
- cv2.line(
- img,
- (
- x + int(w / 2) - int(w / 4),
- y + h + int(IDENTIFIED_IMG_SIZE / 2),
- ),
- (x, y + h + int(IDENTIFIED_IMG_SIZE / 2)),
- (67, 67, 67),
- 1,
- )
-
- elif y - IDENTIFIED_IMG_SIZE > 0 and x - IDENTIFIED_IMG_SIZE > 0:
- # top left
- img[y - IDENTIFIED_IMG_SIZE : y, x - IDENTIFIED_IMG_SIZE : x] = target_img
-
- overlay = img.copy()
- opacity = 0.4
- cv2.rectangle(
- img,
- (x - IDENTIFIED_IMG_SIZE, y),
- (x, y + 20),
- (46, 200, 255),
- cv2.FILLED,
- )
- cv2.addWeighted(
- overlay,
- opacity,
- img,
- 1 - opacity,
- 0,
- img,
- )
-
- cv2.putText(
- img,
- label,
- (x - IDENTIFIED_IMG_SIZE, y + 10),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.5,
- TEXT_COLOR,
- 1,
- )
-
- # connect face and text
- cv2.line(
- img,
- (x + int(w / 2), y),
- (
- x + int(w / 2) - int(w / 4),
- y - int(IDENTIFIED_IMG_SIZE / 2),
- ),
- (67, 67, 67),
- 1,
- )
- cv2.line(
- img,
- (
- x + int(w / 2) - int(w / 4),
- y - int(IDENTIFIED_IMG_SIZE / 2),
- ),
- (x, y - int(IDENTIFIED_IMG_SIZE / 2)),
- (67, 67, 67),
- 1,
- )
-
- elif (
- x + w + IDENTIFIED_IMG_SIZE < img.shape[1]
- and y + h + IDENTIFIED_IMG_SIZE < img.shape[0]
- ):
- # bottom right
- img[
- y + h : y + h + IDENTIFIED_IMG_SIZE,
- x + w : x + w + IDENTIFIED_IMG_SIZE,
- ] = target_img
-
- overlay = img.copy()
- opacity = 0.4
- cv2.rectangle(
- img,
- (x + w, y + h - 20),
- (x + w + IDENTIFIED_IMG_SIZE, y + h),
- (46, 200, 255),
- cv2.FILLED,
- )
- cv2.addWeighted(
- overlay,
- opacity,
- img,
- 1 - opacity,
- 0,
- img,
- )
-
- cv2.putText(
- img,
- label,
- (x + w, y + h - 10),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.5,
- TEXT_COLOR,
- 1,
- )
-
- # connect face and text
- cv2.line(
- img,
- (x + int(w / 2), y + h),
- (
- x + int(w / 2) + int(w / 4),
- y + h + int(IDENTIFIED_IMG_SIZE / 2),
- ),
- (67, 67, 67),
- 1,
- )
- cv2.line(
- img,
- (
- x + int(w / 2) + int(w / 4),
- y + h + int(IDENTIFIED_IMG_SIZE / 2),
- ),
- (x + w, y + h + int(IDENTIFIED_IMG_SIZE / 2)),
- (67, 67, 67),
- 1,
- )
- else:
- logger.info("cannot put facial recognition info on the image")
- except Exception as err: # pylint: disable=broad-except
- logger.error(f"{str(err)} - {traceback.format_exc()}")
- return img
-
-
-def overlay_emotion(
- img: np.ndarray, emotion_probas: dict, x: int, y: int, w: int, h: int
-) -> np.ndarray:
- """
- Overlay the analyzed emotion of face onto image itself
- Args:
- img (np.ndarray): image itself
- emotion_probas (dict): dictionary of emotion probabilities
- x (int): x coordinate of the face on the given image
- y (int): y coordinate of the face on the given image
- w (int): width of the facial area on the given image
- h (int): height of the facial area on the given image
- Returns:
- img (np.ndarray): image with overlaid emotion analysis results
- """
- emotion_df = pd.DataFrame(emotion_probas.items(), columns=["emotion", "score"])
- emotion_df = emotion_df.sort_values(by=["score"], ascending=False).reset_index(drop=True)
-
- # background of mood box
-
- # transparency
- overlay = img.copy()
- opacity = 0.4
-
- # put gray background to the right of the detected image
- if x + w + IDENTIFIED_IMG_SIZE < img.shape[1]:
- cv2.rectangle(
- img,
- (x + w, y),
- (x + w + IDENTIFIED_IMG_SIZE, y + h),
- (64, 64, 64),
- cv2.FILLED,
- )
- cv2.addWeighted(overlay, opacity, img, 1 - opacity, 0, img)
-
- # put gray background to the left of the detected image
- elif x - IDENTIFIED_IMG_SIZE > 0:
- cv2.rectangle(
- img,
- (x - IDENTIFIED_IMG_SIZE, y),
- (x, y + h),
- (64, 64, 64),
- cv2.FILLED,
- )
- cv2.addWeighted(overlay, opacity, img, 1 - opacity, 0, img)
-
- for index, instance in emotion_df.iterrows():
- current_emotion = instance["emotion"]
- emotion_label = f"{current_emotion} "
- emotion_score = instance["score"] / 100
-
- filled_bar_x = 35  # full bar length when an emotion's probability is 100%
- bar_x = int(filled_bar_x * emotion_score)
-
- if x + w + IDENTIFIED_IMG_SIZE < img.shape[1]:
-
- text_location_y = y + 20 + (index + 1) * 20
- text_location_x = x + w
-
- if text_location_y < y + h:
- cv2.putText(
- img,
- emotion_label,
- (text_location_x, text_location_y),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.5,
- (255, 255, 255),
- 1,
- )
-
- cv2.rectangle(
- img,
- (x + w + 70, y + 13 + (index + 1) * 20),
- (
- x + w + 70 + bar_x,
- y + 13 + (index + 1) * 20 + 5,
- ),
- (255, 255, 255),
- cv2.FILLED,
- )
-
- elif x - IDENTIFIED_IMG_SIZE > 0:
-
- text_location_y = y + 20 + (index + 1) * 20
- text_location_x = x - IDENTIFIED_IMG_SIZE
-
- if text_location_y <= y + h:
- cv2.putText(
- img,
- emotion_label,
- (text_location_x, text_location_y),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.5,
- (255, 255, 255),
- 1,
- )
-
- cv2.rectangle(
- img,
- (
- x - IDENTIFIED_IMG_SIZE + 70,
- y + 13 + (index + 1) * 20,
- ),
- (
- x - IDENTIFIED_IMG_SIZE + 70 + bar_x,
- y + 13 + (index + 1) * 20 + 5,
- ),
- (255, 255, 255),
- cv2.FILLED,
- )
-
- return img
-
-
-def overlay_age_gender(
- img: np.ndarray, apparent_age: float, gender: str, x: int, y: int, w: int, h: int
-) -> np.ndarray:
- """
- Overlay the analyzed age and gender of face onto image itself
- Args:
- img (np.ndarray): image itself
- apparent_age (float): analyzed apparent age
- gender (str): analyzed gender
- x (int): x coordinate of the face on the given image
- y (int): y coordinate of the face on the given image
- w (int): width of the facial area on the given image
- h (int): height of the facial area on the given image
- Returns:
- img (np.ndarray): image with overlaid age and gender analysis results
- """
- logger.debug(f"{apparent_age} years old {gender}")
- analysis_report = f"{int(apparent_age)} {gender}"
-
- info_box_color = (46, 200, 255)
-
- # show its age and gender on the top of the image
- if y - IDENTIFIED_IMG_SIZE + int(IDENTIFIED_IMG_SIZE / 5) > 0:
-
- triangle_coordinates = np.array(
- [
- (x + int(w / 2), y),
- (
- x + int(w / 2) - int(w / 10),
- y - int(IDENTIFIED_IMG_SIZE / 3),
- ),
- (
- x + int(w / 2) + int(w / 10),
- y - int(IDENTIFIED_IMG_SIZE / 3),
- ),
- ]
- )
-
- cv2.drawContours(
- img,
- [triangle_coordinates],
- 0,
- info_box_color,
- -1,
- )
-
- cv2.rectangle(
- img,
- (
- x + int(w / 5),
- y - IDENTIFIED_IMG_SIZE + int(IDENTIFIED_IMG_SIZE / 5),
- ),
- (x + w - int(w / 5), y - int(IDENTIFIED_IMG_SIZE / 3)),
- info_box_color,
- cv2.FILLED,
- )
-
- cv2.putText(
- img,
- analysis_report,
- (x + int(w / 3.5), y - int(IDENTIFIED_IMG_SIZE / 2.1)),
- cv2.FONT_HERSHEY_SIMPLEX,
- 1,
- (0, 111, 255),
- 2,
- )
-
- # show its age and gender at the bottom of the image
- elif y + h + IDENTIFIED_IMG_SIZE - int(IDENTIFIED_IMG_SIZE / 5) < img.shape[0]:
-
- triangle_coordinates = np.array(
- [
- (x + int(w / 2), y + h),
- (
- x + int(w / 2) - int(w / 10),
- y + h + int(IDENTIFIED_IMG_SIZE / 3),
- ),
- (
- x + int(w / 2) + int(w / 10),
- y + h + int(IDENTIFIED_IMG_SIZE / 3),
- ),
- ]
- )
-
- cv2.drawContours(
- img,
- [triangle_coordinates],
- 0,
- info_box_color,
- -1,
- )
-
- cv2.rectangle(
- img,
- (x + int(w / 5), y + h + int(IDENTIFIED_IMG_SIZE / 3)),
- (
- x + w - int(w / 5),
- y + h + IDENTIFIED_IMG_SIZE - int(IDENTIFIED_IMG_SIZE / 5),
- ),
- info_box_color,
- cv2.FILLED,
- )
-
- cv2.putText(
- img,
- analysis_report,
- (x + int(w / 3.5), y + h + int(IDENTIFIED_IMG_SIZE / 1.5)),
- cv2.FONT_HERSHEY_SIMPLEX,
- 1,
- (0, 111, 255),
- 2,
- )
-
- return img
diff --git a/deepface/modules/verification.py b/deepface/modules/verification.py
deleted file mode 100644
index b6b1002..0000000
--- a/deepface/modules/verification.py
+++ /dev/null
@@ -1,382 +0,0 @@
-# built-in dependencies
-import time
-from typing import Any, Dict, Optional, Union, List, Tuple
-
-# 3rd party dependencies
-import numpy as np
-
-# project dependencies
-from deepface.modules import representation, detection, modeling
-from deepface.models.FacialRecognition import FacialRecognition
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-
-def verify(
- img1_path: Union[str, np.ndarray, List[float]],
- img2_path: Union[str, np.ndarray, List[float]],
- model_name: str = "VGG-Face",
- detector_backend: str = "opencv",
- distance_metric: str = "cosine",
- enforce_detection: bool = True,
- align: bool = True,
- expand_percentage: int = 0,
- normalization: str = "base",
- silent: bool = False,
- threshold: Optional[float] = None,
- anti_spoofing: bool = False,
-) -> Dict[str, Any]:
- """
- Verify if an image pair represents the same person or different persons.
-
- The verification function converts facial images to vectors and calculates the similarity
- between those vectors. Vectors of images of the same person should exhibit higher similarity
- (or lower distance) than vectors of images of different persons.
-
- Args:
- img1_path (str or np.ndarray or List[float]): Path to the first image.
- Accepts exact image path as a string, numpy array (BGR), base64 encoded images
- or pre-calculated embeddings.
-
- img2_path (str or np.ndarray or List[float]): Path to the second image.
- Accepts exact image path as a string, numpy array (BGR), base64 encoded images
- or pre-calculated embeddings.
-
- model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
- OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
-
- detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
- 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
- (default is opencv)
-
- distance_metric (string): Metric for measuring similarity. Options: 'cosine',
- 'euclidean', 'euclidean_l2' (default is cosine).
-
- enforce_detection (boolean): If no face is detected in an image, raise an exception.
- Set to False to avoid the exception for low-resolution images (default is True).
-
- align (bool): Flag to enable face alignment (default is True).
-
- expand_percentage (int): expand detected facial area with a percentage (default is 0).
-
- normalization (string): Normalize the input image before feeding it to the model.
- Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace (default is base)
-
- silent (boolean): Suppress or allow some log messages for a quieter analysis process
- (default is False).
-
- threshold (float): Specify a threshold to determine whether a pair represents the same
- person or different individuals. This threshold is used for comparing distances.
- If left unset, default pre-tuned threshold values will be applied based on the specified
- model name and distance metric (default is None).
-
- anti_spoofing (boolean): Flag to enable anti spoofing (default is False).
-
- Returns:
- result (dict): A dictionary containing verification results.
-
- - 'verified' (bool): Indicates whether the images represent the same person (True)
- or different persons (False).
-
- - 'distance' (float): The distance measure between the face vectors.
- A lower distance indicates higher similarity.
-
- - 'threshold' (float): The maximum threshold used for verification.
- If the distance is below this threshold, the images are considered a match.
-
- - 'model' (str): The chosen face recognition model.
-
- - 'similarity_metric' (str): The chosen similarity metric for measuring distances.
-
- - 'facial_areas' (dict): Rectangular regions of interest for faces in both images.
- - 'img1': {'x': int, 'y': int, 'w': int, 'h': int}
- Region of interest for the first image.
- - 'img2': {'x': int, 'y': int, 'w': int, 'h': int}
- Region of interest for the second image.
-
- - 'time' (float): Time taken for the verification process in seconds.
- """
-
- tic = time.time()
-
- model: FacialRecognition = modeling.build_model(
- task="facial_recognition", model_name=model_name
- )
- dims = model.output_shape
-
- no_facial_area = {
- "x": None,
- "y": None,
- "w": None,
- "h": None,
- "left_eye": None,
- "right_eye": None,
- }
-
- def extract_embeddings_and_facial_areas(
- img_path: Union[str, np.ndarray, List[float]], index: int
- ) -> Tuple[List[List[float]], List[dict]]:
- """
- Extracts facial embeddings and corresponding facial areas from an
- image or returns pre-calculated embeddings.
-
- Depending on the type of img_path, the function either extracts
- facial embeddings from the provided image
- (via a path or NumPy array) or verifies that the input is a list of
- pre-calculated embeddings and validates them.
-
- Args:
- img_path (Union[str, np.ndarray, List[float]]):
- - A string representing the file path to an image,
- - A NumPy array containing the image data,
- - Or a list of pre-calculated embedding values (of type `float`).
- index (int): An index value used in error messages and logging
- to identify whether the first or second image is being processed.
-
- Returns:
- Tuple[List[List[float]], List[dict]]:
- - A list containing lists of facial embeddings for each detected face.
- - A list of dictionaries where each dictionary contains facial area information.
- """
- if isinstance(img_path, list):
- # given image is already pre-calculated embedding
- if not all(isinstance(dim, float) for dim in img_path):
- raise ValueError(
- f"When passing img{index}_path as a list,"
- " ensure that all its items are of type float."
- )
-
- if silent is False:
- logger.warn(
- f"You passed {index}-th image as pre-calculated embeddings."
- "Please ensure that embeddings have been calculated"
- f" for the {model_name} model."
- )
-
- if len(img_path) != dims:
- raise ValueError(
- f"embeddings of {model_name} should have {dims} dimensions,"
- f" but {index}-th image has {len(img_path)} dimensions input"
- )
-
- img_embeddings = [img_path]
- img_facial_areas = [no_facial_area]
- else:
- try:
- img_embeddings, img_facial_areas = __extract_faces_and_embeddings(
- img_path=img_path,
- model_name=model_name,
- detector_backend=detector_backend,
- enforce_detection=enforce_detection,
- align=align,
- expand_percentage=expand_percentage,
- normalization=normalization,
- anti_spoofing=anti_spoofing,
- )
- except ValueError as err:
- raise ValueError(f"Exception while processing img{index}_path") from err
- return img_embeddings, img_facial_areas
-
- img1_embeddings, img1_facial_areas = extract_embeddings_and_facial_areas(img1_path, 1)
- img2_embeddings, img2_facial_areas = extract_embeddings_and_facial_areas(img2_path, 2)
-
- min_distance, min_idx, min_idy = float("inf"), None, None
- for idx, img1_embedding in enumerate(img1_embeddings):
- for idy, img2_embedding in enumerate(img2_embeddings):
- distance = find_distance(img1_embedding, img2_embedding, distance_metric)
- if distance < min_distance:
- min_distance, min_idx, min_idy = distance, idx, idy
-
- # find the face pair with minimum distance
- threshold = threshold or find_threshold(model_name, distance_metric)
- distance = float(min_distance)
- facial_areas = (
- no_facial_area if min_idx is None else img1_facial_areas[min_idx],
- no_facial_area if min_idy is None else img2_facial_areas[min_idy],
- )
-
- toc = time.time()
-
- resp_obj = {
- "verified": distance <= threshold,
- "distance": distance,
- "threshold": threshold,
- "model": model_name,
- "detector_backend": detector_backend,
- "similarity_metric": distance_metric,
- "facial_areas": {"img1": facial_areas[0], "img2": facial_areas[1]},
- "time": round(toc - tic, 2),
- }
-
- return resp_obj
-
-
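A minimal usage sketch of the verify flow documented above; the two image paths are placeholder assumptions.

```python
# Illustrative sketch only; image paths are placeholders.
from deepface import DeepFace

result = DeepFace.verify(
    img1_path="img1.jpg",
    img2_path="img2.jpg",
    model_name="VGG-Face",
    detector_backend="opencv",
    distance_metric="cosine",
)

# keys mirror the response object built by verify() above
print(result["verified"])      # True if distance <= threshold
print(result["distance"])      # distance of the closest face pair
print(result["threshold"])     # pre-tuned threshold for the model / metric pair
print(result["facial_areas"])  # regions of interest for both images
```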
-def __extract_faces_and_embeddings(
- img_path: Union[str, np.ndarray],
- model_name: str = "VGG-Face",
- detector_backend: str = "opencv",
- enforce_detection: bool = True,
- align: bool = True,
- expand_percentage: int = 0,
- normalization: str = "base",
- anti_spoofing: bool = False,
-) -> Tuple[List[List[float]], List[dict]]:
- """
- Extract facial areas and find corresponding embeddings for given image
- Returns:
- embeddings (List[float])
- facial areas (List[dict])
- """
- embeddings = []
- facial_areas = []
-
- img_objs = detection.extract_faces(
- img_path=img_path,
- detector_backend=detector_backend,
- grayscale=False,
- enforce_detection=enforce_detection,
- align=align,
- expand_percentage=expand_percentage,
- anti_spoofing=anti_spoofing,
- )
-
- # find embeddings for each face
- for img_obj in img_objs:
- if anti_spoofing is True and img_obj.get("is_real", True) is False:
- raise ValueError("Spoof detected in given image.")
- img_embedding_obj = representation.represent(
- img_path=img_obj["face"],
- model_name=model_name,
- enforce_detection=enforce_detection,
- detector_backend="skip",
- align=align,
- normalization=normalization,
- )
- # already extracted face given, safe to access its 1st item
- img_embedding = img_embedding_obj[0]["embedding"]
- embeddings.append(img_embedding)
- facial_areas.append(img_obj["facial_area"])
-
- return embeddings, facial_areas
-
-
-def find_cosine_distance(
- source_representation: Union[np.ndarray, list], test_representation: Union[np.ndarray, list]
-) -> np.float64:
- """
- Find cosine distance between two given vectors
- Args:
- source_representation (np.ndarray or list): 1st vector
- test_representation (np.ndarray or list): 2nd vector
- Returns:
- distance (np.float64): calculated cosine distance
- """
- if isinstance(source_representation, list):
- source_representation = np.array(source_representation)
-
- if isinstance(test_representation, list):
- test_representation = np.array(test_representation)
-
- a = np.dot(source_representation, test_representation)
- b = np.linalg.norm(source_representation)
- c = np.linalg.norm(test_representation)
- return 1 - a / (b * c)
-
-
-def find_euclidean_distance(
- source_representation: Union[np.ndarray, list], test_representation: Union[np.ndarray, list]
-) -> np.float64:
- """
- Find euclidean distance between two given vectors
- Args:
- source_representation (np.ndarray or list): 1st vector
- test_representation (np.ndarray or list): 2nd vector
- Returns:
- distance (np.float64): calculated euclidean distance
- """
- if isinstance(source_representation, list):
- source_representation = np.array(source_representation)
-
- if isinstance(test_representation, list):
- test_representation = np.array(test_representation)
-
- return np.linalg.norm(source_representation - test_representation)
-
-
-def l2_normalize(x: Union[np.ndarray, list]) -> np.ndarray:
- """
- Normalize input vector with l2
- Args:
- x (np.ndarray or list): given vector
- Returns:
- y (np.ndarray): l2 normalized vector
- """
- if isinstance(x, list):
- x = np.array(x)
- norm = np.linalg.norm(x)
- return x if norm == 0 else x / norm
-
-
-def find_distance(
- alpha_embedding: Union[np.ndarray, list],
- beta_embedding: Union[np.ndarray, list],
- distance_metric: str,
-) -> np.float64:
- """
- Wrapper to find distance between vectors according to the given distance metric
- Args:
- alpha_embedding (np.ndarray or list): 1st vector
- beta_embedding (np.ndarray or list): 2nd vector
- distance_metric (str): cosine, euclidean or euclidean_l2
- Returns:
- distance (np.float64): calculated distance for the given metric
- """
- if distance_metric == "cosine":
- distance = find_cosine_distance(alpha_embedding, beta_embedding)
- elif distance_metric == "euclidean":
- distance = find_euclidean_distance(alpha_embedding, beta_embedding)
- elif distance_metric == "euclidean_l2":
- distance = find_euclidean_distance(
- l2_normalize(alpha_embedding), l2_normalize(beta_embedding)
- )
- else:
- raise ValueError("Invalid distance_metric passed - ", distance_metric)
- return distance
-
-
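As a quick sanity check on the metrics above: for l2-normalized vectors, ||u - v||^2 = 2 * (1 - cos), so the euclidean_l2 distance equals sqrt(2 * cosine distance). A small numpy sketch with placeholder vectors:

```python
import numpy as np

# placeholder embeddings; any non-zero vectors work
a = np.array([0.3, 1.2, -0.7, 0.5])
b = np.array([0.1, 0.9, -0.4, 0.8])

cosine = 1 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
euclidean_l2 = np.linalg.norm(a / np.linalg.norm(a) - b / np.linalg.norm(b))

# ||u - v||^2 = 2 * (1 - cos) for unit vectors, hence:
assert np.isclose(euclidean_l2, np.sqrt(2 * cosine))
```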
-def find_threshold(model_name: str, distance_metric: str) -> float:
- """
- Retrieve pre-tuned threshold values for a model and distance metric pair
- Args:
- model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
- OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
- distance_metric (str): distance metric name. Options are cosine, euclidean
- and euclidean_l2.
- Returns:
- threshold (float): threshold value for that model name and distance metric
- pair. Distances less than this threshold will be classified as the same person.
- """
-
- base_threshold = {"cosine": 0.40, "euclidean": 0.55, "euclidean_l2": 0.75}
-
- thresholds = {
- # "VGG-Face": {"cosine": 0.40, "euclidean": 0.60, "euclidean_l2": 0.86}, # 2622d
- "VGG-Face": {
- "cosine": 0.68,
- "euclidean": 1.17,
- "euclidean_l2": 1.17,
- }, # 4096d - tuned with LFW
- "Facenet": {"cosine": 0.40, "euclidean": 10, "euclidean_l2": 0.80},
- "Facenet512": {"cosine": 0.30, "euclidean": 23.56, "euclidean_l2": 1.04},
- "ArcFace": {"cosine": 0.68, "euclidean": 4.15, "euclidean_l2": 1.13},
- "Dlib": {"cosine": 0.07, "euclidean": 0.6, "euclidean_l2": 0.4},
- "SFace": {"cosine": 0.593, "euclidean": 10.734, "euclidean_l2": 1.055},
- "OpenFace": {"cosine": 0.10, "euclidean": 0.55, "euclidean_l2": 0.55},
- "DeepFace": {"cosine": 0.23, "euclidean": 64, "euclidean_l2": 0.64},
- "DeepID": {"cosine": 0.015, "euclidean": 45, "euclidean_l2": 0.17},
- "GhostFaceNet": {"cosine": 0.65, "euclidean": 35.71, "euclidean_l2": 1.10},
- }
-
- threshold = thresholds.get(model_name, base_threshold).get(distance_metric, 0.4)
-
- return threshold
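To illustrate how a pre-tuned threshold from the table above gates the decision, a hedged standalone sketch with random placeholder embeddings (0.40 is the Facenet/cosine entry):

```python
import numpy as np

# random placeholder embeddings; real ones would come from a recognition model
rng = np.random.default_rng(0)
emb1, emb2 = rng.normal(size=128), rng.normal(size=128)

def cosine_distance(u: np.ndarray, v: np.ndarray) -> float:
    return float(1 - np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))

threshold = 0.40                                    # Facenet + cosine from the table above
verified = cosine_distance(emb1, emb2) <= threshold
print(verified)                                     # random vectors will usually not match
```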
diff --git a/icon/benchmarks.jpg b/icon/benchmarks.jpg
deleted file mode 100644
index adcd846..0000000
Binary files a/icon/benchmarks.jpg and /dev/null differ
diff --git a/icon/bmc-button.png b/icon/bmc-button.png
deleted file mode 100644
index 464bfd9..0000000
Binary files a/icon/bmc-button.png and /dev/null differ
diff --git a/icon/deepface-and-react.jpg b/icon/deepface-and-react.jpg
deleted file mode 100644
index 4203a23..0000000
Binary files a/icon/deepface-and-react.jpg and /dev/null differ
diff --git a/icon/deepface-api.jpg b/icon/deepface-api.jpg
deleted file mode 100644
index 8ca3fec..0000000
Binary files a/icon/deepface-api.jpg and /dev/null differ
diff --git a/icon/deepface-big-data.jpg b/icon/deepface-big-data.jpg
deleted file mode 100644
index 5061f1e..0000000
Binary files a/icon/deepface-big-data.jpg and /dev/null differ
diff --git a/icon/deepface-dockerized-v2.jpg b/icon/deepface-dockerized-v2.jpg
deleted file mode 100644
index db79929..0000000
Binary files a/icon/deepface-dockerized-v2.jpg and /dev/null differ
diff --git a/icon/deepface-icon-labeled.png b/icon/deepface-icon-labeled.png
deleted file mode 100644
index 520887c..0000000
Binary files a/icon/deepface-icon-labeled.png and /dev/null differ
diff --git a/icon/deepface-icon.png b/icon/deepface-icon.png
deleted file mode 100644
index d0a3b9b..0000000
Binary files a/icon/deepface-icon.png and /dev/null differ
diff --git a/icon/detector-outputs-20230203.jpg b/icon/detector-outputs-20230203.jpg
deleted file mode 100644
index b894e75..0000000
Binary files a/icon/detector-outputs-20230203.jpg and /dev/null differ
diff --git a/icon/detector-outputs-20240302.jpg b/icon/detector-outputs-20240302.jpg
deleted file mode 100644
index b7bf517..0000000
Binary files a/icon/detector-outputs-20240302.jpg and /dev/null differ
diff --git a/icon/detector-outputs-20240414.jpg b/icon/detector-outputs-20240414.jpg
deleted file mode 100644
index 15f73fb..0000000
Binary files a/icon/detector-outputs-20240414.jpg and /dev/null differ
diff --git a/icon/detector-portfolio-v5.jpg b/icon/detector-portfolio-v5.jpg
deleted file mode 100644
index e35cef1..0000000
Binary files a/icon/detector-portfolio-v5.jpg and /dev/null differ
diff --git a/icon/detector-portfolio-v6.jpg b/icon/detector-portfolio-v6.jpg
deleted file mode 100644
index 3fb158a..0000000
Binary files a/icon/detector-portfolio-v6.jpg and /dev/null differ
diff --git a/icon/embedding.jpg b/icon/embedding.jpg
deleted file mode 100644
index 8555913..0000000
Binary files a/icon/embedding.jpg and /dev/null differ
diff --git a/icon/face-anti-spoofing.jpg b/icon/face-anti-spoofing.jpg
deleted file mode 100644
index bade126..0000000
Binary files a/icon/face-anti-spoofing.jpg and /dev/null differ
diff --git a/icon/model-portfolio-20240316.jpg b/icon/model-portfolio-20240316.jpg
deleted file mode 100644
index 7252155..0000000
Binary files a/icon/model-portfolio-20240316.jpg and /dev/null differ
diff --git a/icon/model-portfolio-v8.jpg b/icon/model-portfolio-v8.jpg
deleted file mode 100644
index b359f00..0000000
Binary files a/icon/model-portfolio-v8.jpg and /dev/null differ
diff --git a/icon/patreon.png b/icon/patreon.png
deleted file mode 100644
index 21cbfd9..0000000
Binary files a/icon/patreon.png and /dev/null differ
diff --git a/icon/retinaface-results.jpeg b/icon/retinaface-results.jpeg
deleted file mode 100644
index 11e5db9..0000000
Binary files a/icon/retinaface-results.jpeg and /dev/null differ
diff --git a/icon/stock-1.jpg b/icon/stock-1.jpg
deleted file mode 100644
index b786042..0000000
Binary files a/icon/stock-1.jpg and /dev/null differ
diff --git a/icon/stock-2.jpg b/icon/stock-2.jpg
deleted file mode 100644
index 4e542ea..0000000
Binary files a/icon/stock-2.jpg and /dev/null differ
diff --git a/icon/stock-3.jpg b/icon/stock-3.jpg
deleted file mode 100644
index 68254cb..0000000
Binary files a/icon/stock-3.jpg and /dev/null differ
diff --git a/icon/stock-6-v2.jpg b/icon/stock-6-v2.jpg
deleted file mode 100644
index a7fe5ac..0000000
Binary files a/icon/stock-6-v2.jpg and /dev/null differ
diff --git a/icon/verify-many-faces.jpg b/icon/verify-many-faces.jpg
deleted file mode 100644
index 66fc890..0000000
Binary files a/icon/verify-many-faces.jpg and /dev/null differ
diff --git a/package_info.json b/package_info.json
deleted file mode 100644
index 1dc17ea..0000000
--- a/package_info.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "version": "0.0.94"
-}
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 8f40c82..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-requests>=2.27.1
-numpy>=1.14.0
-pandas>=0.23.4
-gdown>=3.10.1
-tqdm>=4.30.0
-Pillow>=5.2.0
-opencv-python>=4.5.5.64
-tensorflow>=1.9.0
-keras>=2.2.0
-Flask>=1.1.2
-flask_cors>=4.0.1
-mtcnn>=0.1.0
-retina-face>=0.0.1
-fire>=0.4.0
-gunicorn>=20.1.0
diff --git a/requirements_additional.txt b/requirements_additional.txt
deleted file mode 100644
index ea76fde..0000000
--- a/requirements_additional.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-opencv-contrib-python>=4.3.0.36
-mediapipe>=0.8.7.3
-dlib>=19.20.0
-ultralytics>=8.0.122
-facenet-pytorch>=2.5.3
-torch>=2.1.2
\ No newline at end of file
diff --git a/requirements_local b/requirements_local
deleted file mode 100644
index e869c3f..0000000
--- a/requirements_local
+++ /dev/null
@@ -1,6 +0,0 @@
-numpy==1.22.3
-pandas==2.0.3
-Pillow==9.0.0
-opencv-python==4.9.0.80
-tensorflow==2.9.0
-keras==2.9.0
diff --git a/scripts/dockerize.sh b/scripts/dockerize.sh
deleted file mode 100644
index f29bed7..0000000
--- a/scripts/dockerize.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-# Dockerfile is in the root
-cd ..
-
-# start docker
-# sudo service docker start
-
-# list current docker packages
-# docker container ls -a
-
-# delete existing deepface packages
-# docker rm -f $(docker ps -a -q --filter "ancestor=deepface")
-
-# build deepface image
-docker build -t deepface .
-
-# copy weights from your local
-# docker cp ~/.deepface/weights/. :/root/.deepface/weights/
-
-# run the built image
-# docker run --net="host" deepface
-docker run -p 5005:5000 deepface
-
-# or pull the pre-built image from docker hub and run it
-# docker pull serengil/deepface
-# docker run -p 5005:5000 serengil/deepface
-
-# to access the inside of docker image when it is in running status
-# docker exec -it /bin/sh
-
-# healthcheck
-# sleep 3s
-# curl localhost:5000
\ No newline at end of file
diff --git a/scripts/push-release.sh b/scripts/push-release.sh
deleted file mode 100644
index 5b3e6fa..0000000
--- a/scripts/push-release.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-cd ..
-
-echo "deleting existing release related files"
-rm -rf dist/*
-rm -rf build/*
-
-echo "creating a package for current release - pypi compatible"
-python setup.py sdist bdist_wheel
-
-echo "pushing the release to pypi"
-python -m twine upload dist/*
\ No newline at end of file
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 3dc0270..0000000
--- a/setup.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import json
-import setuptools
-
-with open("README.md", "r", encoding="utf-8") as fh:
- long_description = fh.read()
-
-with open("requirements.txt", "r", encoding="utf-8") as f:
- requirements = f.read().split("\n")
-
-with open("package_info.json", "r", encoding="utf-8") as f:
- package_info = json.load(f)
-
-setuptools.setup(
- name="deepface",
- version=package_info["version"],
- author="Sefik Ilkin Serengil",
- author_email="serengil@gmail.com",
- description=(
- "A Lightweight Face Recognition and Facial Attribute Analysis Framework"
- " (Age, Gender, Emotion, Race) for Python"
- ),
- data_files=[("", ["README.md", "requirements.txt", "package_info.json"])],
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/serengil/deepface",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 3",
- "License :: OSI Approved :: MIT License",
- "Operating System :: OS Independent",
- ],
- entry_points={
- "console_scripts": ["deepface = deepface.DeepFace:cli"],
- },
- python_requires=">=3.7",
- license="MIT",
- install_requires=requirements,
-)
diff --git a/tests/dataset/couple.jpg b/tests/dataset/couple.jpg
deleted file mode 100644
index 1a07d76..0000000
Binary files a/tests/dataset/couple.jpg and /dev/null differ
diff --git a/tests/dataset/face-recognition-pivot.csv b/tests/dataset/face-recognition-pivot.csv
deleted file mode 100644
index 7c3f68b..0000000
--- a/tests/dataset/face-recognition-pivot.csv
+++ /dev/null
@@ -1,281 +0,0 @@
-file_x,file_y,decision,VGG-Face_cosine,VGG-Face_euclidean,VGG-Face_euclidean_l2,Facenet_cosine,Facenet_euclidean,Facenet_euclidean_l2,OpenFace_cosine,OpenFace_euclidean_l2,DeepFace_cosine,DeepFace_euclidean,DeepFace_euclidean_l2
-deepface/tests/dataset/img38.jpg,deepface/tests/dataset/img39.jpg,Yes,0.2057,0.389,0.6414,0.1601,6.8679,0.5658,0.5925,1.0886,0.2554,61.3336,0.7147
-deepface/tests/dataset/img38.jpg,deepface/tests/dataset/img40.jpg,Yes,0.2117,0.3179,0.6508,0.2739,8.9049,0.7402,0.396,0.8899,0.2685,63.3747,0.7328
-deepface/tests/dataset/img38.jpg,deepface/tests/dataset/img41.jpg,Yes,0.1073,0.2482,0.4632,0.1257,6.1593,0.5014,0.7157,1.1964,0.2452,60.3454,0.7002
-deepface/tests/dataset/img39.jpg,deepface/tests/dataset/img40.jpg,Yes,0.2991,0.4567,0.7734,0.3134,9.3798,0.7917,0.4941,0.9941,0.1703,45.1688,0.5836
-deepface/tests/dataset/img39.jpg,deepface/tests/dataset/img41.jpg,Yes,0.1666,0.3542,0.5772,0.1502,6.6491,0.5481,0.2381,0.6901,0.2194,50.4356,0.6624
-deepface/tests/dataset/img40.jpg,deepface/tests/dataset/img41.jpg,Yes,0.1706,0.3066,0.5841,0.2017,7.6423,0.6352,0.567,1.0649,0.2423,54.2499,0.6961
-deepface/tests/dataset/img3.jpg,deepface/tests/dataset/img12.jpg,Yes,0.2533,0.5199,0.7118,0.4062,11.2632,0.9014,0.1908,0.6178,0.2337,58.8794,0.6837
-deepface/tests/dataset/img3.jpg,deepface/tests/dataset/img53.jpg,Yes,0.1655,0.3567,0.5754,0.184,7.5388,0.6066,0.1465,0.5412,0.243,55.2642,0.6971
-deepface/tests/dataset/img3.jpg,deepface/tests/dataset/img54.jpg,Yes,0.1982,0.4739,0.6297,0.406,11.0618,0.9011,0.1132,0.4758,0.1824,49.7875,0.6041
-deepface/tests/dataset/img3.jpg,deepface/tests/dataset/img55.jpg,Yes,0.1835,0.3742,0.6057,0.1366,6.4168,0.5227,0.1755,0.5924,0.1697,55.179,0.5825
-deepface/tests/dataset/img3.jpg,deepface/tests/dataset/img56.jpg,Yes,0.1652,0.4005,0.5748,0.1833,7.3432,0.6054,0.1803,0.6005,0.2061,59.007,0.642
-deepface/tests/dataset/img12.jpg,deepface/tests/dataset/img53.jpg,Yes,0.372,0.6049,0.8626,0.3933,11.1382,0.8869,0.1068,0.4621,0.1633,48.5516,0.5715
-deepface/tests/dataset/img12.jpg,deepface/tests/dataset/img54.jpg,Yes,0.2153,0.5145,0.6561,0.2694,9.1155,0.734,0.1943,0.6234,0.1881,52.7146,0.6133
-deepface/tests/dataset/img12.jpg,deepface/tests/dataset/img55.jpg,Yes,0.3551,0.5941,0.8428,0.4726,12.0647,0.9722,0.1054,0.4591,0.1265,48.2432,0.5029
-deepface/tests/dataset/img12.jpg,deepface/tests/dataset/img56.jpg,Yes,0.2826,0.565,0.7518,0.4761,11.9569,0.9758,0.1364,0.5224,0.1908,57.6735,0.6177
-deepface/tests/dataset/img53.jpg,deepface/tests/dataset/img54.jpg,Yes,0.3363,0.593,0.8202,0.4627,11.8744,0.962,0.1964,0.6267,0.174,46.6212,0.5898
-deepface/tests/dataset/img53.jpg,deepface/tests/dataset/img55.jpg,Yes,0.187,0.3313,0.6116,0.1625,7.0394,0.5701,0.1312,0.5123,0.1439,52.3132,0.5365
-deepface/tests/dataset/img53.jpg,deepface/tests/dataset/img56.jpg,Yes,0.1385,0.3776,0.5263,0.141,6.4913,0.5311,0.1285,0.507,0.2005,58.0586,0.6332
-deepface/tests/dataset/img54.jpg,deepface/tests/dataset/img55.jpg,Yes,0.3124,0.5756,0.7905,0.4033,10.944,0.8981,0.1738,0.5896,0.1351,49.8255,0.5198
-deepface/tests/dataset/img54.jpg,deepface/tests/dataset/img56.jpg,Yes,0.2571,0.5473,0.717,0.3912,10.6329,0.8846,0.1802,0.6002,0.1648,53.0881,0.574
-deepface/tests/dataset/img55.jpg,deepface/tests/dataset/img56.jpg,Yes,0.2217,0.4543,0.6658,0.1433,6.4387,0.5353,0.1677,0.5792,0.1505,53.6812,0.5486
-deepface/tests/dataset/img1.jpg,deepface/tests/dataset/img2.jpg,Yes,0.2342,0.5033,0.6844,0.2508,8.2369,0.7082,0.0844,0.4109,0.2417,64.2748,0.6952
-deepface/tests/dataset/img1.jpg,deepface/tests/dataset/img4.jpg,Yes,0.2051,0.3916,0.6405,0.2766,8.7946,0.7437,0.1662,0.5766,0.2292,64.7785,0.6771
-deepface/tests/dataset/img1.jpg,deepface/tests/dataset/img5.jpg,Yes,0.2963,0.3948,0.7699,0.2696,8.4689,0.7343,0.0965,0.4393,0.2306,71.6647,0.679
-deepface/tests/dataset/img1.jpg,deepface/tests/dataset/img6.jpg,Yes,0.254,0.4464,0.7128,0.2164,7.7171,0.6579,0.0691,0.3718,0.2365,64.7594,0.6877
-deepface/tests/dataset/img1.jpg,deepface/tests/dataset/img7.jpg,Yes,0.3104,0.4764,0.7879,0.2112,7.5718,0.65,0.1027,0.4531,0.2385,61.371,0.6906
-deepface/tests/dataset/img1.jpg,deepface/tests/dataset/img10.jpg,Yes,0.3363,0.5448,0.8202,0.2129,7.6484,0.6525,0.0661,0.3635,0.2472,65.0668,0.7031
-deepface/tests/dataset/img1.jpg,deepface/tests/dataset/img11.jpg,Yes,0.3083,0.5416,0.7852,0.2042,7.6195,0.639,0.1626,0.5703,0.2001,61.3824,0.6326
-deepface/tests/dataset/img2.jpg,deepface/tests/dataset/img4.jpg,Yes,0.1397,0.3961,0.5285,0.1957,7.351,0.6256,0.2497,0.7066,0.1349,51.5853,0.5194
-deepface/tests/dataset/img2.jpg,deepface/tests/dataset/img5.jpg,Yes,0.1995,0.482,0.6317,0.1574,6.4195,0.561,0.1333,0.5164,0.1583,60.6365,0.5627
-deepface/tests/dataset/img2.jpg,deepface/tests/dataset/img6.jpg,Yes,0.0908,0.3251,0.4261,0.0787,4.625,0.3969,0.0632,0.3556,0.0756,38.218,0.3888
-deepface/tests/dataset/img2.jpg,deepface/tests/dataset/img7.jpg,Yes,0.2,0.4664,0.6325,0.1642,6.6261,0.5731,0.1049,0.4581,0.098,42.1113,0.4428
-deepface/tests/dataset/img2.jpg,deepface/tests/dataset/img10.jpg,Yes,0.2077,0.4862,0.6444,0.1593,6.5693,0.5644,0.0589,0.3431,0.1118,45.9168,0.4729
-deepface/tests/dataset/img2.jpg,deepface/tests/dataset/img11.jpg,Yes,0.2349,0.5235,0.6854,0.1869,7.2485,0.6114,0.1029,0.4536,0.1548,55.617,0.5564
-deepface/tests/dataset/img4.jpg,deepface/tests/dataset/img5.jpg,Yes,0.1991,0.3869,0.6311,0.1199,5.7256,0.4898,0.2891,0.7604,0.1797,64.7925,0.5995
-deepface/tests/dataset/img4.jpg,deepface/tests/dataset/img6.jpg,Yes,0.1937,0.4095,0.6224,0.1772,7.0495,0.5954,0.2199,0.6632,0.1788,59.9202,0.598
-deepface/tests/dataset/img4.jpg,deepface/tests/dataset/img7.jpg,Yes,0.245,0.4526,0.7,0.1663,6.7868,0.5767,0.3435,0.8289,0.1971,61.177,0.6279
-deepface/tests/dataset/img4.jpg,deepface/tests/dataset/img10.jpg,Yes,0.1882,0.4274,0.6136,0.1304,6.0445,0.5107,0.2052,0.6406,0.1239,49.4937,0.4979
-deepface/tests/dataset/img4.jpg,deepface/tests/dataset/img11.jpg,Yes,0.2569,0.5093,0.7168,0.1909,7.4277,0.618,0.2874,0.7582,0.1737,59.8839,0.5894
-deepface/tests/dataset/img5.jpg,deepface/tests/dataset/img6.jpg,Yes,0.1858,0.3915,0.6095,0.1818,6.967,0.6029,0.13,0.5099,0.1742,63.6179,0.5903
-deepface/tests/dataset/img5.jpg,deepface/tests/dataset/img7.jpg,Yes,0.2639,0.4391,0.7264,0.1754,6.7894,0.5923,0.1174,0.4846,0.1523,59.6056,0.5519
-deepface/tests/dataset/img5.jpg,deepface/tests/dataset/img10.jpg,Yes,0.2013,0.4449,0.6344,0.1143,5.525,0.478,0.1228,0.4957,0.1942,66.7805,0.6232
-deepface/tests/dataset/img5.jpg,deepface/tests/dataset/img11.jpg,Yes,0.3348,0.5599,0.8183,0.1975,7.4008,0.6285,0.2071,0.6436,0.1692,63.0817,0.5818
-deepface/tests/dataset/img6.jpg,deepface/tests/dataset/img7.jpg,Yes,0.192,0.4085,0.6196,0.1275,5.892,0.505,0.1004,0.4482,0.094,42.0465,0.4335
-deepface/tests/dataset/img6.jpg,deepface/tests/dataset/img10.jpg,Yes,0.214,0.4593,0.6542,0.1237,5.8374,0.4974,0.0517,0.3216,0.11,46.1197,0.4691
-deepface/tests/dataset/img6.jpg,deepface/tests/dataset/img11.jpg,Yes,0.2755,0.5319,0.7423,0.1772,7.1072,0.5953,0.1383,0.526,0.1771,59.9849,0.5951
-deepface/tests/dataset/img7.jpg,deepface/tests/dataset/img10.jpg,Yes,0.3425,0.5729,0.8276,0.1708,6.8133,0.5845,0.0956,0.4374,0.1552,52.8909,0.5571
-deepface/tests/dataset/img7.jpg,deepface/tests/dataset/img11.jpg,Yes,0.2912,0.5417,0.7632,0.2449,8.3025,0.6998,0.148,0.544,0.1894,60.469,0.6154
-deepface/tests/dataset/img10.jpg,deepface/tests/dataset/img11.jpg,Yes,0.2535,0.5258,0.712,0.1371,6.2509,0.5237,0.0609,0.349,0.1851,60.8244,0.6085
-deepface/tests/dataset/img18.jpg,deepface/tests/dataset/img19.jpg,Yes,0.1043,0.3254,0.4567,0.1248,6.2382,0.4996,0.2563,0.7159,0.1712,60.1675,0.5851
-deepface/tests/dataset/img18.jpg,deepface/tests/dataset/img67.jpg,Yes,0.2197,0.4691,0.6629,0.2387,8.7124,0.6909,0.3072,0.7838,0.1839,58.9528,0.6065
-deepface/tests/dataset/img19.jpg,deepface/tests/dataset/img67.jpg,Yes,0.1466,0.3965,0.5416,0.1321,6.5557,0.514,0.1504,0.5485,0.1517,55.8044,0.5508
-deepface/tests/dataset/img20.jpg,deepface/tests/dataset/img21.jpg,Yes,0.0641,0.2068,0.3581,0.1052,5.4253,0.4586,0.1118,0.4729,0.2209,58.7235,0.6646
-deepface/tests/dataset/img34.jpg,deepface/tests/dataset/img35.jpg,Yes,0.0959,0.2628,0.4381,0.2538,8.7003,0.7124,0.3727,0.8634,0.3244,78.4397,0.8055
-deepface/tests/dataset/img34.jpg,deepface/tests/dataset/img36.jpg,Yes,0.1553,0.2918,0.5573,0.1861,7.5793,0.6101,0.399,0.8933,0.2923,61.625,0.7646
-deepface/tests/dataset/img34.jpg,deepface/tests/dataset/img37.jpg,Yes,0.104,0.2651,0.4562,0.1192,6.0818,0.4882,0.4158,0.912,0.2853,62.1217,0.7554
-deepface/tests/dataset/img35.jpg,deepface/tests/dataset/img36.jpg,Yes,0.2322,0.3945,0.6814,0.2049,7.6366,0.6401,0.38,0.8717,0.2991,74.4219,0.7735
-deepface/tests/dataset/img35.jpg,deepface/tests/dataset/img37.jpg,Yes,0.1684,0.3516,0.5804,0.186,7.2991,0.6099,0.1662,0.5766,0.164,58.1125,0.5727
-deepface/tests/dataset/img36.jpg,deepface/tests/dataset/img37.jpg,Yes,0.1084,0.2715,0.4655,0.1338,6.3075,0.5173,0.2909,0.7627,0.2687,54.7311,0.7331
-deepface/tests/dataset/img22.jpg,deepface/tests/dataset/img23.jpg,Yes,0.3637,0.4569,0.8528,0.3501,9.9752,0.8368,0.1651,0.5746,0.1649,42.2178,0.5742
-deepface/tests/dataset/img13.jpg,deepface/tests/dataset/img14.jpg,Yes,0.086,0.3384,0.4148,0.1104,5.3711,0.47,0.0952,0.4363,0.2043,61.8532,0.6392
-deepface/tests/dataset/img13.jpg,deepface/tests/dataset/img15.jpg,Yes,0.1879,0.5589,0.6131,0.2317,7.9283,0.6808,0.3202,0.8003,0.3665,81.975,0.8562
-deepface/tests/dataset/img13.jpg,deepface/tests/dataset/img57.jpg,Yes,0.1204,0.3952,0.4907,0.1897,7.1445,0.616,0.4599,0.9591,0.3266,82.6217,0.8082
-deepface/tests/dataset/img13.jpg,deepface/tests/dataset/img58.jpg,Yes,0.1748,0.524,0.5913,0.2264,7.7484,0.6729,0.5006,1.0006,0.3476,75.6494,0.8338
-deepface/tests/dataset/img14.jpg,deepface/tests/dataset/img15.jpg,Yes,0.1969,0.571,0.6275,0.2322,7.8197,0.6815,0.3409,0.8257,0.4076,89.3521,0.9029
-deepface/tests/dataset/img14.jpg,deepface/tests/dataset/img57.jpg,Yes,0.1815,0.4206,0.6025,0.128,5.7838,0.5059,0.4251,0.9221,0.3284,84.7328,0.8105
-deepface/tests/dataset/img14.jpg,deepface/tests/dataset/img58.jpg,Yes,0.2071,0.5609,0.6436,0.2125,7.384,0.6519,0.4993,0.9993,0.3848,83.0627,0.8772
-deepface/tests/dataset/img15.jpg,deepface/tests/dataset/img57.jpg,Yes,0.198,0.5753,0.6293,0.2073,7.5025,0.6439,0.3957,0.8896,0.3881,91.551,0.881
-deepface/tests/dataset/img15.jpg,deepface/tests/dataset/img58.jpg,Yes,0.1109,0.4424,0.4709,0.1106,5.4445,0.4702,0.2815,0.7503,0.4153,85.5012,0.9114
-deepface/tests/dataset/img57.jpg,deepface/tests/dataset/img58.jpg,Yes,0.1581,0.5045,0.5624,0.1452,6.2094,0.5389,0.213,0.6528,0.2184,67.7741,0.6609
-deepface/tests/dataset/img29.jpg,deepface/tests/dataset/img30.jpg,Yes,0.142,0.28,0.5329,0.1759,7.1649,0.5931,0.3237,0.8046,0.272,59.7856,0.7375
-deepface/tests/dataset/img29.jpg,deepface/tests/dataset/img31.jpg,Yes,0.1525,0.2777,0.5523,0.1588,6.8613,0.5636,0.5027,1.0027,0.2,49.2171,0.6324
-deepface/tests/dataset/img29.jpg,deepface/tests/dataset/img32.jpg,Yes,0.1807,0.481,0.6011,0.1997,7.8571,0.632,0.4602,0.9594,0.3084,60.7837,0.7854
-deepface/tests/dataset/img29.jpg,deepface/tests/dataset/img33.jpg,Yes,0.1757,0.3177,0.5927,0.2406,8.3798,0.6937,0.3446,0.8302,0.1679,47.9061,0.5795
-deepface/tests/dataset/img30.jpg,deepface/tests/dataset/img31.jpg,Yes,0.1141,0.2453,0.4776,0.1654,6.8805,0.5751,0.3189,0.7986,0.1897,51.344,0.6159
-deepface/tests/dataset/img30.jpg,deepface/tests/dataset/img32.jpg,Yes,0.1567,0.4575,0.5597,0.1757,7.2731,0.5929,0.1712,0.5851,0.242,57.849,0.6957
-deepface/tests/dataset/img30.jpg,deepface/tests/dataset/img33.jpg,Yes,0.1548,0.2997,0.5565,0.2074,7.6356,0.644,0.1744,0.5906,0.2601,61.9643,0.7213
-deepface/tests/dataset/img31.jpg,deepface/tests/dataset/img32.jpg,Yes,0.1402,0.4725,0.5295,0.1009,5.5583,0.4493,0.2098,0.6478,0.2023,51.0814,0.6361
-deepface/tests/dataset/img31.jpg,deepface/tests/dataset/img33.jpg,Yes,0.0895,0.2296,0.4232,0.1873,7.3261,0.6121,0.1871,0.6118,0.229,56.6939,0.6768
-deepface/tests/dataset/img32.jpg,deepface/tests/dataset/img33.jpg,Yes,0.2035,0.4953,0.638,0.2415,8.5176,0.6949,0.2426,0.6965,0.2768,62.1742,0.744
-deepface/tests/dataset/img8.jpg,deepface/tests/dataset/img9.jpg,Yes,0.3147,0.45,0.7933,0.1976,7.3714,0.6287,0.0997,0.4466,0.1695,48.8942,0.5822
-deepface/tests/dataset/img8.jpg,deepface/tests/dataset/img47.jpg,Yes,0.3638,0.4564,0.853,0.1976,7.2952,0.6287,0.0931,0.4314,0.1869,54.8324,0.6114
-deepface/tests/dataset/img8.jpg,deepface/tests/dataset/img48.jpg,Yes,0.3068,0.442,0.7834,0.2593,8.2334,0.7201,0.1319,0.5136,0.2194,55.6994,0.6624
-deepface/tests/dataset/img8.jpg,deepface/tests/dataset/img49.jpg,Yes,0.2353,0.4246,0.686,0.1797,6.8592,0.5996,0.1472,0.5426,0.1904,57.1813,0.617
-deepface/tests/dataset/img8.jpg,deepface/tests/dataset/img50.jpg,Yes,0.3583,0.5144,0.8465,0.24,8.2435,0.6928,0.132,0.5138,0.138,40.4616,0.5253
-deepface/tests/dataset/img8.jpg,deepface/tests/dataset/img51.jpg,Yes,0.3446,0.4498,0.8301,0.1666,6.7177,0.5772,0.1413,0.5317,0.1656,46.6621,0.5756
-deepface/tests/dataset/img9.jpg,deepface/tests/dataset/img47.jpg,Yes,0.3153,0.4374,0.7941,0.1772,6.9625,0.5953,0.1591,0.5641,0.1795,54.801,0.5992
-deepface/tests/dataset/img9.jpg,deepface/tests/dataset/img48.jpg,Yes,0.3537,0.4845,0.8411,0.1723,6.7796,0.5871,0.1234,0.4969,0.1795,52.6507,0.5992
-deepface/tests/dataset/img9.jpg,deepface/tests/dataset/img49.jpg,Yes,0.2072,0.4029,0.6437,0.1954,7.2154,0.6251,0.1529,0.553,0.1311,48.2847,0.5121
-deepface/tests/dataset/img9.jpg,deepface/tests/dataset/img50.jpg,Yes,0.2662,0.4509,0.7296,0.2576,8.5935,0.7177,0.1531,0.5533,0.1205,41.6412,0.491
-deepface/tests/dataset/img9.jpg,deepface/tests/dataset/img51.jpg,Yes,0.3282,0.4507,0.8102,0.2371,8.0755,0.6887,0.1873,0.612,0.1817,51.7388,0.6029
-deepface/tests/dataset/img47.jpg,deepface/tests/dataset/img48.jpg,Yes,0.345,0.4542,0.8307,0.1613,6.4777,0.5679,0.1419,0.5328,0.1649,52.6864,0.5742
-deepface/tests/dataset/img47.jpg,deepface/tests/dataset/img49.jpg,Yes,0.257,0.4382,0.717,0.1944,7.1101,0.6236,0.1089,0.4667,0.2415,66.6307,0.695
-deepface/tests/dataset/img47.jpg,deepface/tests/dataset/img50.jpg,Yes,0.1844,0.3737,0.6073,0.215,7.7872,0.6558,0.1817,0.6029,0.2052,57.2133,0.6406
-deepface/tests/dataset/img47.jpg,deepface/tests/dataset/img51.jpg,Yes,0.1979,0.3274,0.6291,0.1303,5.926,0.5106,0.0939,0.4334,0.1209,44.911,0.4918
-deepface/tests/dataset/img48.jpg,deepface/tests/dataset/img49.jpg,Yes,0.2917,0.4744,0.7639,0.232,7.6321,0.6812,0.1067,0.462,0.2183,61.9241,0.6608
-deepface/tests/dataset/img48.jpg,deepface/tests/dataset/img50.jpg,Yes,0.3985,0.5478,0.8927,0.2745,8.6847,0.7409,0.2245,0.6701,0.2181,55.6337,0.6605
-deepface/tests/dataset/img48.jpg,deepface/tests/dataset/img51.jpg,Yes,0.3408,0.4563,0.8255,0.1586,6.4477,0.5633,0.1734,0.5888,0.2082,55.6445,0.6452
-deepface/tests/dataset/img49.jpg,deepface/tests/dataset/img50.jpg,Yes,0.2073,0.4183,0.6439,0.2437,8.1889,0.6982,0.1738,0.5896,0.1949,57.7545,0.6243
-deepface/tests/dataset/img49.jpg,deepface/tests/dataset/img51.jpg,Yes,0.2694,0.4491,0.7341,0.2076,7.3716,0.6444,0.1414,0.5318,0.2283,62.518,0.6758
-deepface/tests/dataset/img50.jpg,deepface/tests/dataset/img51.jpg,Yes,0.2505,0.4295,0.7079,0.2299,8.07,0.6781,0.1894,0.6155,0.1715,47.5665,0.5857
-deepface/tests/dataset/img16.jpg,deepface/tests/dataset/img17.jpg,Yes,0.2545,0.3759,0.7135,0.1493,6.5661,0.5465,0.2749,0.7414,0.1528,47.8128,0.5528
-deepface/tests/dataset/img16.jpg,deepface/tests/dataset/img59.jpg,Yes,0.1796,0.4352,0.5993,0.3095,9.6361,0.7868,0.4173,0.9136,0.247,61.4867,0.7028
-deepface/tests/dataset/img16.jpg,deepface/tests/dataset/img61.jpg,Yes,0.1779,0.3234,0.5965,0.1863,7.2985,0.6105,0.1407,0.5305,0.1643,53.2032,0.5732
-deepface/tests/dataset/img16.jpg,deepface/tests/dataset/img62.jpg,Yes,0.106,0.2509,0.4604,0.2243,8.1191,0.6698,0.3857,0.8783,0.1953,57.434,0.6249
-deepface/tests/dataset/img17.jpg,deepface/tests/dataset/img59.jpg,Yes,0.2519,0.5106,0.7099,0.2846,9.3099,0.7544,0.3877,0.8806,0.2994,62.5416,0.7739
-deepface/tests/dataset/img17.jpg,deepface/tests/dataset/img61.jpg,Yes,0.2507,0.3495,0.708,0.1992,7.6132,0.6313,0.1867,0.6111,0.2101,58.2095,0.6482
-deepface/tests/dataset/img17.jpg,deepface/tests/dataset/img62.jpg,Yes,0.2533,0.3415,0.7118,0.2672,8.9292,0.731,0.3356,0.8193,0.252,62.3621,0.7099
-deepface/tests/dataset/img59.jpg,deepface/tests/dataset/img61.jpg,Yes,0.192,0.4543,0.6196,0.4417,11.5466,0.9399,0.3558,0.8435,0.1808,54.8373,0.6014
-deepface/tests/dataset/img59.jpg,deepface/tests/dataset/img62.jpg,Yes,0.1123,0.3893,0.4738,0.2974,9.5874,0.7713,0.5393,1.0386,0.1934,55.9836,0.6219
-deepface/tests/dataset/img61.jpg,deepface/tests/dataset/img62.jpg,Yes,0.1251,0.253,0.5002,0.2245,8.1525,0.6701,0.4072,0.9024,0.1757,55.867,0.5928
-deepface/tests/dataset/img26.jpg,deepface/tests/dataset/img27.jpg,Yes,0.3059,0.5758,0.7822,0.3444,9.7537,0.8299,0.1815,0.6026,0.2396,69.4496,0.6922
-deepface/tests/dataset/img26.jpg,deepface/tests/dataset/img28.jpg,Yes,0.343,0.5503,0.8282,0.3556,10.2896,0.8433,0.1662,0.5766,0.205,60.0105,0.6403
-deepface/tests/dataset/img26.jpg,deepface/tests/dataset/img42.jpg,Yes,0.3852,0.542,0.8778,0.3278,9.7855,0.8097,0.2831,0.7524,0.2523,66.2702,0.7104
-deepface/tests/dataset/img26.jpg,deepface/tests/dataset/img43.jpg,Yes,0.3254,0.5271,0.8067,0.2825,8.887,0.7517,0.2876,0.7585,0.3443,79.1342,0.8299
-deepface/tests/dataset/img26.jpg,deepface/tests/dataset/img44.jpg,Yes,0.3645,0.5029,0.8539,0.2248,7.9975,0.6706,0.2646,0.7274,0.2572,68.2216,0.7173
-deepface/tests/dataset/img26.jpg,deepface/tests/dataset/img45.jpg,Yes,0.283,0.4775,0.7523,0.2537,8.5109,0.7124,0.3277,0.8096,0.2726,70.5843,0.7384
-deepface/tests/dataset/img26.jpg,deepface/tests/dataset/img46.jpg,Yes,0.447,0.5967,0.9456,0.4372,11.0907,0.9351,0.3544,0.8419,0.3079,73.7249,0.7848
-deepface/tests/dataset/img27.jpg,deepface/tests/dataset/img28.jpg,Yes,0.2847,0.5707,0.7546,0.2178,7.8688,0.6601,0.1205,0.491,0.232,66.1474,0.6811
-deepface/tests/dataset/img27.jpg,deepface/tests/dataset/img42.jpg,Yes,0.328,0.5946,0.8099,0.2829,8.8485,0.7523,0.3721,0.8627,0.2376,66.8304,0.6893
-deepface/tests/dataset/img27.jpg,deepface/tests/dataset/img43.jpg,Yes,0.3781,0.65,0.8696,0.2827,8.6093,0.7519,0.2004,0.633,0.2924,75.1537,0.7647
-deepface/tests/dataset/img27.jpg,deepface/tests/dataset/img44.jpg,Yes,0.3385,0.5968,0.8229,0.2597,8.3408,0.7207,0.2941,0.7669,0.2314,66.8603,0.6803
-deepface/tests/dataset/img27.jpg,deepface/tests/dataset/img45.jpg,Yes,0.2302,0.5087,0.6785,0.147,6.2958,0.5422,0.2088,0.6463,0.2035,63.0117,0.6379
-deepface/tests/dataset/img27.jpg,deepface/tests/dataset/img46.jpg,Yes,0.3461,0.6141,0.832,0.388,10.1318,0.881,0.264,0.7266,0.2241,65.3424,0.6694
-deepface/tests/dataset/img28.jpg,deepface/tests/dataset/img42.jpg,Yes,0.2442,0.4668,0.6988,0.1991,7.7026,0.631,0.2848,0.7547,0.2583,62.2885,0.7187
-deepface/tests/dataset/img28.jpg,deepface/tests/dataset/img43.jpg,Yes,0.2159,0.4542,0.657,0.2239,8.0122,0.6692,0.2194,0.6624,0.2833,67.7766,0.7527
-deepface/tests/dataset/img28.jpg,deepface/tests/dataset/img44.jpg,Yes,0.2802,0.4883,0.7486,0.1697,7.0317,0.5826,0.2753,0.742,0.2378,61.8227,0.6897
-deepface/tests/dataset/img28.jpg,deepface/tests/dataset/img45.jpg,Yes,0.3044,0.5286,0.7803,0.1768,7.1867,0.5946,0.267,0.7307,0.2683,66.1764,0.7326
-deepface/tests/dataset/img28.jpg,deepface/tests/dataset/img46.jpg,Yes,0.426,0.6222,0.923,0.3338,9.8004,0.817,0.2481,0.7044,0.3072,68.9752,0.7838
-deepface/tests/dataset/img42.jpg,deepface/tests/dataset/img43.jpg,Yes,0.2018,0.4174,0.6353,0.2418,8.227,0.6954,0.1678,0.5794,0.1483,49.1175,0.5446
-deepface/tests/dataset/img42.jpg,deepface/tests/dataset/img44.jpg,Yes,0.1685,0.3458,0.5805,0.119,5.8252,0.4879,0.2432,0.6975,0.0957,39.352,0.4375
-deepface/tests/dataset/img42.jpg,deepface/tests/dataset/img45.jpg,Yes,0.2004,0.4027,0.6331,0.1378,6.2772,0.5251,0.1982,0.6296,0.1742,53.3531,0.5903
-deepface/tests/dataset/img42.jpg,deepface/tests/dataset/img46.jpg,Yes,0.2253,0.4245,0.6713,0.1946,7.4093,0.6239,0.1761,0.5934,0.1568,49.1856,0.5601
-deepface/tests/dataset/img43.jpg,deepface/tests/dataset/img44.jpg,Yes,0.2049,0.4137,0.6402,0.2238,7.7899,0.6691,0.1748,0.5912,0.1553,51.4113,0.5573
-deepface/tests/dataset/img43.jpg,deepface/tests/dataset/img45.jpg,Yes,0.2298,0.4524,0.6779,0.2281,7.8811,0.6754,0.0531,0.3257,0.1801,55.7173,0.6001
-deepface/tests/dataset/img43.jpg,deepface/tests/dataset/img46.jpg,Yes,0.3731,0.5738,0.8638,0.3741,10.0121,0.865,0.1394,0.5281,0.2184,60.1165,0.6609
-deepface/tests/dataset/img44.jpg,deepface/tests/dataset/img45.jpg,Yes,0.1743,0.3671,0.5903,0.1052,5.4022,0.4587,0.1636,0.572,0.1275,46.7067,0.505
-deepface/tests/dataset/img44.jpg,deepface/tests/dataset/img46.jpg,Yes,0.2682,0.4468,0.7324,0.2225,7.7975,0.667,0.1984,0.6299,0.1569,50.7309,0.5602
-deepface/tests/dataset/img45.jpg,deepface/tests/dataset/img46.jpg,Yes,0.2818,0.486,0.7507,0.2239,7.8397,0.6692,0.1379,0.5252,0.193,56.6925,0.6213
-deepface/tests/dataset/img24.jpg,deepface/tests/dataset/img25.jpg,Yes,0.1197,0.2833,0.4893,0.1419,6.4307,0.5327,0.1666,0.5773,0.2083,60.7717,0.6454
-deepface/tests/dataset/img21.jpg,deepface/tests/dataset/img17.jpg,No,0.4907,0.531,0.9907,0.6285,13.4397,1.1212,0.807,1.2704,0.3363,67.5896,0.8201
-deepface/tests/dataset/img23.jpg,deepface/tests/dataset/img47.jpg,No,0.5671,0.563,1.065,0.6961,13.8325,1.1799,0.1334,0.5166,0.2008,56.6182,0.6337
-deepface/tests/dataset/img16.jpg,deepface/tests/dataset/img24.jpg,No,0.6046,0.5757,1.0997,0.9105,16.3487,1.3494,0.2078,0.6447,0.2218,57.4046,0.666
-deepface/tests/dataset/img50.jpg,deepface/tests/dataset/img16.jpg,No,0.7308,0.7317,1.2089,1.0868,17.7134,1.4743,0.3578,0.846,0.2254,57.4293,0.6715
-deepface/tests/dataset/img41.jpg,deepface/tests/dataset/img18.jpg,No,0.4197,0.569,0.9162,0.8173,13.1177,1.2786,0.6457,1.1364,0.3401,75.8425,0.8247
-deepface/tests/dataset/img12.jpg,deepface/tests/dataset/img32.jpg,No,0.7555,0.9708,1.2293,1.0896,18.6004,1.4762,0.4448,0.9432,0.2547,60.7653,0.7138
-deepface/tests/dataset/img51.jpg,deepface/tests/dataset/img26.jpg,No,0.506,0.5807,1.006,0.7329,14.3648,1.2107,0.2928,0.7652,0.2226,61.9764,0.6672
-deepface/tests/dataset/img13.jpg,deepface/tests/dataset/img27.jpg,No,0.688,0.9511,1.1731,0.9559,15.8763,1.3827,0.3366,0.8205,0.2086,63.7428,0.6459
-deepface/tests/dataset/img35.jpg,deepface/tests/dataset/img33.jpg,No,0.2131,0.3838,0.6528,0.5762,12.621,1.0735,0.3323,0.8153,0.2895,74.4074,0.7609
-deepface/tests/dataset/img34.jpg,deepface/tests/dataset/img44.jpg,No,0.7964,0.6879,1.262,0.9531,16.8504,1.3806,0.4968,0.9968,0.2565,63.8992,0.7162
-deepface/tests/dataset/img8.jpg,deepface/tests/dataset/img61.jpg,No,0.8548,0.6996,1.3075,0.9485,16.2825,1.3773,0.6479,1.1383,0.259,64.0582,0.7198
-deepface/tests/dataset/img53.jpg,deepface/tests/dataset/img4.jpg,No,0.5862,0.6454,1.0828,0.8624,16.0416,1.3133,0.3185,0.7982,0.2397,65.712,0.6924
-deepface/tests/dataset/img54.jpg,deepface/tests/dataset/img2.jpg,No,0.6948,0.9246,1.1788,0.9568,16.4217,1.3833,0.3481,0.8344,0.2497,64.7938,0.7067
-deepface/tests/dataset/img43.jpg,deepface/tests/dataset/img24.jpg,No,0.7757,0.7407,1.2456,1.0007,16.8769,1.4147,0.4194,0.9159,0.3961,77.6798,0.8901
-deepface/tests/dataset/img6.jpg,deepface/tests/dataset/img20.jpg,No,0.6784,0.7154,1.1648,0.9864,16.5342,1.4045,0.2043,0.6392,0.2499,67.3658,0.707
-deepface/tests/dataset/img40.jpg,deepface/tests/dataset/img20.jpg,No,0.474,0.4904,0.9736,0.7949,14.8341,1.2609,0.4776,0.9773,0.2192,56.6904,0.6621
-deepface/tests/dataset/img29.jpg,deepface/tests/dataset/img49.jpg,No,0.725,0.7156,1.2041,1.2676,18.7008,1.5922,0.3254,0.8068,0.1968,58.1537,0.6274
-deepface/tests/dataset/img38.jpg,deepface/tests/dataset/img29.jpg,No,0.5496,0.5428,1.0484,1.1766,18.8394,1.534,0.2956,0.769,0.323,68.2188,0.8037
-deepface/tests/dataset/img7.jpg,deepface/tests/dataset/img20.jpg,No,0.7791,0.7506,1.2482,0.945,16.0728,1.3748,0.2922,0.7645,0.2063,58.285,0.6424
-deepface/tests/dataset/img54.jpg,deepface/tests/dataset/img10.jpg,No,0.6852,0.8904,1.1707,0.9223,16.2459,1.3582,0.3508,0.8377,0.2699,67.3228,0.7347
-deepface/tests/dataset/img17.jpg,deepface/tests/dataset/img43.jpg,No,0.7785,0.7344,1.2478,0.8234,15.1735,1.2833,0.8461,1.3009,0.3715,74.2351,0.862
-deepface/tests/dataset/img56.jpg,deepface/tests/dataset/img47.jpg,No,0.5798,0.6885,1.0769,0.9515,16.1507,1.3795,0.2527,0.7109,0.1453,51.4537,0.5391
-deepface/tests/dataset/img10.jpg,deepface/tests/dataset/img15.jpg,No,0.7144,1.0202,1.1953,1.1267,17.5833,1.5012,0.7384,1.2152,0.404,87.858,0.8989
-deepface/tests/dataset/img21.jpg,deepface/tests/dataset/img61.jpg,No,0.5642,0.5883,1.0623,0.7305,14.4227,1.2088,0.5523,1.051,0.3206,73.1845,0.8008
-deepface/tests/dataset/img34.jpg,deepface/tests/dataset/img47.jpg,No,0.6442,0.5952,1.1351,1.0884,17.8754,1.4754,0.6225,1.1158,0.2549,64.7586,0.714
-deepface/tests/dataset/img11.jpg,deepface/tests/dataset/img51.jpg,No,0.5459,0.6938,1.0448,0.7452,14.4984,1.2208,0.1807,0.6012,0.179,58.3078,0.5983
-deepface/tests/dataset/img4.jpg,deepface/tests/dataset/img14.jpg,No,0.7235,0.8162,1.2029,1.0599,16.8526,1.4559,0.4242,0.9211,0.26,72.3704,0.7211
-deepface/tests/dataset/img38.jpg,deepface/tests/dataset/img14.jpg,No,0.5044,0.637,1.0044,0.9856,16.5161,1.404,0.2733,0.7393,0.354,80.6472,0.8415
-deepface/tests/dataset/img19.jpg,deepface/tests/dataset/img47.jpg,No,0.5752,0.6917,1.0726,1.0042,17.1669,1.4172,0.354,0.8414,0.1709,59.1711,0.5846
-deepface/tests/dataset/img55.jpg,deepface/tests/dataset/img14.jpg,No,0.6473,0.7275,1.1378,0.9052,15.7543,1.3455,0.2127,0.6523,0.2293,67.2542,0.6771
-deepface/tests/dataset/img20.jpg,deepface/tests/dataset/img33.jpg,No,0.4886,0.541,0.9885,0.9202,16.051,1.3566,0.6114,1.1058,0.253,62.6318,0.7113
-deepface/tests/dataset/img39.jpg,deepface/tests/dataset/img62.jpg,No,0.4634,0.5606,0.9627,0.8783,16.0858,1.3254,0.7776,1.2471,0.329,70.4788,0.8112
-deepface/tests/dataset/img54.jpg,deepface/tests/dataset/img58.jpg,No,0.6048,0.9477,1.0998,0.8084,15.0301,1.2716,0.6403,1.1316,0.3272,69.1393,0.809
-deepface/tests/dataset/img11.jpg,deepface/tests/dataset/img9.jpg,No,0.6643,0.7784,1.1527,0.899,16.0335,1.3409,0.2452,0.7002,0.1639,56.0631,0.5725
-deepface/tests/dataset/img6.jpg,deepface/tests/dataset/img46.jpg,No,0.5766,0.7054,1.0738,0.9264,15.9036,1.3611,0.1341,0.5179,0.2298,64.5324,0.6779
-deepface/tests/dataset/img7.jpg,deepface/tests/dataset/img59.jpg,No,0.7679,0.8729,1.2393,1.0242,17.2778,1.4312,0.7789,1.2481,0.3103,69.694,0.7878
-deepface/tests/dataset/img7.jpg,deepface/tests/dataset/img35.jpg,No,0.8227,0.8096,1.2827,1.0357,16.7157,1.4392,0.4864,0.9863,0.2401,68.9468,0.693
-deepface/tests/dataset/img5.jpg,deepface/tests/dataset/img19.jpg,No,0.7052,0.752,1.1876,0.9084,16.1781,1.3479,0.2462,0.7016,0.1449,58.8831,0.5384
-deepface/tests/dataset/img55.jpg,deepface/tests/dataset/img8.jpg,No,0.4891,0.5451,0.989,0.7908,14.9832,1.2576,0.2408,0.6939,0.2341,63.666,0.6843
-deepface/tests/dataset/img22.jpg,deepface/tests/dataset/img51.jpg,No,0.5201,0.5378,1.0199,0.6262,13.2133,1.1191,0.1456,0.5397,0.2985,60.8239,0.7726
-deepface/tests/dataset/img4.jpg,deepface/tests/dataset/img15.jpg,No,0.7147,0.9872,1.1956,1.0641,17.2349,1.4588,0.6229,1.1162,0.4049,89.7221,0.8998
-deepface/tests/dataset/img19.jpg,deepface/tests/dataset/img29.jpg,No,0.3605,0.5646,0.8492,0.6901,14.6314,1.1748,0.1803,0.6005,0.2709,71.9655,0.7361
-deepface/tests/dataset/img20.jpg,deepface/tests/dataset/img28.jpg,No,0.5807,0.6843,1.0777,0.8133,15.3844,1.2754,0.1274,0.5048,0.1841,53.6094,0.6067
-deepface/tests/dataset/img55.jpg,deepface/tests/dataset/img13.jpg,No,0.6366,0.8086,1.1283,0.8832,15.8044,1.3291,0.3343,0.8177,0.177,57.373,0.5949
-deepface/tests/dataset/img34.jpg,deepface/tests/dataset/img22.jpg,No,0.7842,0.6655,1.2523,1.137,18.5595,1.508,0.4797,0.9795,0.2457,56.695,0.7011
-deepface/tests/dataset/img67.jpg,deepface/tests/dataset/img58.jpg,No,0.5051,0.8463,1.0051,0.8713,16.0723,1.3201,0.5281,1.0277,0.276,67.6933,0.743
-deepface/tests/dataset/img18.jpg,deepface/tests/dataset/img9.jpg,No,0.7493,0.7683,1.2242,1.0774,17.7057,1.4679,0.5343,1.0337,0.2113,62.0197,0.65
-deepface/tests/dataset/img11.jpg,deepface/tests/dataset/img58.jpg,No,0.7495,1.0309,1.2243,1.0359,16.9461,1.4394,0.6411,1.1324,0.2259,65.3131,0.6721
-deepface/tests/dataset/img18.jpg,deepface/tests/dataset/img42.jpg,No,0.8335,0.8332,1.2911,1.0838,17.9617,1.4723,0.4051,0.9001,0.2449,66.4075,0.6999
-deepface/tests/dataset/img18.jpg,deepface/tests/dataset/img13.jpg,No,0.476,0.7428,0.9757,1.1589,18.2018,1.5224,0.306,0.7823,0.1879,59.4531,0.6129
-deepface/tests/dataset/img4.jpg,deepface/tests/dataset/img32.jpg,No,0.7116,0.8739,1.193,1.0402,17.6777,1.4424,0.6456,1.1363,0.2896,71.6141,0.761
-deepface/tests/dataset/img67.jpg,deepface/tests/dataset/img37.jpg,No,0.4644,0.652,0.9638,0.6683,14.5099,1.1561,0.2355,0.6862,0.2475,61.9234,0.7036
-deepface/tests/dataset/img41.jpg,deepface/tests/dataset/img7.jpg,No,0.8444,0.7812,1.2666,0.9357,16.3278,1.368,0.4702,1.459,0.4919,67.9214,0.7892
-deepface/tests/dataset/img11.jpg,deepface/tests/dataset/img27.jpg,No,0.6496,0.8811,1.1398,0.9364,16.0727,1.3685,0.2416,0.6951,0.2127,66.7336,0.6523
-deepface/tests/dataset/img20.jpg,deepface/tests/dataset/img47.jpg,No,0.6418,0.6011,1.1329,1.0579,16.9991,1.4546,0.31,0.7874,0.1754,54.6104,0.5924
-deepface/tests/dataset/img2.jpg,deepface/tests/dataset/img44.jpg,No,0.4815,0.6806,0.9814,0.7396,14.1679,1.2162,0.2009,0.6338,0.1836,57.4368,0.606
-deepface/tests/dataset/img28.jpg,deepface/tests/dataset/img24.jpg,No,0.7851,0.7588,1.2531,0.9406,16.8964,1.3715,0.5353,1.0347,0.2609,60.6589,0.7224
-deepface/tests/dataset/img67.jpg,deepface/tests/dataset/img43.jpg,No,0.691,0.8328,1.1756,0.9621,16.9417,1.3872,0.3176,0.797,0.3072,72.9213,0.7838
-deepface/tests/dataset/img18.jpg,deepface/tests/dataset/img51.jpg,No,0.668,0.7024,1.1558,1.1051,17.8105,1.4867,0.2508,0.7083,0.1882,58.3932,0.6135
-deepface/tests/dataset/img11.jpg,deepface/tests/dataset/img24.jpg,No,0.79,0.801,1.257,1.1173,18.2579,1.4949,0.3437,0.829,0.3096,74.5014,0.7869
-deepface/tests/dataset/img67.jpg,deepface/tests/dataset/img29.jpg,No,0.5389,0.6762,1.0382,0.8354,16.2507,1.2926,0.1501,0.5479,0.2668,63.7773,0.7305
-deepface/tests/dataset/img29.jpg,deepface/tests/dataset/img59.jpg,No,0.4237,0.6225,0.9205,0.5002,12.4131,1.0002,0.6375,1.1292,0.2637,58.2849,0.7262
-deepface/tests/dataset/img41.jpg,deepface/tests/dataset/img24.jpg,No,0.5431,0.5391,1.0422,1.1194,18.4041,1.4962,0.8286,1.2873,0.4458,74.1332,0.9442
-deepface/tests/dataset/img35.jpg,deepface/tests/dataset/img27.jpg,No,0.821,0.9129,1.2814,0.964,15.9831,1.3885,0.4812,0.9811,0.3061,80.9221,0.7824
-deepface/tests/dataset/img39.jpg,deepface/tests/dataset/img67.jpg,No,0.5513,0.7255,1.0501,0.9839,17.4219,1.4028,0.8181,1.2792,0.2914,66.5717,0.7634
-deepface/tests/dataset/img39.jpg,deepface/tests/dataset/img12.jpg,No,0.6435,0.8102,1.1344,0.7661,15.2245,1.2378,0.7472,1.2224,0.2716,61.7006,0.737
-deepface/tests/dataset/img41.jpg,deepface/tests/dataset/img46.jpg,No,0.8116,0.7634,1.2028,1.1264,17.9427,1.5009,0.9219,1.3578,0.3511,70.3501,0.838
-deepface/tests/dataset/img32.jpg,deepface/tests/dataset/img27.jpg,No,0.7197,0.9593,1.1997,0.7295,14.4944,1.2079,0.5619,1.0601,0.2725,70.5338,0.7382
-deepface/tests/dataset/img40.jpg,deepface/tests/dataset/img11.jpg,No,0.7205,0.7563,1.2004,0.9367,16.3131,1.3687,0.5427,1.0418,0.186,59.4748,0.61
-deepface/tests/dataset/img41.jpg,deepface/tests/dataset/img22.jpg,No,0.5579,0.6466,1.2024,1.0076,17.2122,1.4196,0.7998,1.2648,0.392,65.4579,0.8854
-deepface/tests/dataset/img2.jpg,deepface/tests/dataset/img35.jpg,No,0.8303,0.9037,1.2887,1.0988,17.1897,1.4824,0.498,0.998,0.2992,78.1653,0.7736
-deepface/tests/dataset/img5.jpg,deepface/tests/dataset/img45.jpg,No,0.5247,0.6013,1.0244,0.8827,15.3713,1.3287,0.218,0.6603,0.2322,72.2019,0.6814
-deepface/tests/dataset/img58.jpg,deepface/tests/dataset/img59.jpg,No,0.5937,0.9226,1.0896,0.9931,16.9142,1.4093,0.3525,0.8396,0.3095,68.0277,0.7868
-deepface/tests/dataset/img40.jpg,deepface/tests/dataset/img45.jpg,No,0.772,0.6976,1.2426,1.0516,17.0626,1.4503,0.5487,1.0475,0.2628,63.7285,0.725
-deepface/tests/dataset/img41.jpg,deepface/tests/dataset/img3.jpg,No,0.6417,0.6822,1.1329,0.832,15.8921,1.29,1.0374,1.4404,0.2312,54.5718,0.68
-deepface/tests/dataset/img40.jpg,deepface/tests/dataset/img67.jpg,No,0.4138,0.5942,0.9098,0.948,16.9509,1.3769,0.5121,1.012,0.2455,61.9071,0.7008
-deepface/tests/dataset/img4.jpg,deepface/tests/dataset/img50.jpg,No,0.5776,0.6934,1.0748,0.816,15.3649,1.2775,0.3515,0.8385,0.2072,61.657,0.6437
-deepface/tests/dataset/img67.jpg,deepface/tests/dataset/img47.jpg,No,0.5726,0.692,1.0701,0.9987,17.2907,1.4133,0.4099,0.9054,0.1723,55.0701,0.587
-deepface/tests/dataset/img55.jpg,deepface/tests/dataset/img20.jpg,No,0.684,0.6408,1.1696,0.924,16.3035,1.3594,0.2156,0.6566,0.2111,61.919,0.6498
-deepface/tests/dataset/img13.jpg,deepface/tests/dataset/img33.jpg,No,0.4625,0.7042,0.9617,0.8709,15.4791,1.3198,0.5609,1.0591,0.3643,76.6864,0.8536
-deepface/tests/dataset/img41.jpg,deepface/tests/dataset/img58.jpg,No,0.5732,0.8464,1.0707,0.7511,16.6216,1.4011,0.5091,1.009,0.3653,71.3439,0.8548
-deepface/tests/dataset/img19.jpg,deepface/tests/dataset/img48.jpg,No,0.8186,0.8431,1.2795,1.1082,17.769,1.4888,0.3914,0.8848,0.2363,68.307,0.6875
-deepface/tests/dataset/img18.jpg,deepface/tests/dataset/img49.jpg,No,0.6614,0.7617,1.1501,0.9935,16.5922,1.4096,0.427,0.9241,0.28,73.8384,0.7483
-deepface/tests/dataset/img10.jpg,deepface/tests/dataset/img19.jpg,No,0.603,0.7998,1.0982,0.9508,16.8085,1.379,0.3546,0.8422,0.2352,69.7597,0.6859
-deepface/tests/dataset/img48.jpg,deepface/tests/dataset/img17.jpg,No,0.8174,0.6679,1.2786,0.922,15.8462,1.3579,0.7438,1.2196,0.2545,59.7077,0.7134
-deepface/tests/dataset/img55.jpg,deepface/tests/dataset/img2.jpg,No,0.6454,0.7751,1.1362,1.0674,17.3381,1.4611,0.1279,0.5058,0.1983,61.7554,0.6298
-deepface/tests/dataset/img41.jpg,deepface/tests/dataset/img48.jpg,No,0.7325,0.7072,1.2605,0.8198,15.0575,1.2805,0.9352,1.3676,0.3504,69.8577,0.8371
-deepface/tests/dataset/img30.jpg,deepface/tests/dataset/img44.jpg,No,0.8834,0.7196,1.3292,0.8683,15.5513,1.3178,0.563,1.0611,0.363,75.7833,0.8521
-deepface/tests/dataset/img6.jpg,deepface/tests/dataset/img29.jpg,No,0.7666,0.7464,1.2382,1.0057,17.0345,1.4183,0.3434,0.8287,0.2411,64.6435,0.6943
-deepface/tests/dataset/img19.jpg,deepface/tests/dataset/img26.jpg,No,0.6542,0.7763,1.1439,0.9204,16.7702,1.3568,0.2292,0.677,0.262,73.7273,0.7239
-deepface/tests/dataset/img29.jpg,deepface/tests/dataset/img50.jpg,No,0.6879,0.692,1.1729,1.3134,19.7708,1.6207,0.5038,1.0038,0.2577,54.3931,0.7179
-deepface/tests/dataset/img35.jpg,deepface/tests/dataset/img49.jpg,No,0.8339,0.8186,1.2915,1.2099,17.7753,1.5555,0.5957,1.0915,0.3315,82.3474,0.8142
-deepface/tests/dataset/img22.jpg,deepface/tests/dataset/img28.jpg,No,0.6313,0.7037,1.1236,0.8177,15.5314,1.2789,0.2031,0.6373,0.2271,55.2529,0.6739
-deepface/tests/dataset/img21.jpg,deepface/tests/dataset/img16.jpg,No,0.5678,0.6114,1.0657,0.6376,13.417,1.1293,0.4173,0.9136,0.2696,65.0241,0.7343
-deepface/tests/dataset/img21.jpg,deepface/tests/dataset/img9.jpg,No,0.7653,0.7211,1.2372,1.0502,17.1485,1.4493,0.5726,1.0701,0.3059,68.2225,0.7822
-deepface/tests/dataset/img2.jpg,deepface/tests/dataset/img22.jpg,No,0.6866,0.7895,1.1718,1.0005,16.6324,1.4145,0.1955,0.6253,0.3061,69.9331,0.7824
-deepface/tests/dataset/img12.jpg,deepface/tests/dataset/img29.jpg,No,0.78,0.8337,1.249,1.1016,18.4797,1.4843,0.3404,0.8251,0.3293,67.3331,0.8115
-deepface/tests/dataset/img6.jpg,deepface/tests/dataset/img37.jpg,No,0.7532,0.7788,1.2273,1.0976,17.7567,1.4816,0.2647,0.7275,0.331,74.5559,0.8137
-deepface/tests/dataset/img6.jpg,deepface/tests/dataset/img16.jpg,No,0.7516,0.7581,1.226,1.0332,16.9971,1.4375,0.3815,0.8735,0.2859,72.0572,0.7561
-deepface/tests/dataset/img38.jpg,deepface/tests/dataset/img33.jpg,No,0.4588,0.5085,0.958,1.2465,19.0695,1.5789,0.657,1.1463,0.3722,76.6896,0.8628
-deepface/tests/dataset/img35.jpg,deepface/tests/dataset/img32.jpg,No,0.2651,0.5459,0.7282,0.5427,12.6429,1.0418,0.409,0.9045,0.2546,69.5802,0.7136
-deepface/tests/dataset/img54.jpg,deepface/tests/dataset/img48.jpg,No,0.4528,0.678,0.9516,0.8385,15.166,1.295,0.2238,0.669,0.218,56.5099,0.6603
-deepface/tests/dataset/img1.jpg,deepface/tests/dataset/img23.jpg,No,0.5305,0.5523,1.03,0.7766,14.6983,1.2463,0.1967,0.6272,0.2144,53.347,0.6549
-deepface/tests/dataset/img39.jpg,deepface/tests/dataset/img33.jpg,No,0.5132,0.6067,1.0131,1.1197,17.8246,1.4965,0.2379,0.6898,0.2301,55.7862,0.6783
-deepface/tests/dataset/img3.jpg,deepface/tests/dataset/img48.jpg,No,0.4123,0.5581,0.908,0.7879,14.8183,1.2553,0.2125,0.6519,0.2177,56.6639,0.6598
-deepface/tests/dataset/img43.jpg,deepface/tests/dataset/img25.jpg,No,0.7819,0.7991,1.2505,0.9007,15.601,1.3422,0.4363,0.9341,0.3555,81.219,0.8432
-deepface/tests/dataset/img14.jpg,deepface/tests/dataset/img9.jpg,No,0.7257,0.7829,1.2047,0.8679,15.1696,1.3175,0.5752,1.0725,0.2493,67.0315,0.7061
-deepface/tests/dataset/img6.jpg,deepface/tests/dataset/img47.jpg,No,0.5391,0.6276,1.0383,0.7885,14.6406,1.2558,0.1013,0.4501,0.1756,57.5202,0.5926
-deepface/tests/dataset/img18.jpg,deepface/tests/dataset/img28.jpg,No,0.8293,0.8828,1.2878,1.1151,18.3899,1.4934,0.497,0.997,0.2323,64.8263,0.6816
-deepface/tests/dataset/img7.jpg,deepface/tests/dataset/img57.jpg,No,0.7468,0.815,1.2221,1.1241,17.3821,1.4994,0.6916,1.1761,0.2244,68.912,0.6699
-deepface/tests/dataset/img48.jpg,deepface/tests/dataset/img26.jpg,No,0.5877,0.646,1.0842,0.9734,16.2582,1.3953,0.3102,0.7876,0.2059,60.3497,0.6417
-deepface/tests/dataset/img19.jpg,deepface/tests/dataset/img34.jpg,No,0.2957,0.5193,0.7691,0.5281,12.9854,1.0277,0.5987,1.0943,0.2628,71.5029,0.725
-deepface/tests/dataset/img41.jpg,deepface/tests/dataset/img37.jpg,No,0.4337,0.5351,0.9314,0.8568,16.0356,1.309,0.684,1.1696,0.3654,65.8114,0.8548
-deepface/tests/dataset/img1.jpg,deepface/tests/dataset/img32.jpg,No,0.6985,0.8184,1.182,0.9682,16.9113,1.3915,0.5654,1.0634,0.3173,65.953,0.7967
-deepface/tests/dataset/img12.jpg,deepface/tests/dataset/img57.jpg,No,0.6424,0.8305,1.1335,0.8361,15.6851,1.2931,0.5927,1.0888,0.2943,77.8234,0.7672
-deepface/tests/dataset/img55.jpg,deepface/tests/dataset/img5.jpg,No,0.662,0.6012,1.1507,0.9931,16.5792,1.4093,0.137,0.5234,0.2182,70.8567,0.6606
-deepface/tests/dataset/img47.jpg,deepface/tests/dataset/img61.jpg,No,0.6896,0.603,1.1744,0.98,16.5069,1.4,0.5598,1.0581,0.187,57.8252,0.6115
-deepface/tests/dataset/img33.jpg,deepface/tests/dataset/img49.jpg,No,0.8253,0.7753,1.2848,1.0329,16.5833,1.4373,0.6695,1.1572,0.1992,58.9069,0.6313
-deepface/tests/dataset/img54.jpg,deepface/tests/dataset/img1.jpg,No,0.5922,0.7522,1.0883,0.9398,16.3902,1.371,0.2515,0.7092,0.2836,62.9648,0.7532
-deepface/tests/dataset/img29.jpg,deepface/tests/dataset/img25.jpg,No,0.5458,0.5846,1.0448,0.9074,16.167,1.3472,0.622,1.1153,0.2743,68.4542,0.7407
-deepface/tests/dataset/img55.jpg,deepface/tests/dataset/img67.jpg,No,0.6649,0.7541,1.1531,1.1444,18.95,1.5129,0.3094,0.7866,0.2195,63.9684,0.6625
-deepface/tests/dataset/img53.jpg,deepface/tests/dataset/img30.jpg,No,0.9492,0.7325,1.3778,0.9241,16.5521,1.3595,0.5533,1.052,0.2955,62.208,0.7687
-deepface/tests/dataset/img6.jpg,deepface/tests/dataset/img25.jpg,No,0.8285,0.8131,1.2872,0.8051,14.8877,1.2689,0.4267,0.9238,0.3226,79.803,0.8032
-deepface/tests/dataset/img4.jpg,deepface/tests/dataset/img43.jpg,No,0.6285,0.7443,1.1211,0.838,15.1848,1.2946,0.212,0.6511,0.2685,71.4046,0.7329
-deepface/tests/dataset/img39.jpg,deepface/tests/dataset/img27.jpg,No,0.7176,0.8685,1.198,0.8199,14.9449,1.2805,0.8286,1.2873,0.285,71.6832,0.755
-deepface/tests/dataset/img36.jpg,deepface/tests/dataset/img23.jpg,No,0.6223,0.5866,1.1156,1.0693,17.5747,1.4624,0.4266,0.9237,0.32,58.9248,0.7999
-deepface/tests/dataset/img4.jpg,deepface/tests/dataset/img45.jpg,No,0.6021,0.7106,1.0973,0.9407,16.2744,1.3716,0.2162,0.6576,0.2166,64.3341,0.6582
-deepface/tests/dataset/img38.jpg,deepface/tests/dataset/img19.jpg,No,0.356,0.5607,0.8437,0.9843,17.485,1.403,0.1858,0.6097,0.2867,75.4126,0.7572
-deepface/tests/dataset/img55.jpg,deepface/tests/dataset/img17.jpg,No,0.7135,0.6076,1.1946,0.944,16.691,1.374,0.7449,1.2205,0.2951,70.5113,0.7682
-deepface/tests/dataset/img9.jpg,deepface/tests/dataset/img59.jpg,No,0.8449,0.8766,1.2999,1.1333,18.3376,1.5055,0.8844,1.33,0.3088,67.5783,0.7859
-deepface/tests/dataset/img58.jpg,deepface/tests/dataset/img49.jpg,No,0.5999,0.8901,1.0953,0.9147,15.3098,1.3526,0.4925,0.9925,0.2266,63.0835,0.6733
-deepface/tests/dataset/img56.jpg,deepface/tests/dataset/img59.jpg,No,0.7694,0.9166,1.2405,1.0062,17.304,1.4186,0.8703,1.3193,0.2966,70.5446,0.7702
-deepface/tests/dataset/img4.jpg,deepface/tests/dataset/img8.jpg,No,0.5753,0.6478,1.0727,0.842,15.2912,1.2977,0.3808,0.8727,0.1878,59.2,0.6129
-deepface/tests/dataset/img16.jpg,deepface/tests/dataset/img25.jpg,No,0.5927,0.6271,1.0887,0.9862,16.5907,1.4044,0.286,0.7563,0.1702,56.0079,0.5835
-deepface/tests/dataset/img50.jpg,deepface/tests/dataset/img45.jpg,No,0.5692,0.6912,1.067,0.8581,15.6737,1.3101,0.3278,0.8097,0.2383,60.6426,0.6903
-deepface/tests/dataset/img38.jpg,deepface/tests/dataset/img31.jpg,No,0.4739,0.4751,0.9736,1.1148,18.1862,1.4932,0.6661,1.1542,0.331,70.516,0.8136
-deepface/tests/dataset/img13.jpg,deepface/tests/dataset/img51.jpg,No,0.5639,0.7621,1.062,0.8047,14.7361,1.2686,0.4,0.8945,0.2308,60.6072,0.6795
-deepface/tests/dataset/img1.jpg,deepface/tests/dataset/img33.jpg,No,0.7127,0.6418,1.1939,0.9433,16.1933,1.3736,0.6509,1.1409,0.2684,62.7672,0.7326
-deepface/tests/dataset/img53.jpg,deepface/tests/dataset/img16.jpg,No,0.8344,0.7073,1.2918,0.9023,16.3918,1.3433,0.4153,0.9114,0.3045,65.6394,0.7803
-deepface/tests/dataset/img53.jpg,deepface/tests/dataset/img23.jpg,No,0.4644,0.5199,0.9637,0.7267,14.6939,1.2056,0.1784,0.5973,0.2774,55.6833,0.7448
\ No newline at end of file
diff --git a/tests/dataset/img1.jpg b/tests/dataset/img1.jpg
deleted file mode 100644
index 7ceab4f..0000000
Binary files a/tests/dataset/img1.jpg and /dev/null differ
diff --git a/tests/dataset/img10.jpg b/tests/dataset/img10.jpg
deleted file mode 100644
index 267d401..0000000
Binary files a/tests/dataset/img10.jpg and /dev/null differ
diff --git a/tests/dataset/img11.jpg b/tests/dataset/img11.jpg
deleted file mode 100644
index 0ebe708..0000000
Binary files a/tests/dataset/img11.jpg and /dev/null differ
diff --git a/tests/dataset/img11_reflection.jpg b/tests/dataset/img11_reflection.jpg
deleted file mode 100644
index 24b29f2..0000000
Binary files a/tests/dataset/img11_reflection.jpg and /dev/null differ
diff --git a/tests/dataset/img12.jpg b/tests/dataset/img12.jpg
deleted file mode 100644
index e85fac0..0000000
Binary files a/tests/dataset/img12.jpg and /dev/null differ
diff --git a/tests/dataset/img13.jpg b/tests/dataset/img13.jpg
deleted file mode 100644
index 74ed70a..0000000
Binary files a/tests/dataset/img13.jpg and /dev/null differ
diff --git a/tests/dataset/img14.jpg b/tests/dataset/img14.jpg
deleted file mode 100644
index 1f527fb..0000000
Binary files a/tests/dataset/img14.jpg and /dev/null differ
diff --git a/tests/dataset/img15.jpg b/tests/dataset/img15.jpg
deleted file mode 100644
index c3e85a3..0000000
Binary files a/tests/dataset/img15.jpg and /dev/null differ
diff --git a/tests/dataset/img16.jpg b/tests/dataset/img16.jpg
deleted file mode 100644
index 0774887..0000000
Binary files a/tests/dataset/img16.jpg and /dev/null differ
diff --git a/tests/dataset/img17.jpg b/tests/dataset/img17.jpg
deleted file mode 100644
index 7d37ff5..0000000
Binary files a/tests/dataset/img17.jpg and /dev/null differ
diff --git a/tests/dataset/img18.jpg b/tests/dataset/img18.jpg
deleted file mode 100644
index 6141407..0000000
Binary files a/tests/dataset/img18.jpg and /dev/null differ
diff --git a/tests/dataset/img19.jpg b/tests/dataset/img19.jpg
deleted file mode 100644
index 45433dc..0000000
Binary files a/tests/dataset/img19.jpg and /dev/null differ
diff --git a/tests/dataset/img2.jpg b/tests/dataset/img2.jpg
deleted file mode 100644
index 05683ea..0000000
Binary files a/tests/dataset/img2.jpg and /dev/null differ
diff --git a/tests/dataset/img20.jpg b/tests/dataset/img20.jpg
deleted file mode 100644
index 6e85175..0000000
Binary files a/tests/dataset/img20.jpg and /dev/null differ
diff --git a/tests/dataset/img21.jpg b/tests/dataset/img21.jpg
deleted file mode 100644
index 1f90aa4..0000000
Binary files a/tests/dataset/img21.jpg and /dev/null differ
diff --git a/tests/dataset/img22.jpg b/tests/dataset/img22.jpg
deleted file mode 100644
index 0d18c94..0000000
Binary files a/tests/dataset/img22.jpg and /dev/null differ
diff --git a/tests/dataset/img23.jpg b/tests/dataset/img23.jpg
deleted file mode 100644
index c96eb04..0000000
Binary files a/tests/dataset/img23.jpg and /dev/null differ
diff --git a/tests/dataset/img24.jpg b/tests/dataset/img24.jpg
deleted file mode 100644
index d134ed5..0000000
Binary files a/tests/dataset/img24.jpg and /dev/null differ
diff --git a/tests/dataset/img25.jpg b/tests/dataset/img25.jpg
deleted file mode 100644
index d2d867c..0000000
Binary files a/tests/dataset/img25.jpg and /dev/null differ
diff --git a/tests/dataset/img26.jpg b/tests/dataset/img26.jpg
deleted file mode 100644
index cad2093..0000000
Binary files a/tests/dataset/img26.jpg and /dev/null differ
diff --git a/tests/dataset/img27.jpg b/tests/dataset/img27.jpg
deleted file mode 100644
index b0bc802..0000000
Binary files a/tests/dataset/img27.jpg and /dev/null differ
diff --git a/tests/dataset/img28.jpg b/tests/dataset/img28.jpg
deleted file mode 100644
index a643428..0000000
Binary files a/tests/dataset/img28.jpg and /dev/null differ
diff --git a/tests/dataset/img29.jpg b/tests/dataset/img29.jpg
deleted file mode 100644
index 8383924..0000000
Binary files a/tests/dataset/img29.jpg and /dev/null differ
diff --git a/tests/dataset/img3.jpg b/tests/dataset/img3.jpg
deleted file mode 100644
index 08e8dcc..0000000
Binary files a/tests/dataset/img3.jpg and /dev/null differ
diff --git a/tests/dataset/img30.jpg b/tests/dataset/img30.jpg
deleted file mode 100644
index f271132..0000000
Binary files a/tests/dataset/img30.jpg and /dev/null differ
diff --git a/tests/dataset/img31.jpg b/tests/dataset/img31.jpg
deleted file mode 100644
index 342d6fd..0000000
Binary files a/tests/dataset/img31.jpg and /dev/null differ
diff --git a/tests/dataset/img32.jpg b/tests/dataset/img32.jpg
deleted file mode 100644
index 8df9dd7..0000000
Binary files a/tests/dataset/img32.jpg and /dev/null differ
diff --git a/tests/dataset/img33.jpg b/tests/dataset/img33.jpg
deleted file mode 100644
index c6412be..0000000
Binary files a/tests/dataset/img33.jpg and /dev/null differ
diff --git a/tests/dataset/img34.jpg b/tests/dataset/img34.jpg
deleted file mode 100644
index 47ea382..0000000
Binary files a/tests/dataset/img34.jpg and /dev/null differ
diff --git a/tests/dataset/img35.jpg b/tests/dataset/img35.jpg
deleted file mode 100644
index b20c97d..0000000
Binary files a/tests/dataset/img35.jpg and /dev/null differ
diff --git a/tests/dataset/img36.jpg b/tests/dataset/img36.jpg
deleted file mode 100644
index 0d4ea95..0000000
Binary files a/tests/dataset/img36.jpg and /dev/null differ
diff --git a/tests/dataset/img37.jpg b/tests/dataset/img37.jpg
deleted file mode 100644
index 9026e8c..0000000
Binary files a/tests/dataset/img37.jpg and /dev/null differ
diff --git a/tests/dataset/img38.jpg b/tests/dataset/img38.jpg
deleted file mode 100644
index b9bb7f1..0000000
Binary files a/tests/dataset/img38.jpg and /dev/null differ
diff --git a/tests/dataset/img39.jpg b/tests/dataset/img39.jpg
deleted file mode 100644
index 751b664..0000000
Binary files a/tests/dataset/img39.jpg and /dev/null differ
diff --git a/tests/dataset/img4.jpg b/tests/dataset/img4.jpg
deleted file mode 100644
index b767bd0..0000000
Binary files a/tests/dataset/img4.jpg and /dev/null differ
diff --git a/tests/dataset/img40.jpg b/tests/dataset/img40.jpg
deleted file mode 100644
index 71ebe3b..0000000
Binary files a/tests/dataset/img40.jpg and /dev/null differ
diff --git a/tests/dataset/img41.jpg b/tests/dataset/img41.jpg
deleted file mode 100644
index cad0827..0000000
Binary files a/tests/dataset/img41.jpg and /dev/null differ
diff --git a/tests/dataset/img42.jpg b/tests/dataset/img42.jpg
deleted file mode 100644
index 1d42636..0000000
Binary files a/tests/dataset/img42.jpg and /dev/null differ
diff --git a/tests/dataset/img43.jpg b/tests/dataset/img43.jpg
deleted file mode 100644
index 5aaedfe..0000000
Binary files a/tests/dataset/img43.jpg and /dev/null differ
diff --git a/tests/dataset/img44.jpg b/tests/dataset/img44.jpg
deleted file mode 100644
index 4cc86d5..0000000
Binary files a/tests/dataset/img44.jpg and /dev/null differ
diff --git a/tests/dataset/img45.jpg b/tests/dataset/img45.jpg
deleted file mode 100644
index 2c9da24..0000000
Binary files a/tests/dataset/img45.jpg and /dev/null differ
diff --git a/tests/dataset/img46.jpg b/tests/dataset/img46.jpg
deleted file mode 100644
index 5d49cb3..0000000
Binary files a/tests/dataset/img46.jpg and /dev/null differ
diff --git a/tests/dataset/img47.jpg b/tests/dataset/img47.jpg
deleted file mode 100644
index 0cc3179..0000000
Binary files a/tests/dataset/img47.jpg and /dev/null differ
diff --git a/tests/dataset/img48.jpg b/tests/dataset/img48.jpg
deleted file mode 100644
index d3ae481..0000000
Binary files a/tests/dataset/img48.jpg and /dev/null differ
diff --git a/tests/dataset/img49.jpg b/tests/dataset/img49.jpg
deleted file mode 100644
index 5f6d3c9..0000000
Binary files a/tests/dataset/img49.jpg and /dev/null differ
diff --git a/tests/dataset/img5.jpg b/tests/dataset/img5.jpg
deleted file mode 100644
index 29b6a2f..0000000
Binary files a/tests/dataset/img5.jpg and /dev/null differ
diff --git a/tests/dataset/img50.jpg b/tests/dataset/img50.jpg
deleted file mode 100644
index 3abe0b0..0000000
Binary files a/tests/dataset/img50.jpg and /dev/null differ
diff --git a/tests/dataset/img51.jpg b/tests/dataset/img51.jpg
deleted file mode 100644
index 9679499..0000000
Binary files a/tests/dataset/img51.jpg and /dev/null differ
diff --git a/tests/dataset/img53.jpg b/tests/dataset/img53.jpg
deleted file mode 100644
index 1487a32..0000000
Binary files a/tests/dataset/img53.jpg and /dev/null differ
diff --git a/tests/dataset/img54.jpg b/tests/dataset/img54.jpg
deleted file mode 100644
index f736571..0000000
Binary files a/tests/dataset/img54.jpg and /dev/null differ
diff --git a/tests/dataset/img55.jpg b/tests/dataset/img55.jpg
deleted file mode 100644
index 9ed4612..0000000
Binary files a/tests/dataset/img55.jpg and /dev/null differ
diff --git a/tests/dataset/img56.jpg b/tests/dataset/img56.jpg
deleted file mode 100644
index 79ac53f..0000000
Binary files a/tests/dataset/img56.jpg and /dev/null differ
diff --git a/tests/dataset/img57.jpg b/tests/dataset/img57.jpg
deleted file mode 100644
index 175fef4..0000000
Binary files a/tests/dataset/img57.jpg and /dev/null differ
diff --git a/tests/dataset/img58.jpg b/tests/dataset/img58.jpg
deleted file mode 100644
index dcee1b8..0000000
Binary files a/tests/dataset/img58.jpg and /dev/null differ
diff --git a/tests/dataset/img59.jpg b/tests/dataset/img59.jpg
deleted file mode 100644
index d394873..0000000
Binary files a/tests/dataset/img59.jpg and /dev/null differ
diff --git a/tests/dataset/img6.jpg b/tests/dataset/img6.jpg
deleted file mode 100644
index 03dcd3e..0000000
Binary files a/tests/dataset/img6.jpg and /dev/null differ
diff --git a/tests/dataset/img61.jpg b/tests/dataset/img61.jpg
deleted file mode 100644
index 30c4939..0000000
Binary files a/tests/dataset/img61.jpg and /dev/null differ
diff --git a/tests/dataset/img62.jpg b/tests/dataset/img62.jpg
deleted file mode 100644
index 74a7e98..0000000
Binary files a/tests/dataset/img62.jpg and /dev/null differ
diff --git a/tests/dataset/img67.jpg b/tests/dataset/img67.jpg
deleted file mode 100644
index f09699d..0000000
Binary files a/tests/dataset/img67.jpg and /dev/null differ
diff --git a/tests/dataset/img7.jpg b/tests/dataset/img7.jpg
deleted file mode 100644
index 7cf10cc..0000000
Binary files a/tests/dataset/img7.jpg and /dev/null differ
diff --git a/tests/dataset/img8.jpg b/tests/dataset/img8.jpg
deleted file mode 100644
index 53a55e9..0000000
Binary files a/tests/dataset/img8.jpg and /dev/null differ
diff --git a/tests/dataset/img9.jpg b/tests/dataset/img9.jpg
deleted file mode 100644
index d4ea20f..0000000
Binary files a/tests/dataset/img9.jpg and /dev/null differ
diff --git a/tests/dataset/master.csv b/tests/dataset/master.csv
deleted file mode 100644
index 5b05a57..0000000
--- a/tests/dataset/master.csv
+++ /dev/null
@@ -1,301 +0,0 @@
-file_x,file_y,Decision
-img20.jpg,img21.jpg,Yes
-img16.jpg,img17.jpg,Yes
-img3.jpg,img12.jpg,Yes
-img22.jpg,img23.jpg,Yes
-img24.jpg,img25.jpg,Yes
-img1.jpg,img2.jpg,Yes
-img1.jpg,img4.jpg,Yes
-img1.jpg,img5.jpg,Yes
-img1.jpg,img6.jpg,Yes
-img1.jpg,img7.jpg,Yes
-img1.jpg,img10.jpg,Yes
-img1.jpg,img11.jpg,Yes
-img2.jpg,img4.jpg,Yes
-img2.jpg,img5.jpg,Yes
-img2.jpg,img6.jpg,Yes
-img2.jpg,img7.jpg,Yes
-img2.jpg,img10.jpg,Yes
-img2.jpg,img11.jpg,Yes
-img4.jpg,img5.jpg,Yes
-img4.jpg,img6.jpg,Yes
-img4.jpg,img7.jpg,Yes
-img4.jpg,img10.jpg,Yes
-img4.jpg,img11.jpg,Yes
-img5.jpg,img6.jpg,Yes
-img5.jpg,img7.jpg,Yes
-img5.jpg,img10.jpg,Yes
-img5.jpg,img11.jpg,Yes
-img6.jpg,img7.jpg,Yes
-img6.jpg,img10.jpg,Yes
-img6.jpg,img11.jpg,Yes
-img7.jpg,img10.jpg,Yes
-img7.jpg,img11.jpg,Yes
-img10.jpg,img11.jpg,Yes
-img13.jpg,img14.jpg,Yes
-img13.jpg,img15.jpg,Yes
-img14.jpg,img15.jpg,Yes
-img18.jpg,img19.jpg,Yes
-img8.jpg,img9.jpg,Yes
-img20.jpg,img16.jpg,No
-img20.jpg,img17.jpg,No
-img21.jpg,img16.jpg,No
-img21.jpg,img17.jpg,No
-img20.jpg,img3.jpg,No
-img20.jpg,img12.jpg,No
-img21.jpg,img3.jpg,No
-img21.jpg,img12.jpg,No
-img20.jpg,img22.jpg,No
-img20.jpg,img23.jpg,No
-img21.jpg,img22.jpg,No
-img21.jpg,img23.jpg,No
-img20.jpg,img24.jpg,No
-img20.jpg,img25.jpg,No
-img21.jpg,img24.jpg,No
-img21.jpg,img25.jpg,No
-img20.jpg,img1.jpg,No
-img20.jpg,img2.jpg,No
-img20.jpg,img4.jpg,No
-img20.jpg,img5.jpg,No
-img20.jpg,img6.jpg,No
-img20.jpg,img7.jpg,No
-img20.jpg,img10.jpg,No
-img20.jpg,img11.jpg,No
-img21.jpg,img1.jpg,No
-img21.jpg,img2.jpg,No
-img21.jpg,img4.jpg,No
-img21.jpg,img5.jpg,No
-img21.jpg,img6.jpg,No
-img21.jpg,img7.jpg,No
-img21.jpg,img10.jpg,No
-img21.jpg,img11.jpg,No
-img20.jpg,img13.jpg,No
-img20.jpg,img14.jpg,No
-img20.jpg,img15.jpg,No
-img21.jpg,img13.jpg,No
-img21.jpg,img14.jpg,No
-img21.jpg,img15.jpg,No
-img20.jpg,img18.jpg,No
-img20.jpg,img19.jpg,No
-img21.jpg,img18.jpg,No
-img21.jpg,img19.jpg,No
-img20.jpg,img8.jpg,No
-img20.jpg,img9.jpg,No
-img21.jpg,img8.jpg,No
-img21.jpg,img9.jpg,No
-img16.jpg,img3.jpg,No
-img16.jpg,img12.jpg,No
-img17.jpg,img3.jpg,No
-img17.jpg,img12.jpg,No
-img16.jpg,img22.jpg,No
-img16.jpg,img23.jpg,No
-img17.jpg,img22.jpg,No
-img17.jpg,img23.jpg,No
-img16.jpg,img24.jpg,No
-img16.jpg,img25.jpg,No
-img17.jpg,img24.jpg,No
-img17.jpg,img25.jpg,No
-img16.jpg,img1.jpg,No
-img16.jpg,img2.jpg,No
-img16.jpg,img4.jpg,No
-img16.jpg,img5.jpg,No
-img16.jpg,img6.jpg,No
-img16.jpg,img7.jpg,No
-img16.jpg,img10.jpg,No
-img16.jpg,img11.jpg,No
-img17.jpg,img1.jpg,No
-img17.jpg,img2.jpg,No
-img17.jpg,img4.jpg,No
-img17.jpg,img5.jpg,No
-img17.jpg,img6.jpg,No
-img17.jpg,img7.jpg,No
-img17.jpg,img10.jpg,No
-img17.jpg,img11.jpg,No
-img16.jpg,img13.jpg,No
-img16.jpg,img14.jpg,No
-img16.jpg,img15.jpg,No
-img17.jpg,img13.jpg,No
-img17.jpg,img14.jpg,No
-img17.jpg,img15.jpg,No
-img16.jpg,img18.jpg,No
-img16.jpg,img19.jpg,No
-img17.jpg,img18.jpg,No
-img17.jpg,img19.jpg,No
-img16.jpg,img8.jpg,No
-img16.jpg,img9.jpg,No
-img17.jpg,img8.jpg,No
-img17.jpg,img9.jpg,No
-img3.jpg,img22.jpg,No
-img3.jpg,img23.jpg,No
-img12.jpg,img22.jpg,No
-img12.jpg,img23.jpg,No
-img3.jpg,img24.jpg,No
-img3.jpg,img25.jpg,No
-img12.jpg,img24.jpg,No
-img12.jpg,img25.jpg,No
-img3.jpg,img1.jpg,No
-img3.jpg,img2.jpg,No
-img3.jpg,img4.jpg,No
-img3.jpg,img5.jpg,No
-img3.jpg,img6.jpg,No
-img3.jpg,img7.jpg,No
-img3.jpg,img10.jpg,No
-img3.jpg,img11.jpg,No
-img12.jpg,img1.jpg,No
-img12.jpg,img2.jpg,No
-img12.jpg,img4.jpg,No
-img12.jpg,img5.jpg,No
-img12.jpg,img6.jpg,No
-img12.jpg,img7.jpg,No
-img12.jpg,img10.jpg,No
-img12.jpg,img11.jpg,No
-img3.jpg,img13.jpg,No
-img3.jpg,img14.jpg,No
-img3.jpg,img15.jpg,No
-img12.jpg,img13.jpg,No
-img12.jpg,img14.jpg,No
-img12.jpg,img15.jpg,No
-img3.jpg,img18.jpg,No
-img3.jpg,img19.jpg,No
-img12.jpg,img18.jpg,No
-img12.jpg,img19.jpg,No
-img3.jpg,img8.jpg,No
-img3.jpg,img9.jpg,No
-img12.jpg,img8.jpg,No
-img12.jpg,img9.jpg,No
-img22.jpg,img24.jpg,No
-img22.jpg,img25.jpg,No
-img23.jpg,img24.jpg,No
-img23.jpg,img25.jpg,No
-img22.jpg,img1.jpg,No
-img22.jpg,img2.jpg,No
-img22.jpg,img4.jpg,No
-img22.jpg,img5.jpg,No
-img22.jpg,img6.jpg,No
-img22.jpg,img7.jpg,No
-img22.jpg,img10.jpg,No
-img22.jpg,img11.jpg,No
-img23.jpg,img1.jpg,No
-img23.jpg,img2.jpg,No
-img23.jpg,img4.jpg,No
-img23.jpg,img5.jpg,No
-img23.jpg,img6.jpg,No
-img23.jpg,img7.jpg,No
-img23.jpg,img10.jpg,No
-img23.jpg,img11.jpg,No
-img22.jpg,img13.jpg,No
-img22.jpg,img14.jpg,No
-img22.jpg,img15.jpg,No
-img23.jpg,img13.jpg,No
-img23.jpg,img14.jpg,No
-img23.jpg,img15.jpg,No
-img22.jpg,img18.jpg,No
-img22.jpg,img19.jpg,No
-img23.jpg,img18.jpg,No
-img23.jpg,img19.jpg,No
-img22.jpg,img8.jpg,No
-img22.jpg,img9.jpg,No
-img23.jpg,img8.jpg,No
-img23.jpg,img9.jpg,No
-img24.jpg,img1.jpg,No
-img24.jpg,img2.jpg,No
-img24.jpg,img4.jpg,No
-img24.jpg,img5.jpg,No
-img24.jpg,img6.jpg,No
-img24.jpg,img7.jpg,No
-img24.jpg,img10.jpg,No
-img24.jpg,img11.jpg,No
-img25.jpg,img1.jpg,No
-img25.jpg,img2.jpg,No
-img25.jpg,img4.jpg,No
-img25.jpg,img5.jpg,No
-img25.jpg,img6.jpg,No
-img25.jpg,img7.jpg,No
-img25.jpg,img10.jpg,No
-img25.jpg,img11.jpg,No
-img24.jpg,img13.jpg,No
-img24.jpg,img14.jpg,No
-img24.jpg,img15.jpg,No
-img25.jpg,img13.jpg,No
-img25.jpg,img14.jpg,No
-img25.jpg,img15.jpg,No
-img24.jpg,img18.jpg,No
-img24.jpg,img19.jpg,No
-img25.jpg,img18.jpg,No
-img25.jpg,img19.jpg,No
-img24.jpg,img8.jpg,No
-img24.jpg,img9.jpg,No
-img25.jpg,img8.jpg,No
-img25.jpg,img9.jpg,No
-img1.jpg,img13.jpg,No
-img1.jpg,img14.jpg,No
-img1.jpg,img15.jpg,No
-img2.jpg,img13.jpg,No
-img2.jpg,img14.jpg,No
-img2.jpg,img15.jpg,No
-img4.jpg,img13.jpg,No
-img4.jpg,img14.jpg,No
-img4.jpg,img15.jpg,No
-img5.jpg,img13.jpg,No
-img5.jpg,img14.jpg,No
-img5.jpg,img15.jpg,No
-img6.jpg,img13.jpg,No
-img6.jpg,img14.jpg,No
-img6.jpg,img15.jpg,No
-img7.jpg,img13.jpg,No
-img7.jpg,img14.jpg,No
-img7.jpg,img15.jpg,No
-img10.jpg,img13.jpg,No
-img10.jpg,img14.jpg,No
-img10.jpg,img15.jpg,No
-img11.jpg,img13.jpg,No
-img11.jpg,img14.jpg,No
-img11.jpg,img15.jpg,No
-img1.jpg,img18.jpg,No
-img1.jpg,img19.jpg,No
-img2.jpg,img18.jpg,No
-img2.jpg,img19.jpg,No
-img4.jpg,img18.jpg,No
-img4.jpg,img19.jpg,No
-img5.jpg,img18.jpg,No
-img5.jpg,img19.jpg,No
-img6.jpg,img18.jpg,No
-img6.jpg,img19.jpg,No
-img7.jpg,img18.jpg,No
-img7.jpg,img19.jpg,No
-img10.jpg,img18.jpg,No
-img10.jpg,img19.jpg,No
-img11.jpg,img18.jpg,No
-img11.jpg,img19.jpg,No
-img1.jpg,img8.jpg,No
-img1.jpg,img9.jpg,No
-img2.jpg,img8.jpg,No
-img2.jpg,img9.jpg,No
-img4.jpg,img8.jpg,No
-img4.jpg,img9.jpg,No
-img5.jpg,img8.jpg,No
-img5.jpg,img9.jpg,No
-img6.jpg,img8.jpg,No
-img6.jpg,img9.jpg,No
-img7.jpg,img8.jpg,No
-img7.jpg,img9.jpg,No
-img10.jpg,img8.jpg,No
-img10.jpg,img9.jpg,No
-img11.jpg,img8.jpg,No
-img11.jpg,img9.jpg,No
-img13.jpg,img18.jpg,No
-img13.jpg,img19.jpg,No
-img14.jpg,img18.jpg,No
-img14.jpg,img19.jpg,No
-img15.jpg,img18.jpg,No
-img15.jpg,img19.jpg,No
-img13.jpg,img8.jpg,No
-img13.jpg,img9.jpg,No
-img14.jpg,img8.jpg,No
-img14.jpg,img9.jpg,No
-img15.jpg,img8.jpg,No
-img15.jpg,img9.jpg,No
-img18.jpg,img8.jpg,No
-img18.jpg,img9.jpg,No
-img19.jpg,img8.jpg,No
-img19.jpg,img9.jpg,No
\ No newline at end of file
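The deleted master.csv holds the ground-truth pairs (file_x, file_y, Decision) behind the per-pair distance table removed further above. A minimal sketch of how such pairs can be scored, assuming the images sit under tests/dataset and using the public DeepFace.verify call; the script that actually generated the distance table is not part of this diff, so paths and the output file name below are illustrative assumptions only.

```python
# Sketch: score the ground-truth pairs from master.csv with DeepFace.verify.
# Paths, the chosen metric and the output file name are assumptions for illustration.
import pandas as pd
from deepface import DeepFace

pairs = pd.read_csv("tests/dataset/master.csv")  # columns: file_x, file_y, Decision

rows = []
for _, pair in pairs.iterrows():
    result = DeepFace.verify(
        img1_path=f"tests/dataset/{pair.file_x}",
        img2_path=f"tests/dataset/{pair.file_y}",
        model_name="VGG-Face",       # default facial recognition model
        distance_metric="cosine",    # cosine, euclidean or euclidean_l2
        enforce_detection=False,
    )
    rows.append(
        {
            "file_x": pair.file_x,
            "file_y": pair.file_y,
            "label": pair.Decision,
            "distance": result["distance"],
            "threshold": result["threshold"],
            "verified": result["verified"],
        }
    )

pd.DataFrame(rows).to_csv("pair_distances.csv", index=False)
```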
diff --git a/tests/dataset/selfie-many-people.jpg b/tests/dataset/selfie-many-people.jpg
deleted file mode 100644
index a6fed51..0000000
Binary files a/tests/dataset/selfie-many-people.jpg and /dev/null differ
diff --git a/tests/face-recognition-how.py b/tests/face-recognition-how.py
deleted file mode 100644
index cb26f2f..0000000
--- a/tests/face-recognition-how.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# 3rd party dependencies
-import matplotlib.pyplot as plt
-import numpy as np
-import cv2
-
-# project dependencies
-from deepface import DeepFace
-from deepface.modules import verification
-from deepface.models.FacialRecognition import FacialRecognition
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# ----------------------------------------------
-# build face recognition model
-
-model_name = "VGG-Face"
-
-model: FacialRecognition = DeepFace.build_model(task="facial_recognition", model_name=model_name)
-
-target_size = model.input_shape
-
-logger.info(f"target_size: {target_size}")
-
-# ----------------------------------------------
-# load images and find embeddings
-
-img1 = DeepFace.extract_faces(img_path="dataset/img1.jpg")[0]["face"]
-img1 = cv2.resize(img1, target_size)
-img1 = np.expand_dims(img1, axis=0) # to (1, 224, 224, 3)
-img1_representation = model.forward(img1)
-
-img2 = DeepFace.extract_faces(img_path="dataset/img3.jpg")[0]["face"]
-img2 = cv2.resize(img2, target_size)
-img2 = np.expand_dims(img2, axis=0)
-img2_representation = model.forward(img2)
-
-img1_representation = np.array(img1_representation)
-img2_representation = np.array(img2_representation)
-
-# ----------------------------------------------
-# distance between two images - euclidean distance formula
-distance_vector = np.square(img1_representation - img2_representation)
-current_distance = np.sqrt(distance_vector.sum())
-logger.info(f"Euclidean distance: {current_distance}")
-
-threshold = verification.find_threshold(model_name=model_name, distance_metric="euclidean")
-logger.info(f"Threshold for {model_name}-euclidean pair is {threshold}")
-
-if current_distance < threshold:
-    logger.info(
-        f"This pair belongs to the same person because its distance {current_distance}"
-        f" is less than the threshold {threshold}"
-    )
-else:
-    logger.info(
-        f"This pair belongs to different persons because its distance {current_distance}"
-        f" is greater than or equal to the threshold {threshold}"
-    )
-# ----------------------------------------------
-# tile the vectors so that they are easier to see in the plot
-
-img1_graph = []
-img2_graph = []
-distance_graph = []
-
-for i in range(0, 200):
- img1_graph.append(img1_representation)
- img2_graph.append(img2_representation)
- distance_graph.append(distance_vector)
-
-img1_graph = np.array(img1_graph)
-img2_graph = np.array(img2_graph)
-distance_graph = np.array(distance_graph)
-
-# ----------------------------------------------
-# plotting
-
-fig = plt.figure()
-
-ax1 = fig.add_subplot(3, 2, 1)
-plt.imshow(img1[0])
-plt.axis("off")
-
-ax2 = fig.add_subplot(3, 2, 2)
-im = plt.imshow(img1_graph, interpolation="nearest", cmap=plt.cm.ocean)
-plt.colorbar()
-
-ax3 = fig.add_subplot(3, 2, 3)
-plt.imshow(img2[0])
-plt.axis("off")
-
-ax4 = fig.add_subplot(3, 2, 4)
-im = plt.imshow(img2_graph, interpolation="nearest", cmap=plt.cm.ocean)
-plt.colorbar()
-
-ax5 = fig.add_subplot(3, 2, 5)
-plt.text(0.35, 0, f"Distance: {current_distance}")
-plt.axis("off")
-
-ax6 = fig.add_subplot(3, 2, 6)
-im = plt.imshow(distance_graph, interpolation="nearest", cmap=plt.cm.ocean)
-plt.colorbar()
-
-plt.show()
-
-# ----------------------------------------------
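The deleted face-recognition-how.py walks the verification pipeline by hand: extract faces, compute embeddings, take the Euclidean distance and compare it against verification.find_threshold. As an illustration (not part of the deleted script), the same decision is available through a single high-level call; the image pair below mirrors the one used in the script.

```python
# Sketch: single-call equivalent of the manual embedding/distance/threshold flow above.
from deepface import DeepFace

result = DeepFace.verify(
    img1_path="dataset/img1.jpg",
    img2_path="dataset/img3.jpg",
    model_name="VGG-Face",
    distance_metric="euclidean",
)

# result carries the computed distance, the model/metric threshold and the decision
print(result["distance"], result["threshold"], result["verified"])
```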
diff --git a/tests/overlay.py b/tests/overlay.py
deleted file mode 100644
index 99cd977..0000000
--- a/tests/overlay.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# 3rd party dependencies
-import cv2
-import matplotlib.pyplot as plt
-
-# project dependencies
-from deepface.modules import streaming
-from deepface import DeepFace
-
-img_path = "dataset/img1.jpg"
-img = cv2.imread(img_path)
-
-overlay_img_path = "dataset/img6.jpg"
-face_objs = DeepFace.extract_faces(overlay_img_path)
-overlay_img = face_objs[0]["face"][:, :, ::-1] * 255
-
-overlay_img = cv2.resize(overlay_img, (112, 112))
-
-raw_img = img.copy()
-
-demographies = DeepFace.analyze(img_path=img_path, actions=("age", "gender", "emotion"))
-demography = demographies[0]
-
-x = demography["region"]["x"]
-y = demography["region"]["y"]
-w = demography["region"]["w"]
-h = demography["region"]["h"]
-
-img = streaming.highlight_facial_areas(img=img, faces_coordinates=[(x, y, w, h)])
-
-img = streaming.overlay_emotion(
- img=img,
- emotion_probas=demography["emotion"],
- x=x,
- y=y,
- w=w,
- h=h,
-)
-
-img = streaming.overlay_age_gender(
- img=img,
- apparent_age=demography["age"],
- gender=demography["dominant_gender"][0:1],
- x=x,
- y=y,
- w=w,
- h=h,
-)
-
-img = streaming.overlay_identified_face(
- img=img,
- target_img=overlay_img,
- label="angelina",
- x=x,
- y=y,
- w=w,
- h=h,
-)
-
-plt.imshow(img[:, :, ::-1])
-plt.show()
diff --git a/tests/stream.py b/tests/stream.py
deleted file mode 100644
index d0cd3c9..0000000
--- a/tests/stream.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from deepface import DeepFace
-
-DeepFace.stream("dataset", enable_face_analysis=False, anti_spoofing=True) # opencv
-# DeepFace.stream("dataset", detector_backend = 'opencv')
-# DeepFace.stream("dataset", detector_backend = 'ssd')
-# DeepFace.stream("dataset", detector_backend = 'mtcnn')
-# DeepFace.stream("dataset", detector_backend = 'dlib')
-# DeepFace.stream("dataset", detector_backend = 'retinaface')
diff --git a/tests/test_analyze.py b/tests/test_analyze.py
deleted file mode 100644
index bad4426..0000000
--- a/tests/test_analyze.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# 3rd party dependencies
-import cv2
-
-# project dependencies
-from deepface import DeepFace
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-
-detectors = ["opencv", "mtcnn"]
-
-
-def test_standard_analyze():
- img = "dataset/img4.jpg"
- demography_objs = DeepFace.analyze(img, silent=True)
- for demography in demography_objs:
- logger.debug(demography)
- assert demography["age"] > 20 and demography["age"] < 40
- assert demography["dominant_gender"] == "Woman"
- logger.info("✅ test standard analyze done")
-
-
-def test_analyze_with_all_actions_as_tuple():
- img = "dataset/img4.jpg"
- demography_objs = DeepFace.analyze(
- img, actions=("age", "gender", "race", "emotion"), silent=True
- )
-
- for demography in demography_objs:
- logger.debug(f"Demography: {demography}")
- age = demography["age"]
- gender = demography["dominant_gender"]
- race = demography["dominant_race"]
- emotion = demography["dominant_emotion"]
- logger.debug(f"Age: {age}")
- logger.debug(f"Gender: {gender}")
- logger.debug(f"Race: {race}")
- logger.debug(f"Emotion: {emotion}")
- assert demography.get("age") is not None
- assert demography.get("dominant_gender") is not None
- assert demography.get("dominant_race") is not None
- assert demography.get("dominant_emotion") is not None
-
- logger.info("✅ test analyze for all actions as tuple done")
-
-
-def test_analyze_with_all_actions_as_list():
- img = "dataset/img4.jpg"
- demography_objs = DeepFace.analyze(
- img, actions=["age", "gender", "race", "emotion"], silent=True
- )
-
- for demography in demography_objs:
- logger.debug(f"Demography: {demography}")
- age = demography["age"]
- gender = demography["dominant_gender"]
- race = demography["dominant_race"]
- emotion = demography["dominant_emotion"]
- logger.debug(f"Age: {age}")
- logger.debug(f"Gender: {gender}")
- logger.debug(f"Race: {race}")
- logger.debug(f"Emotion: {emotion}")
- assert demography.get("age") is not None
- assert demography.get("dominant_gender") is not None
- assert demography.get("dominant_race") is not None
- assert demography.get("dominant_emotion") is not None
-
- logger.info("✅ test analyze for all actions as array done")
-
-
-def test_analyze_for_some_actions():
- img = "dataset/img4.jpg"
- demography_objs = DeepFace.analyze(img, ["age", "gender"], silent=True)
-
- for demography in demography_objs:
- age = demography["age"]
- gender = demography["dominant_gender"]
-
- logger.debug(f"Age: { age }")
- logger.debug(f"Gender: {gender}")
-
- assert demography.get("age") is not None
- assert demography.get("dominant_gender") is not None
-
- # these are not in actions
- assert demography.get("dominant_race") is None
- assert demography.get("dominant_emotion") is None
-
- logger.info("✅ test analyze for some actions done")
-
-
-def test_analyze_for_preloaded_image():
- img = cv2.imread("dataset/img1.jpg")
- resp_objs = DeepFace.analyze(img, silent=True)
- for resp_obj in resp_objs:
- logger.debug(resp_obj)
- assert resp_obj["age"] > 20 and resp_obj["age"] < 40
- assert resp_obj["dominant_gender"] == "Woman"
-
- logger.info("✅ test analyze for pre-loaded image done")
-
-
-def test_analyze_for_different_detectors():
- img_paths = [
- "dataset/img1.jpg",
- "dataset/img5.jpg",
- "dataset/img6.jpg",
- "dataset/img8.jpg",
- "dataset/img1.jpg",
- "dataset/img2.jpg",
- "dataset/img1.jpg",
- "dataset/img2.jpg",
- "dataset/img6.jpg",
- "dataset/img6.jpg",
- ]
-
- for img_path in img_paths:
- for detector in detectors:
- results = DeepFace.analyze(
- img_path, actions=("gender",), detector_backend=detector, enforce_detection=False
- )
- for result in results:
- logger.debug(result)
-
- # validate keys
- assert "gender" in result.keys()
- assert "dominant_gender" in result.keys() and result["dominant_gender"] in [
- "Man",
- "Woman",
- ]
-
- # validate probabilities
- if result["dominant_gender"] == "Man":
- assert result["gender"]["Man"] > result["gender"]["Woman"]
- else:
- assert result["gender"]["Man"] < result["gender"]["Woman"]
diff --git a/tests/test_api.py b/tests/test_api.py
deleted file mode 100644
index ef2db73..0000000
--- a/tests/test_api.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# built-in dependencies
-import base64
-import unittest
-
-# project dependencies
-from deepface.api.src.app import create_app
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-
-class TestVerifyEndpoint(unittest.TestCase):
- def setUp(self):
- app = create_app()
- app.config["DEBUG"] = True
- app.config["TESTING"] = True
- self.app = app.test_client()
-
- def test_tp_verify(self):
- data = {
- "img1_path": "dataset/img1.jpg",
- "img2_path": "dataset/img2.jpg",
- }
- response = self.app.post("/verify", json=data)
- assert response.status_code == 200
- result = response.json
- logger.debug(result)
-
- assert result.get("verified") is not None
- assert result.get("model") is not None
- assert result.get("similarity_metric") is not None
- assert result.get("detector_backend") is not None
- assert result.get("distance") is not None
- assert result.get("threshold") is not None
- assert result.get("facial_areas") is not None
-
- assert result.get("verified") is True
-
- logger.info("✅ true-positive verification api test is done")
-
- def test_tn_verify(self):
- data = {
- "img1_path": "dataset/img1.jpg",
- "img2_path": "dataset/img2.jpg",
- }
- response = self.app.post("/verify", json=data)
- assert response.status_code == 200
- result = response.json
- logger.debug(result)
-
- assert result.get("verified") is not None
- assert result.get("model") is not None
- assert result.get("similarity_metric") is not None
- assert result.get("detector_backend") is not None
- assert result.get("distance") is not None
- assert result.get("threshold") is not None
- assert result.get("facial_areas") is not None
-
- assert result.get("verified") is True
-
- logger.info("✅ true-negative verification api test is done")
-
- def test_represent(self):
- data = {
- "img": "dataset/img1.jpg",
- }
- response = self.app.post("/represent", json=data)
- assert response.status_code == 200
- result = response.json
- logger.debug(result)
- assert result.get("results") is not None
- assert isinstance(result["results"], list) is True
- assert len(result["results"]) > 0
- for i in result["results"]:
- assert i.get("embedding") is not None
- assert isinstance(i.get("embedding"), list) is True
- assert len(i.get("embedding")) == 4096
- assert i.get("face_confidence") is not None
- assert i.get("facial_area") is not None
-
- logger.info("✅ representation api test is done (for image path)")
-
- def test_represent_encoded(self):
- image_path = "dataset/img1.jpg"
- with open(image_path, "rb") as image_file:
- encoded_string = "data:image/jpeg;base64," + \
- base64.b64encode(image_file.read()).decode("utf8")
-
- data = {
- "model_name": "Facenet",
- "detector_backend": "mtcnn",
- "img": encoded_string
- }
-
- response = self.app.post("/represent", json=data)
- assert response.status_code == 200
- result = response.json
- logger.debug(result)
- assert result.get("results") is not None
- assert isinstance(result["results"], list) is True
- assert len(result["results"]) > 0
- for i in result["results"]:
- assert i.get("embedding") is not None
- assert isinstance(i.get("embedding"), list) is True
- assert len(i.get("embedding")) == 128
- assert i.get("face_confidence") is not None
- assert i.get("facial_area") is not None
-
- logger.info("✅ representation api test is done (for encoded image)")
-
- def test_represent_url(self):
- data = {
- "model_name": "Facenet",
- "detector_backend": "mtcnn",
- "img": "https://github.com/serengil/deepface/blob/master/tests/dataset/couple.jpg?raw=true"
- }
-
- response = self.app.post("/represent", json=data)
- assert response.status_code == 200
- result = response.json
- logger.debug(result)
- assert result.get("results") is not None
- assert isinstance(result["results"], list) is True
- assert len(result["results"]) == 2 # 2 faces are in the image link
- for i in result["results"]:
- assert i.get("embedding") is not None
- assert isinstance(i.get("embedding"), list) is True
- assert len(i.get("embedding")) == 128
- assert i.get("face_confidence") is not None
- assert i.get("facial_area") is not None
-
- logger.info("✅ representation api test is done (for image url)")
-
- def test_analyze(self):
- data = {
- "img": "dataset/img1.jpg",
- }
- response = self.app.post("/analyze", json=data)
- assert response.status_code == 200
- result = response.json
- logger.debug(result)
- assert result.get("results") is not None
- assert isinstance(result["results"], list) is True
- assert len(result["results"]) > 0
- for i in result["results"]:
- assert i.get("age") is not None
- assert isinstance(i.get("age"), (int, float))
- assert i.get("dominant_gender") is not None
- assert i.get("dominant_gender") in ["Man", "Woman"]
- assert i.get("dominant_emotion") is not None
- assert i.get("dominant_race") is not None
-
- logger.info("✅ analyze api test is done")
-
- def test_analyze_inputformats(self):
- image_path = "dataset/couple.jpg"
- with open(image_path, "rb") as image_file:
- encoded_image = "data:image/jpeg;base64," + \
- base64.b64encode(image_file.read()).decode("utf8")
-
- image_sources = [
- # image path
- image_path,
- # image url
- f"https://github.com/serengil/deepface/blob/master/tests/{image_path}?raw=true",
- # encoded image
- encoded_image
- ]
-
- results = []
- for img in image_sources:
- data = {
- "img": img,
- }
- response = self.app.post("/analyze", json=data)
-
- assert response.status_code == 200
- result = response.json
- results.append(result)
-
- assert result.get("results") is not None
- assert isinstance(result["results"], list) is True
- assert len(result["results"]) > 0
- for i in result["results"]:
- assert i.get("age") is not None
- assert isinstance(i.get("age"), (int, float))
- assert i.get("dominant_gender") is not None
- assert i.get("dominant_gender") in ["Man", "Woman"]
- assert i.get("dominant_emotion") is not None
- assert i.get("dominant_race") is not None
-
- assert len(results[0]["results"]) == len(results[1]["results"])\
- and len(results[0]["results"]) == len(results[2]["results"])
-
- for i in range(len(results[0]['results'])):
- assert results[0]["results"][i]["dominant_emotion"] == results[1]["results"][i]["dominant_emotion"]\
- and results[0]["results"][i]["dominant_emotion"] == results[2]["results"][i]["dominant_emotion"]
-
- assert results[0]["results"][i]["dominant_gender"] == results[1]["results"][i]["dominant_gender"]\
- and results[0]["results"][i]["dominant_gender"] == results[2]["results"][i]["dominant_gender"]
-
- assert results[0]["results"][i]["dominant_race"] == results[1]["results"][i]["dominant_race"]\
- and results[0]["results"][i]["dominant_race"] == results[2]["results"][i]["dominant_race"]
-
- logger.info("✅ different inputs test is done")
-
- def test_invalid_verify(self):
- data = {
- "img1_path": "dataset/invalid_1.jpg",
- "img2_path": "dataset/invalid_2.jpg",
- }
- response = self.app.post("/verify", json=data)
- assert response.status_code == 400
- logger.info("✅ invalid verification request api test is done")
-
- def test_invalid_represent(self):
- data = {
- "img": "dataset/invalid_1.jpg",
- }
- response = self.app.post("/represent", json=data)
- assert response.status_code == 400
- logger.info("✅ invalid represent request api test is done")
-
- def test_invalid_analyze(self):
- data = {
- "img": "dataset/invalid.jpg",
- }
- response = self.app.post("/analyze", json=data)
- assert response.status_code == 400
diff --git a/tests/test_commons.py b/tests/test_commons.py
deleted file mode 100644
index 01a2210..0000000
--- a/tests/test_commons.py
+++ /dev/null
@@ -1,252 +0,0 @@
-# built-in dependencies
-import os
-from unittest import mock
-import pytest
-
-# project dependencies
-from deepface.commons import folder_utils, weight_utils, package_utils
-from deepface.commons.logger import Logger
-
-# pylint: disable=unused-argument
-
-logger = Logger()
-
-tf_version = package_utils.get_tf_major_version()
-
-# conditional imports
-if tf_version == 1:
- from keras.models import Sequential
- from keras.layers import (
- Dropout,
- Dense,
- )
-else:
- from tensorflow.keras.models import Sequential
- from tensorflow.keras.layers import (
- Dropout,
- Dense,
- )
-
-
-def test_loading_broken_weights():
- home = folder_utils.get_deepface_home()
- weight_file = os.path.join(home, ".deepface/weights/vgg_face_weights.h5")
-
- # construct a dummy model
- model = Sequential()
-
- # Add layers to the model
- model.add(
- Dense(units=64, activation="relu", input_shape=(100,))
- ) # Input layer with 100 features
- model.add(Dropout(0.5)) # Dropout layer to prevent overfitting
- model.add(Dense(units=32, activation="relu")) # Hidden layer
- model.add(Dense(units=10, activation="softmax")) # Output layer with 10 classes
-
- # VGG-Face's weights cannot be loaded into this model
- with pytest.raises(
- ValueError, match="An exception occurred while loading the pre-trained weights from"
- ):
- model = weight_utils.load_model_weights(model=model, weight_file=weight_file)
-
- logger.info("✅ test loading broken weight file is done")
-
-
-@mock.patch("deepface.commons.folder_utils.get_deepface_home") # Update with your actual module
-@mock.patch("gdown.download") # Mocking gdown's download function
-@mock.patch("os.path.isfile") # Mocking os.path.isfile
-@mock.patch("os.makedirs") # Mocking os.makedirs to avoid FileNotFoundError
-@mock.patch("zipfile.ZipFile") # Mocking the ZipFile class
-@mock.patch("bz2.BZ2File") # Mocking the BZ2File class
-@mock.patch("builtins.open", new_callable=mock.mock_open()) # Mocking open
-class TestDownloadWeightFeature:
- def test_download_weights_for_available_file(
- self,
- mock_open,
- mock_zipfile,
- mock_bz2file,
- mock_makedirs,
- mock_isfile,
- mock_gdown,
- mock_get_deepface_home,
- ):
- mock_isfile.return_value = True
- mock_get_deepface_home.return_value = "/mock/home"
-
- file_name = "model_weights.h5"
- source_url = "http://example.com/model_weights.zip"
-
- result = weight_utils.download_weights_if_necessary(file_name, source_url)
-
- assert result == os.path.join("/mock/home", ".deepface/weights", file_name)
-
- mock_gdown.assert_not_called()
- mock_zipfile.assert_not_called()
- mock_bz2file.assert_not_called()
- logger.info("✅ test download weights for available file is done")
-
- def test_download_weights_if_necessary_gdown_failure(
- self,
- mock_open,
- mock_zipfile,
- mock_bz2file,
- mock_makedirs,
- mock_isfile,
- mock_gdown,
- mock_get_deepface_home,
- ):
- # Setting up the mock return values
- mock_get_deepface_home.return_value = "/mock/home"
- mock_isfile.return_value = False # Simulate file not being present
-
- file_name = "model_weights.h5"
- source_url = "http://example.com/model_weights.h5"
-
- # Simulate gdown.download raising an exception
- mock_gdown.side_effect = Exception("Download failed!")
-
- # Call the function and check for ValueError
- with pytest.raises(
- ValueError,
- match=f"⛓️💥 An exception occurred while downloading {file_name} from {source_url}.",
- ):
- weight_utils.download_weights_if_necessary(file_name, source_url)
-
- logger.info("✅ test for downloading weights while gdown fails done")
-
- def test_download_weights_if_necessary_no_compression(
- self,
- mock_open,
- mock_zipfile,
- mock_bz2file,
- mock_makedirs,
- mock_isfile,
- mock_gdown,
- mock_get_deepface_home,
- ):
- # Setting up the mock return values
- mock_get_deepface_home.return_value = "/mock/home"
- mock_isfile.return_value = False # Simulate file not being present
-
- file_name = "model_weights.h5"
- source_url = "http://example.com/model_weights.h5"
-
- # Call the function
- result = weight_utils.download_weights_if_necessary(file_name, source_url)
-
- # Assert that gdown.download was called with the correct parameters
- mock_gdown.assert_called_once_with(
- source_url, "/mock/home/.deepface/weights/model_weights.h5", quiet=False
- )
-
- # Assert that the return value is correct
- assert result == "/mock/home/.deepface/weights/model_weights.h5"
-
- # Assert that zipfile.ZipFile and bz2.BZ2File were not called
- mock_zipfile.assert_not_called()
- mock_bz2file.assert_not_called()
-
- logger.info("✅ test download weights with no compression is done")
-
- def test_download_weights_if_necessary_zip(
- self,
- mock_open,
- mock_zipfile,
- mock_bz2file,
- mock_makedirs,
- mock_isfile,
- mock_gdown,
- mock_get_deepface_home,
- ):
- # Setting up the mock return values
- mock_get_deepface_home.return_value = "/mock/home"
- mock_isfile.return_value = False # Simulate file not being present
-
- file_name = "model_weights.h5"
- source_url = "http://example.com/model_weights.zip"
- compress_type = "zip"
-
- # Call the function
- result = weight_utils.download_weights_if_necessary(file_name, source_url, compress_type)
-
- # Assert that gdown.download was called with the correct parameters
- mock_gdown.assert_called_once_with(
- source_url, "/mock/home/.deepface/weights/model_weights.h5.zip", quiet=False
- )
-
- # Simulate the unzipping behavior
- mock_zipfile.return_value.__enter__.return_value.extractall = mock.Mock()
-
- # Call the function again to simulate unzipping
- with mock_zipfile.return_value as zip_ref:
- zip_ref.extractall("/mock/home/.deepface/weights")
-
- # Assert that the zip file was unzipped correctly
- zip_ref.extractall.assert_called_once_with("/mock/home/.deepface/weights")
-
- # Assert that the return value is correct
- assert result == "/mock/home/.deepface/weights/model_weights.h5"
-
- logger.info("✅ test download weights for zip is done")
-
- def test_download_weights_if_necessary_bz2(
- self,
- mock_open,
- mock_zipfile,
- mock_bz2file,
- mock_makedirs,
- mock_isfile,
- mock_gdown,
- mock_get_deepface_home,
- ):
-
- # Setting up the mock return values
- mock_get_deepface_home.return_value = "/mock/home"
- mock_isfile.return_value = False # Simulate file not being present
-
- file_name = "model_weights.h5"
- source_url = "http://example.com/model_weights.bz2"
- compress_type = "bz2"
-
- # Simulate the download success
- mock_gdown.return_value = None
-
- # Simulate the BZ2 file reading behavior
- mock_bz2file.return_value.__enter__.return_value.read.return_value = b"fake data"
-
- # Call the function under test
- result = weight_utils.download_weights_if_necessary(file_name, source_url, compress_type)
-
- # Assert that gdown.download was called with the correct parameters
- mock_gdown.assert_called_once_with(
- source_url, "/mock/home/.deepface/weights/model_weights.h5.bz2", quiet=False
- )
-
- # Ensure open() is called once for writing the decompressed data
- mock_open.assert_called_once_with("/mock/home/.deepface/weights/model_weights.h5", "wb")
-
- # TODO: find a way to check that write is called
-
- # Assert that the return value is correct
- assert result == "/mock/home/.deepface/weights/model_weights.h5"
-
- logger.info("✅ test download weights for bz2 is done")
-
- def test_download_weights_for_non_supported_compress_type(
- self,
- mock_open,
- mock_zipfile,
- mock_bz2file,
- mock_makedirs,
- mock_isfile,
- mock_gdown,
- mock_get_deepface_home,
- ):
- mock_isfile.return_value = False
-
- file_name = "model_weights.h5"
- source_url = "http://example.com/model_weights.bz2"
- compress_type = "7z"
- with pytest.raises(ValueError, match="unimplemented compress type - 7z"):
- _ = weight_utils.download_weights_if_necessary(file_name, source_url, compress_type)
- logger.info("✅ test download weights for unsupported compress type is done")
diff --git a/tests/test_enforce_detection.py b/tests/test_enforce_detection.py
deleted file mode 100644
index 5360563..0000000
--- a/tests/test_enforce_detection.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# 3rd party dependencies
-import pytest
-import numpy as np
-
-# project dependencies
-from deepface import DeepFace
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-
-def test_enabled_enforce_detection_for_non_facial_input():
- black_img = np.zeros([224, 224, 3])
-
- with pytest.raises(ValueError):
- DeepFace.represent(img_path=black_img)
-
- with pytest.raises(ValueError):
- DeepFace.verify(img1_path=black_img, img2_path=black_img)
-
- logger.info("✅ enabled enforce detection with non facial input tests done")
-
-
-def test_disabled_enforce_detection_for_non_facial_input_on_represent():
- black_img = np.zeros([224, 224, 3])
- objs = DeepFace.represent(img_path=black_img, enforce_detection=False)
-
- assert isinstance(objs, list)
- assert len(objs) > 0
- assert isinstance(objs[0], dict)
- assert "embedding" in objs[0].keys()
- assert "facial_area" in objs[0].keys()
- assert isinstance(objs[0]["facial_area"], dict)
- assert "x" in objs[0]["facial_area"].keys()
- assert "y" in objs[0]["facial_area"].keys()
- assert "w" in objs[0]["facial_area"].keys()
- assert "h" in objs[0]["facial_area"].keys()
- assert isinstance(objs[0]["embedding"], list)
- assert len(objs[0]["embedding"]) == 4096 # embedding of VGG-Face
-
- logger.info("✅ disabled enforce detection with non facial input test for represent tests done")
-
-
-def test_disabled_enforce_detection_for_non_facial_input_on_verify():
- black_img = np.zeros([224, 224, 3])
- obj = DeepFace.verify(img1_path=black_img, img2_path=black_img, enforce_detection=False)
- assert isinstance(obj, dict)
-
- logger.info("✅ disabled enforce detection with non facial input test for verify tests done")
diff --git a/tests/test_extract_faces.py b/tests/test_extract_faces.py
deleted file mode 100644
index ba05ab4..0000000
--- a/tests/test_extract_faces.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# built-in dependencies
-import base64
-
-# 3rd party dependencies
-import cv2
-import numpy as np
-import pytest
-
-# project dependencies
-from deepface import DeepFace
-from deepface.commons import image_utils
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-detectors = ["opencv", "mtcnn", "ssd"]
-
-
-def test_different_detectors():
- img_path = "dataset/img11.jpg"
- img = cv2.imread(img_path)
- height, width, _ = img.shape
-
- for detector in detectors:
- img_objs = DeepFace.extract_faces(img_path=img_path, detector_backend=detector)
- for img_obj in img_objs:
- assert "face" in img_obj.keys()
- assert "facial_area" in img_obj.keys()
- assert isinstance(img_obj["facial_area"], dict)
- assert "x" in img_obj["facial_area"].keys()
- assert "y" in img_obj["facial_area"].keys()
- assert "w" in img_obj["facial_area"].keys()
- assert "h" in img_obj["facial_area"].keys()
- # left and right eyes are set with respect to the person, not the observer
- assert "left_eye" in img_obj["facial_area"].keys()
- assert "right_eye" in img_obj["facial_area"].keys()
- right_eye = img_obj["facial_area"]["right_eye"]
- left_eye = img_obj["facial_area"]["left_eye"]
-
- # left eye and right eye must be tuples
- assert isinstance(left_eye, tuple)
- assert isinstance(right_eye, tuple)
-
- # right eye should be the person's right eye, i.e. the one with the smaller x in image coordinates
- assert left_eye[0] > right_eye[0]
-
- # eye coordinates must be int to avoid problems in the api
- assert isinstance(left_eye[0], int)
- assert isinstance(left_eye[1], int)
- assert isinstance(right_eye[0], int)
- assert isinstance(right_eye[1], int)
-
- # confidence must be a plain float, not a numpy type, to avoid problems in the api
- assert "confidence" in img_obj.keys()
- type_conf = type(img_obj["confidence"])
- assert isinstance(
- img_obj["confidence"], float
- ), f"confidence type must be float but it is {type_conf}"
-
- # black-pixel borders are added to the image because alignment can move faces
- # that are close to the border outside of the image. adding these borders may
- # cause the facial area to be miscalculated. check that it is restored correctly.
- x = img_obj["facial_area"]["x"]
- y = img_obj["facial_area"]["y"]
- w = img_obj["facial_area"]["w"]
- h = img_obj["facial_area"]["h"]
-
- assert x < width
- assert x + w < width
- assert y < height
- assert y + h < height
- assert left_eye[0] < width
- assert right_eye[0] < width
- assert left_eye[1] < height
- assert right_eye[1] < height
-
- img = img_obj["face"]
- assert img.shape[0] > 0 and img.shape[1] > 0
- logger.info(f"✅ extract_faces for {detector} backend test is done")
-
-
-def test_backends_for_enforced_detection_with_non_facial_inputs():
- black_img = np.zeros([224, 224, 3])
- for detector in detectors:
- with pytest.raises(ValueError):
- _ = DeepFace.extract_faces(img_path=black_img, detector_backend=detector)
- logger.info("✅ extract_faces for enforced detection and non-facial image test is done")
-
-
-def test_backends_for_not_enforced_detection_with_non_facial_inputs():
- black_img = np.zeros([224, 224, 3])
- for detector in detectors:
- objs = DeepFace.extract_faces(
- img_path=black_img, detector_backend=detector, enforce_detection=False
- )
- assert objs[0]["face"].shape == (224, 224, 3)
- logger.info("✅ extract_faces for not enforced detection and non-facial image test is done")
-
-
-def test_file_types_while_loading_base64():
- img1_path = "dataset/img47.jpg"
- img1_base64 = image_to_base64(image_path=img1_path)
-
- with pytest.raises(ValueError, match="Input image can be jpg or png, but it is"):
- _ = image_utils.load_image_from_base64(uri=img1_base64)
-
- img2_path = "dataset/img1.jpg"
- img2_base64 = image_to_base64(image_path=img2_path)
-
- img2 = image_utils.load_image_from_base64(uri=img2_base64)
- # 3 dimensional image should be loaded
- assert len(img2.shape) == 3
-
-
-def image_to_base64(image_path):
- with open(image_path, "rb") as image_file:
- encoded_string = base64.b64encode(image_file.read()).decode("utf-8")
- return "data:image/jpeg," + encoded_string
-
-
-def test_facial_coordinates_are_in_borders():
- img_path = "dataset/selfie-many-people.jpg"
- img = cv2.imread(img_path)
- height, width, _ = img.shape
-
- results = DeepFace.extract_faces(img_path=img_path)
-
- assert len(results) > 0
-
- for result in results:
- facial_area = result["facial_area"]
-
- x = facial_area["x"]
- y = facial_area["y"]
- w = facial_area["w"]
- h = facial_area["h"]
-
- assert x >= 0
- assert y >= 0
- assert x + w < width
- assert y + h < height
-
- logger.info("✅ facial area coordinates are all in image borders")
diff --git a/tests/test_find.py b/tests/test_find.py
deleted file mode 100644
index ffea91b..0000000
--- a/tests/test_find.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# built-in dependencies
-import os
-
-# 3rd party dependencies
-import cv2
-import pandas as pd
-
-# project dependencies
-from deepface import DeepFace
-from deepface.modules import verification
-from deepface.commons import image_utils
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-
-threshold = verification.find_threshold(model_name="VGG-Face", distance_metric="cosine")
-
-
-def test_find_with_exact_path():
- img_path = os.path.join("dataset", "img1.jpg")
- dfs = DeepFace.find(img_path=img_path, db_path="dataset", silent=True)
- assert len(dfs) > 0
- for df in dfs:
- assert isinstance(df, pd.DataFrame)
-
- # one is img1.jpg itself
- identity_df = df[df["identity"] == img_path]
- assert identity_df.shape[0] > 0
-
- # validate reproducibility
- assert identity_df["distance"].values[0] < threshold
-
- df = df[df["identity"] != img_path]
- logger.debug(df.head())
- assert df.shape[0] > 0
- logger.info("✅ test find for exact path done")
-
-
-def test_find_with_array_input():
- img_path = os.path.join("dataset", "img1.jpg")
- img1 = cv2.imread(img_path)
- dfs = DeepFace.find(img1, db_path="dataset", silent=True)
- assert len(dfs) > 0
- for df in dfs:
- assert isinstance(df, pd.DataFrame)
-
- # one is img1.jpg itself
- identity_df = df[df["identity"] == img_path]
- assert identity_df.shape[0] > 0
-
- # validate reproducibility
- assert identity_df["distance"].values[0] < threshold
-
- df = df[df["identity"] != img_path]
- logger.debug(df.head())
- assert df.shape[0] > 0
-
- logger.info("✅ test find for array input done")
-
-
-def test_find_with_extracted_faces():
- img_path = os.path.join("dataset", "img1.jpg")
- face_objs = DeepFace.extract_faces(img_path)
- img = face_objs[0]["face"]
- dfs = DeepFace.find(img, db_path="dataset", detector_backend="skip", silent=True)
- assert len(dfs) > 0
- for df in dfs:
- assert isinstance(df, pd.DataFrame)
-
- # one is img1.jpg itself
- identity_df = df[df["identity"] == img_path]
- assert identity_df.shape[0] > 0
-
- # validate reproducibility
- assert identity_df["distance"].values[0] < threshold
-
- df = df[df["identity"] != img_path]
- logger.debug(df.head())
- assert df.shape[0] > 0
- logger.info("✅ test find for extracted face input done")
-
-
-def test_filetype_for_find():
- """
- only jpg and png images can be loaded into the database
- """
- img_path = os.path.join("dataset", "img1.jpg")
- dfs = DeepFace.find(img_path=img_path, db_path="dataset", silent=True)
-
- df = dfs[0]
-
- # img47 is webp even though its extension is jpg
- assert df[df["identity"] == "dataset/img47.jpg"].shape[0] == 0
-
-
-def test_filetype_for_find_bulk_embeddings():
- imgs = image_utils.list_images("dataset")
-
- assert len(imgs) > 0
-
- # img47 is webp even though its extension is jpg
- assert "dataset/img47.jpg" not in imgs
-
-
-def test_find_without_refresh_database():
- import hashlib
- import shutil
-
- img_path = os.path.join("dataset", "img1.jpg")
-
- # 1. Calculate the hash of the .pkl file;
- # 2. Move a random image to a temporarily created directory;
- # 3. As a result, there will be a difference between the .pkl file and the files on disk;
- # 4. If refresh_database=False, the .pkl file should not be updated.
- # Recalculate the hash and compare it with the hash from step 1;
- # 5. After a successful check, move the image back to its original destination;
-
- pkl_path = "dataset/ds_model_vggface_detector_opencv_aligned_normalization_base_expand_0.pkl"
- with open(pkl_path, "rb") as f:
- hash_before = hashlib.sha256(f.read())
-
- image_name = "img28.jpg"
- tmp_dir = "dataset/temp_image"
- os.mkdir(tmp_dir)
- shutil.move(os.path.join("dataset", image_name), os.path.join(tmp_dir, image_name))
-
- dfs = DeepFace.find(img_path=img_path, db_path="dataset", silent=True, refresh_database=False)
-
- with open(pkl_path, "rb") as f:
- hash_after = hashlib.sha256(f.read())
-
- shutil.move(os.path.join(tmp_dir, image_name), os.path.join("dataset", image_name))
- os.rmdir(tmp_dir)
-
- assert hash_before.hexdigest() == hash_after.hexdigest()
-
- logger.info("✅ .pkl hashes before and after the recognition process are the same")
-
- assert len(dfs) > 0
- for df in dfs:
- assert isinstance(df, pd.DataFrame)
-
- # one is img1.jpg itself
- identity_df = df[df["identity"] == img_path]
- assert identity_df.shape[0] > 0
-
- # validate reproducibility
- assert identity_df["distance"].values[0] < threshold
-
- df = df[df["identity"] != img_path]
- logger.debug(df.head())
- assert df.shape[0] > 0
- logger.info("✅ test find without refresh database done")
diff --git a/tests/test_represent.py b/tests/test_represent.py
deleted file mode 100644
index 085dff2..0000000
--- a/tests/test_represent.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# built-in dependencies
-import cv2
-
-# project dependencies
-from deepface import DeepFace
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-
-def test_standard_represent():
- img_path = "dataset/img1.jpg"
- embedding_objs = DeepFace.represent(img_path)
- for embedding_obj in embedding_objs:
- embedding = embedding_obj["embedding"]
- logger.debug(f"Function returned {len(embedding)} dimensional vector")
- assert len(embedding) == 4096
- logger.info("✅ test standard represent function done")
-
-
-def test_represent_for_skipped_detector_backend_with_image_path():
- face_img = "dataset/img5.jpg"
- img_objs = DeepFace.represent(img_path=face_img, detector_backend="skip")
- assert len(img_objs) >= 1
- img_obj = img_objs[0]
- assert "embedding" in img_obj.keys()
- assert "facial_area" in img_obj.keys()
- assert isinstance(img_obj["facial_area"], dict)
- assert "x" in img_obj["facial_area"].keys()
- assert "y" in img_obj["facial_area"].keys()
- assert "w" in img_obj["facial_area"].keys()
- assert "h" in img_obj["facial_area"].keys()
- assert "face_confidence" in img_obj.keys()
- logger.info("✅ test represent function for skipped detector and image path input backend done")
-
-
-def test_represent_for_skipped_detector_backend_with_preloaded_image():
- face_img = "dataset/img5.jpg"
- img = cv2.imread(face_img)
- img_objs = DeepFace.represent(img_path=img, detector_backend="skip")
- assert len(img_objs) >= 1
- img_obj = img_objs[0]
- assert "embedding" in img_obj.keys()
- assert "facial_area" in img_obj.keys()
- assert isinstance(img_obj["facial_area"], dict)
- assert "x" in img_obj["facial_area"].keys()
- assert "y" in img_obj["facial_area"].keys()
- assert "w" in img_obj["facial_area"].keys()
- assert "h" in img_obj["facial_area"].keys()
- assert "face_confidence" in img_obj.keys()
- logger.info("✅ test represent function for skipped detector and preloaded image done")
-
-
-def test_max_faces():
- # confirm that input image has more than one face
- results = DeepFace.represent(img_path="dataset/couple.jpg")
- assert len(results) > 1
-
- # test it with max faces arg
- max_faces = 1
- results = DeepFace.represent(img_path="dataset/couple.jpg", max_faces=max_faces)
- assert len(results) == max_faces
diff --git a/tests/test_singleton.py b/tests/test_singleton.py
deleted file mode 100644
index f2e4ea1..0000000
--- a/tests/test_singleton.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-
-def test_singleton_same_object():
- assert Logger() == Logger()
- logger.info("✅ id's of instances of \"singletoned\" class Logger are the same")
diff --git a/tests/test_verify.py b/tests/test_verify.py
deleted file mode 100644
index 2a6951b..0000000
--- a/tests/test_verify.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# 3rd party dependencies
-import pytest
-import cv2
-
-# project dependencies
-from deepface import DeepFace
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-models = ["VGG-Face", "Facenet", "Facenet512", "ArcFace", "GhostFaceNet"]
-metrics = ["cosine", "euclidean", "euclidean_l2"]
-detectors = ["opencv", "mtcnn"]
-
-
-def test_different_facial_recognition_models():
- dataset = [
- ["dataset/img1.jpg", "dataset/img2.jpg", True],
- ["dataset/img5.jpg", "dataset/img6.jpg", True],
- ["dataset/img6.jpg", "dataset/img7.jpg", True],
- ["dataset/img8.jpg", "dataset/img9.jpg", True],
- ["dataset/img1.jpg", "dataset/img11.jpg", True],
- ["dataset/img2.jpg", "dataset/img11.jpg", True],
- ["dataset/img1.jpg", "dataset/img3.jpg", False],
- ["dataset/img2.jpg", "dataset/img3.jpg", False],
- ["dataset/img6.jpg", "dataset/img8.jpg", False],
- ["dataset/img6.jpg", "dataset/img9.jpg", False],
- ]
-
- expected_coverage = 97.53 # human level accuracy on LFW
- successful_tests = 0
- unsuccessful_tests = 0
- for model in models:
- for metric in metrics:
- for instance in dataset:
- img1 = instance[0]
- img2 = instance[1]
- result = instance[2]
-
- resp_obj = DeepFace.verify(img1, img2, model_name=model, distance_metric=metric)
-
- prediction = resp_obj["verified"]
- distance = round(resp_obj["distance"], 2)
- threshold = resp_obj["threshold"]
-
- if prediction is result:
- test_result_label = "✅"
- successful_tests += 1
- else:
- test_result_label = "❌"
- unsuccessful_tests += 1
-
- if prediction is True:
- classified_label = "same person"
- else:
- classified_label = "different persons"
-
- img1_alias = img1.split("/", maxsplit=1)[-1]
- img2_alias = img2.split("/", maxsplit=1)[-1]
-
- logger.debug(
- f"{test_result_label} Pair {img1_alias}-{img2_alias}"
- f" is {classified_label} based on {model}-{metric}"
- f" (Distance: {distance}, Threshold: {threshold})",
- )
-
- coverage_score = (100 * successful_tests) / (successful_tests + unsuccessful_tests)
- assert (
- coverage_score > expected_coverage
- ), f"⛔ facial recognition models test failed with {coverage_score} score"
-
- logger.info(f"✅ facial recognition models test passed with {coverage_score}")
-
-
-def test_different_face_detectors():
- for detector in detectors:
- res = DeepFace.verify("dataset/img1.jpg", "dataset/img2.jpg", detector_backend=detector)
- assert isinstance(res, dict)
- assert "verified" in res.keys()
- assert res["verified"] in [True, False]
- assert "distance" in res.keys()
- assert "threshold" in res.keys()
- assert "model" in res.keys()
- assert "detector_backend" in res.keys()
- assert "similarity_metric" in res.keys()
- assert "facial_areas" in res.keys()
- assert "img1" in res["facial_areas"].keys()
- assert "img2" in res["facial_areas"].keys()
- assert "x" in res["facial_areas"]["img1"].keys()
- assert "y" in res["facial_areas"]["img1"].keys()
- assert "w" in res["facial_areas"]["img1"].keys()
- assert "h" in res["facial_areas"]["img1"].keys()
- assert "x" in res["facial_areas"]["img2"].keys()
- assert "y" in res["facial_areas"]["img2"].keys()
- assert "w" in res["facial_areas"]["img2"].keys()
- assert "h" in res["facial_areas"]["img2"].keys()
- logger.info(f"✅ test verify for {detector} backend done")
-
-
-def test_verify_for_preloaded_image():
- img1 = cv2.imread("dataset/img1.jpg")
- img2 = cv2.imread("dataset/img2.jpg")
- res = DeepFace.verify(img1, img2)
- assert res["verified"] is True
- logger.info("✅ test verify for pre-loaded image done")
-
-
-def test_verify_for_precalculated_embeddings():
- model_name = "Facenet"
-
- img1_path = "dataset/img1.jpg"
- img2_path = "dataset/img2.jpg"
-
- img1_embedding = DeepFace.represent(img_path=img1_path, model_name=model_name)[0]["embedding"]
- img2_embedding = DeepFace.represent(img_path=img2_path, model_name=model_name)[0]["embedding"]
-
- result = DeepFace.verify(
- img1_path=img1_embedding, img2_path=img2_embedding, model_name=model_name, silent=True
- )
-
- assert result["verified"] is True
- assert result["distance"] < result["threshold"]
- assert result["model"] == model_name
- assert result["facial_areas"]["img1"] is not None
- assert result["facial_areas"]["img2"] is not None
-
- assert isinstance(result["facial_areas"]["img1"], dict)
- assert isinstance(result["facial_areas"]["img2"], dict)
-
- assert "x" in result["facial_areas"]["img1"].keys()
- assert "y" in result["facial_areas"]["img1"].keys()
- assert "w" in result["facial_areas"]["img1"].keys()
- assert "h" in result["facial_areas"]["img1"].keys()
- assert "left_eye" in result["facial_areas"]["img1"].keys()
- assert "right_eye" in result["facial_areas"]["img1"].keys()
-
- assert "x" in result["facial_areas"]["img2"].keys()
- assert "y" in result["facial_areas"]["img2"].keys()
- assert "w" in result["facial_areas"]["img2"].keys()
- assert "h" in result["facial_areas"]["img2"].keys()
- assert "left_eye" in result["facial_areas"]["img2"].keys()
- assert "right_eye" in result["facial_areas"]["img2"].keys()
-
- logger.info("✅ test verify for pre-calculated embeddings done")
-
-
-def test_verify_with_precalculated_embeddings_for_incorrect_model():
- # generate embeddings with VGG (default)
- img1_path = "dataset/img1.jpg"
- img2_path = "dataset/img2.jpg"
- img1_embedding = DeepFace.represent(img_path=img1_path)[0]["embedding"]
- img2_embedding = DeepFace.represent(img_path=img2_path)[0]["embedding"]
-
- with pytest.raises(
- ValueError,
- match="embeddings of Facenet should have 128 dimensions, but 1-th image has 4096 dimensions input",
- ):
- _ = DeepFace.verify(
- img1_path=img1_embedding, img2_path=img2_embedding, model_name="Facenet", silent=True
- )
-
- logger.info("✅ test verify with pre-calculated embeddings for incorrect model done")
-
-
-def test_verify_for_broken_embeddings():
- img1_embeddings = ["a", "b", "c"]
- img2_embeddings = [1, 2, 3]
-
- with pytest.raises(
- ValueError,
- match="When passing img1_path as a list, ensure that all its items are of type float.",
- ):
- _ = DeepFace.verify(img1_path=img1_embeddings, img2_path=img2_embeddings)
- logger.info("✅ test verify for broken embeddings content is done")
-
-
-def test_verify_for_nested_embeddings():
- """
- batch embeddings not supported
- """
- img1_embeddings = [[1, 2, 3], [4, 5, 6]]
- img2_path = "dataset/img1.jpg"
-
- with pytest.raises(
- ValueError,
- match="When passing img1_path as a list, ensure that all its items are of type float",
- ):
- _ = DeepFace.verify(img1_path=img1_embeddings, img2_path=img2_path)
-
- logger.info("✅ test verify for nested embeddings is done")
diff --git a/tests/test_version.py b/tests/test_version.py
deleted file mode 100644
index 21e0065..0000000
--- a/tests/test_version.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# built-in dependencies
-import json
-
-# project dependencies
-from deepface import DeepFace
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-
-def test_version():
- with open("../package_info.json", "r", encoding="utf-8") as f:
- package_info = json.load(f)
-
- assert DeepFace.__version__ == package_info["version"]
- logger.info("✅ versions are matching in both package_info.json and deepface/__init__.py")
diff --git a/tests/visual-test.py b/tests/visual-test.py
deleted file mode 100644
index 9149bc5..0000000
--- a/tests/visual-test.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# 3rd party dependencies
-import matplotlib.pyplot as plt
-
-# project dependencies
-from deepface import DeepFace
-from deepface.commons.logger import Logger
-
-logger = Logger()
-
-# some models (e.g. Dlib) and detectors (e.g. retinaface) do not have test cases
- # because they require installing huge packages
-# this module is for local runs
-
-model_names = [
- "VGG-Face",
- "Facenet",
- "Facenet512",
- "OpenFace",
- "DeepFace",
- "DeepID",
- "Dlib",
- "ArcFace",
- "SFace",
- "GhostFaceNet",
-]
-
-detector_backends = [
- "opencv",
- "ssd",
- "dlib",
- "mtcnn",
- "fastmtcnn",
- # "mediapipe", # crashed in mac
- "retinaface",
- "yunet",
- "yolov8",
- "centerface",
-]
-
-# verification
-for model_name in model_names:
- obj = DeepFace.verify(
- img1_path="dataset/img1.jpg", img2_path="dataset/img2.jpg", model_name=model_name
- )
- logger.info(obj)
- logger.info("---------------------")
-
-# represent
-for model_name in model_names:
- embedding_objs = DeepFace.represent(img_path="dataset/img1.jpg", model_name=model_name)
- for embedding_obj in embedding_objs:
- embedding = embedding_obj["embedding"]
- logger.info(f"{model_name} produced {len(embedding)}D vector")
-
-
-# find
-dfs = DeepFace.find(
- img_path="dataset/img1.jpg", db_path="dataset", model_name="Facenet", detector_backend="mtcnn"
-)
-for df in dfs:
- logger.info(df)
-
-expand_areas = [0]
-img_paths = ["dataset/img11.jpg", "dataset/img11_reflection.jpg"]
-for expand_area in expand_areas:
- for img_path in img_paths:
- # extract faces
- for detector_backend in detector_backends:
- face_objs = DeepFace.extract_faces(
- img_path=img_path,
- detector_backend=detector_backend,
- align=True,
- expand_percentage=expand_area,
- )
- for face_obj in face_objs:
- face = face_obj["face"]
- logger.info(f"testing {img_path} with {detector_backend}")
- logger.info(face_obj["facial_area"])
- logger.info(face_obj["confidence"])
-
- # we know opencv sometimes cannot find eyes
- if face_obj["facial_area"]["left_eye"] is not None:
- assert isinstance(face_obj["facial_area"]["left_eye"], tuple)
- assert isinstance(face_obj["facial_area"]["left_eye"][0], int)
- assert isinstance(face_obj["facial_area"]["left_eye"][1], int)
-
- if face_obj["facial_area"]["right_eye"] is not None:
- assert isinstance(face_obj["facial_area"]["right_eye"], tuple)
- assert isinstance(face_obj["facial_area"]["right_eye"][0], int)
- assert isinstance(face_obj["facial_area"]["right_eye"][1], int)
-
- # left eye is really the left eye of the person
- if (
- face_obj["facial_area"]["left_eye"] is not None
- and face_obj["facial_area"]["right_eye"] is not None
- ):
- re_x = face_obj["facial_area"]["right_eye"][0]
- le_x = face_obj["facial_area"]["left_eye"][0]
- assert re_x < le_x, "right eye must be the right eye of the person"
-
- type_conf = type(face_obj["confidence"])
- assert isinstance(
- face_obj["confidence"], float
- ), f"confidence type must be float but it is {type_conf}"
- assert face_obj["confidence"] <= 1
-
- plt.imshow(face)
- plt.axis("off")
- plt.show()
- logger.info("-----------")