Upload 164 files
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .gitattributes +44 -0
- .python-version +1 -0
- LICENSE +21 -0
- README.md +128 -13
- apps/gradio_app.py +116 -0
- apps/gradio_app/.gitkeep +0 -0
- apps/gradio_app/__init__.py +0 -0
- apps/gradio_app/components.py +135 -0
- apps/gradio_app/inference.py +93 -0
- apps/gradio_app/static/script.js +3 -0
- apps/gradio_app/static/style.css +229 -0
- assets/comparision.md +11 -0
- assets/examples/.gitkeep +0 -0
- ckpts/.gitignore +4 -0
- configs/accelerate_config.yaml +6 -0
- configs/image_classification_models_config.yaml +249 -0
- data/raw/.gitignore +4 -0
- data/reference_data/images/.gitkeep +0 -0
- data/reference_data/images/Akshay Kumar.jpg +3 -0
- data/reference_data/images/Alexandra Daddario.jpg +3 -0
- data/reference_data/images/Alia Bhatt.jpg +3 -0
- data/reference_data/images/Amitabh Bachchan.jpg +3 -0
- data/reference_data/images/Andy Samberg.jpg +3 -0
- data/reference_data/images/Anushka Sharma.jpg +3 -0
- data/reference_data/images/Billie Eilish.jpg +3 -0
- data/reference_data/images/Brad Pitt.jpg +3 -0
- data/reference_data/images/Camila Cabello.png +3 -0
- data/reference_data/images/Charlize Theron.jpg +3 -0
- data/reference_data/images/Claire Holt.png +3 -0
- data/reference_data/images/Courtney Cox.jpg +3 -0
- data/reference_data/images/Dwayne Johnson.jpg +3 -0
- data/reference_data/images/Elizabeth Olsen.jpg +3 -0
- data/reference_data/images/Ellen Degeneres.jpg +3 -0
- data/reference_data/images/Henry Cavill.jpg +3 -0
- data/reference_data/images/Hrithik Roshan.jpg +3 -0
- data/reference_data/images/Hugh Jackman.jpg +3 -0
- data/reference_data/images/Jessica Alba.jpg +3 -0
- data/reference_data/images/Kashyap.jpg +3 -0
- data/reference_data/images/Lisa Kudrow.jpg +3 -0
- data/reference_data/images/Margot Robbie.jpg +3 -0
- data/reference_data/images/Marmik.jpg +0 -0
- data/reference_data/images/Natalie Portman.jpg +3 -0
- data/reference_data/images/Priyanka Chopra.jpg +3 -0
- data/reference_data/images/Robert Downey Jr.jpg +0 -0
- data/reference_data/images/Roger Federer.jpg +3 -0
- data/reference_data/images/Tom Cruise.jpg +3 -0
- data/reference_data/images/Vijay Deverakonda.jpg +3 -0
- data/reference_data/images/Virat Kohli.jpg +3 -0
- data/reference_data/images/Zac Efron.jpg +3 -0
- data/reference_data/reference_image_data.json +0 -0
.gitattributes
CHANGED
@@ -77,3 +77,47 @@ SlimFace/data/reference_data/images/Tom[[:space:]]Cruise.jpg filter=lfs diff=lfs merge=lfs -text
 SlimFace/data/reference_data/images/Vijay[[:space:]]Deverakonda.jpg filter=lfs diff=lfs merge=lfs -text
 SlimFace/data/reference_data/images/Virat[[:space:]]Kohli.jpg filter=lfs diff=lfs merge=lfs -text
 SlimFace/data/reference_data/images/Zac[[:space:]]Efron.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Akshay[[:space:]]Kumar.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Alexandra[[:space:]]Daddario.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Alia[[:space:]]Bhatt.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Amitabh[[:space:]]Bachchan.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Andy[[:space:]]Samberg.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Anushka[[:space:]]Sharma.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Billie[[:space:]]Eilish.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Brad[[:space:]]Pitt.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Camila[[:space:]]Cabello.png filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Charlize[[:space:]]Theron.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Claire[[:space:]]Holt.png filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Courtney[[:space:]]Cox.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Dwayne[[:space:]]Johnson.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Elizabeth[[:space:]]Olsen.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Ellen[[:space:]]Degeneres.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Henry[[:space:]]Cavill.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Hrithik[[:space:]]Roshan.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Hugh[[:space:]]Jackman.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Jessica[[:space:]]Alba.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Kashyap.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Lisa[[:space:]]Kudrow.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Margot[[:space:]]Robbie.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Natalie[[:space:]]Portman.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Priyanka[[:space:]]Chopra.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Roger[[:space:]]Federer.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Tom[[:space:]]Cruise.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Vijay[[:space:]]Deverakonda.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Virat[[:space:]]Kohli.jpg filter=lfs diff=lfs merge=lfs -text
+data/reference_data/images/Zac[[:space:]]Efron.jpg filter=lfs diff=lfs merge=lfs -text
+src/slimface/models/detection_models/mtcnn_pytorch/caffe_models/det2.caffemodel filter=lfs diff=lfs merge=lfs -text
+src/slimface/models/detection_models/mtcnn_pytorch/caffe_models/det3.caffemodel filter=lfs diff=lfs merge=lfs -text
+src/slimface/models/detection_models/mtcnn_pytorch/caffe_models/det4.caffemodel filter=lfs diff=lfs merge=lfs -text
+src/slimface/models/detection_models/mtcnn_pytorch/images/example.png filter=lfs diff=lfs merge=lfs -text
+src/slimface/models/detection_models/mtcnn_pytorch/images/office2.jpg filter=lfs diff=lfs merge=lfs -text
+src/slimface/models/detection_models/mtcnn_pytorch/images/office4.jpg filter=lfs diff=lfs merge=lfs -text
+tests/test_images/Cate[[:space:]]Blanchett.jpg filter=lfs diff=lfs merge=lfs -text
+tests/test_images/Daniel[[:space:]]Day-Lewis.jpg filter=lfs diff=lfs merge=lfs -text
+tests/test_images/dont_know.jpg filter=lfs diff=lfs merge=lfs -text
+tests/test_images/Elon_Musk.jpg filter=lfs diff=lfs merge=lfs -text
+tests/test_images/Gal[[:space:]]Gado.jpg filter=lfs diff=lfs merge=lfs -text
+tests/test_images/Kate[[:space:]]Winslet.jpg filter=lfs diff=lfs merge=lfs -text
+tests/test_images/Tom[[:space:]]Cruise.jpg filter=lfs diff=lfs merge=lfs -text
+tests/test_images/Tom[[:space:]]Hanks.jpg filter=lfs diff=lfs merge=lfs -text
+tests/test_images/Viola[[:space:]]Davis.jpg filter=lfs diff=lfs merge=lfs -text
.python-version
ADDED
@@ -0,0 +1 @@
3.11.11
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 Danh Tran

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md
CHANGED
@@ -1,13 +1,128 @@
# SlimFace: Slim Face Recognition

> ## Credits and Citation
>
> ℹ️ This project is based on [otroshi/edgeface](https://github.com/otroshi/edgeface) by [otroshi](https://github.com/otroshi), and includes our own bug fixes and enhancements.
>
> If this project is helpful for your research, please cite the original paper:
>
> **EdgeFace: Efficient Face Recognition Model for Edge Devices**
> *George, Anjith and Ecabert, Christophe and Shahreza, Hatef Otroshi and Kotwal, Ketan and Marcel, Sebastien*
> *IEEE Transactions on Biometrics, Behavior, and Identity Science (2024)*
>
> ```bibtex
> @article{edgeface,
>   title={Edgeface: Efficient face recognition model for edge devices},
>   author={George, Anjith and Ecabert, Christophe and Shahreza, Hatef Otroshi and Kotwal, Ketan and Marcel, Sebastien},
>   journal={IEEE Transactions on Biometrics, Behavior, and Identity Science},
>   year={2024}
> }
> ```

## Usage
### Clone the Repository
```bash
# Clone the repository
git clone https://github.com/danhtran2mind/SlimFace

# Navigate into the newly created 'SlimFace' directory
cd SlimFace
```
### Install Dependencies
If OpenCV (cv2) does not work, run the following commands first:
```bash
sudo apt update
sudo apt install -y libglib2.0-0
sudo apt install -y libgl1-mesa-dev
```
### Default Installation
```bash
pip install -r requirements/requirements.txt
```
### Other Installation Options
- For a pinned, known-compatible environment:
```bash
pip install -r requirements/requirements_compatible.txt
```
- For end-to-end inference:
```bash
pip install -r requirements/requirements_inference.txt
```
### Download Model Checkpoints
```bash
python scripts/download_ckpts.py
```
### Set Up Third-Party Components
```bash
python scripts/setup_third_party.py
```
## Data Preparation

For detailed instructions on processing and managing your data, refer to the [full data processing guide](./docs/data_processing.md).

For quick dataset preparation, run:
```bash
python scripts/process_dataset.py
```
## Pre-trained Model Preparation

## Training

1. Configure the default settings for Accelerate:
```bash
accelerate config default
```

2. Launch the training script with Accelerate:
```bash
accelerate launch src/slimface/training/accelerate_train.py
```

For additional help, refer to the [training documentation](./docs/training/training_docs.md).

### Inference
#### Create Reference Image Data in `data/reference_data/images`
For each class, store one image in the `data/reference_data/images` folder; these images are mapped to classes via `index_to_class_mapping.json`.

The structure looks like:
```markdown
data/reference_data/images/
├── 'Robert Downey Jr.jpg'
├── 'Tom Cruise.jpg'
└── ...
```

### Create a Reference Dictionary from `index_to_class_mapping.json`

#### Steps
1. Place `index_to_class_mapping.json` in the `ckpts` folder.
2. Ensure the reference images are in `data/reference_data/images`. Missing images are recorded as `""` in `reference_image_data.json` (written to the `data/reference_data` folder by default).
3. Run one of the following commands.

#### Commands
- **Default** (output: `data/reference_data/reference_image_data.json`):
```bash
python scripts/create_reference_image_path.py
```
- **Custom paths**:
```bash
python scripts/create_reference_image_path.py \
    --input <path_to_index_to_class_mapping.json> \
    --output <path_to_tests/reference_image_data.json>
```

#### Manual Option
Edit `reference_image_data.json` directly to add image paths as dictionary values.

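To make the expected formats concrete, here is a hypothetical sketch of the two JSON files involved. The real keys come from your own training classes; this is an illustration, not data shipped with the repository:

```python
# Hypothetical contents, for illustration only.
# ckpts/index_to_class_mapping.json maps a class index to a class (person) name;
# data/reference_data/reference_image_data.json maps a class name to its reference
# image path, with "" left for classes that have no image yet.
import json

index_to_class_mapping = {
    "0": "Robert Downey Jr",
    "1": "Tom Cruise",
}

reference_image_data = {
    "Robert Downey Jr": "data/reference_data/images/Robert Downey Jr.jpg",
    "Tom Cruise": "data/reference_data/images/Tom Cruise.jpg",
}

# Writing the reference dictionary by hand is equivalent to the "Manual Option" above.
with open("data/reference_data/reference_image_data.json", "w") as f:
    json.dump(reference_image_data, f, indent=2)
```
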
## Demonstration
```bash
python apps/gradio_app.py
```

A hosted demo is available at https://huggingface.co/spaces/danhtran2mind/SlimFace-demo.

## Project Description

This repository is trained from [danhtran2mind/edgeface](https://github.com/danhtran2mind/edgeface), a fork of [otroshi/edgeface](https://github.com/otroshi/edgeface), with numerous bug fixes and rewritten code for improved performance and stability.
apps/gradio_app.py
ADDED
@@ -0,0 +1,116 @@
import gradio as gr
from PIL import Image
from gradio_app.inference import run_inference
from gradio_app.components import (
    CONTENT_DESCRIPTION, CONTENT_IN, CONTENT_OUT,
    list_reference_files, list_mapping_files,
    list_classifier_files, list_edgeface_files
)

def create_image_input_column():
    """Create the column for image input and output display."""
    with gr.Column():
        image_input = gr.Image(type="pil", label="Upload Image")
        output = gr.HTML(label="Inference Results", elem_classes=["results-container"])
    return image_input, output

def create_model_files_column():
    """Create the column for model file selection."""
    with gr.Column():
        with gr.Group(elem_classes=["section-group"]):
            gr.Markdown("### Model Files", elem_classes=["section-title"])
            ref_dict = gr.Dropdown(
                choices=["Select a file"] + list_reference_files(),
                label="Reference Dict JSON",
                value="data/reference_data/reference_image_data.json"
            )
            index_map = gr.Dropdown(
                choices=["Select a file"] + list_mapping_files(),
                label="Index to Class Mapping JSON",
                value="ckpts/index_to_class_mapping.json"
            )
            classifier_model = gr.Dropdown(
                choices=["Select a file"] + list_classifier_files(),
                label="Classifier Model (.pth)",
                value="ckpts/SlimFace_efficientnet_b3_full_model.pth"
            )
            edgeface_model = gr.Dropdown(
                choices=["Select a file"] + list_edgeface_files(),
                label="EdgeFace Model (.pt)",
                value="ckpts/idiap/edgeface_s_gamma_05.pt"
            )
    return ref_dict, index_map, classifier_model, edgeface_model

def create_settings_column():
    """Create the column for advanced settings."""
    with gr.Column():
        with gr.Group(elem_classes=["section-group"]):
            gr.Markdown("### Advanced Settings", elem_classes=["section-title"])
            algorithm = gr.Dropdown(
                choices=["yolo", "mtcnn", "retinaface"],
                label="Detection Algorithm",
                value="yolo"
            )
            accelerator = gr.Dropdown(
                choices=["auto", "cpu", "cuda", "mps"],
                label="Accelerator",
                value="auto"
            )
            resolution = gr.Slider(
                minimum=128,
                maximum=512,
                step=32,
                label="Image Resolution",
                value=300
            )
            similarity_threshold = gr.Slider(
                minimum=0.1,
                maximum=1.0,
                step=0.05,
                label="Similarity Threshold",
                value=0.3
            )
    return algorithm, accelerator, resolution, similarity_threshold

def create_interface():
    """Create the Gradio interface for SlimFace."""
    with gr.Blocks(css="gradio_app/static/styles.css", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# SlimFace Demonstration")
        gr.Markdown(CONTENT_DESCRIPTION)
        gr.HTML(CONTENT_IN)

        with gr.Row():
            image_input, output = create_image_input_column()
            ref_dict, index_map, classifier_model, edgeface_model = create_model_files_column()

        with gr.Row():
            algorithm, accelerator, resolution, similarity_threshold = create_settings_column()

        with gr.Row():
            submit_btn = gr.Button("Run Inference", variant="primary", elem_classes=["centered-button"])

        submit_btn.click(
            fn=run_inference,
            inputs=[
                image_input,
                ref_dict,
                index_map,
                classifier_model,
                edgeface_model,
                algorithm,
                accelerator,
                resolution,
                similarity_threshold
            ],
            outputs=output
        )
        gr.Markdown(CONTENT_OUT)
    return demo

def main():
    """Launch the Gradio interface."""
    demo = create_interface()
    demo.launch()

if __name__ == "__main__":
    main()
apps/gradio_app/.gitkeep
ADDED
File without changes
apps/gradio_app/__init__.py
ADDED
File without changes
apps/gradio_app/components.py
ADDED
@@ -0,0 +1,135 @@
import os

# File listing functions
def list_reference_files():
    ref_dir = "data/reference_data/"
    try:
        files = [os.path.join(ref_dir, f) for f in os.listdir(ref_dir) if f.endswith(".json")]
        return files if files else ["No .json files found in data/reference_data/"]
    except FileNotFoundError:
        return ["Directory data/reference_data/ not found"]
    except Exception as e:
        return [f"Error listing files: {str(e)}"]

def list_mapping_files():
    map_dir = "ckpts/"
    try:
        files = [os.path.join(map_dir, f) for f in os.listdir(map_dir) if f.endswith(".json")]
        return files if files else ["No .json files found in ckpts/"]
    except FileNotFoundError:
        return ["Directory ckpts/ not found"]
    except Exception as e:
        return [f"Error listing files: {str(e)}"]

def list_classifier_files():
    clf_dir = "ckpts/"
    try:
        files = [os.path.join(clf_dir, f) for f in os.listdir(clf_dir) if f.endswith(".pth")]
        return files if files else ["No .pth files found in ckpts/"]
    except FileNotFoundError:
        return ["Directory ckpts/ not found"]
    except Exception as e:
        return [f"Error listing files: {str(e)}"]

def list_edgeface_files():
    ef_dir = "ckpts/idiap/"
    try:
        files = [os.path.join(ef_dir, f) for f in os.listdir(ef_dir) if f.endswith(".pt")]
        return files if files else ["No .pt files found in ckpts/idiap/"]
    except FileNotFoundError:
        return ["Directory ckpts/idiap/ not found"]
    except Exception as e:
        return [f"Error listing files: {str(e)}"]

CONTENT_DESCRIPTION = """
**SlimFace: Advanced Face Classification with TorchVision Backbones**
"""

CONTENT_IN = """
<style>
    body {
        font-family: Arial, sans-serif;
        line-height: 1.6;
        margin: 0; /* Remove default margin for full-width */
        padding: 20px; /* Adjust padding for content spacing */
        color: #333;
        width: 100%; /* Ensure body takes full width */
        box-sizing: border-box; /* Include padding in width calculation */
    }
    .attribution {
        background-color: #f9f9f9;
        padding: 20px;
        border-radius: 8px;
        box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
    }
    .quote-container {
        border-left: 5px solid #007bff;
        padding-left: 15px;
        margin-bottom: 15px;
        font-style: italic;
    }
    .attribution p {
        margin: 10px 0;
    }
    .badge {
        display: inline-block;
        border-radius: 4px;
        text-decoration: none;
        font-size: 14px;
        transition: background-color 0.3s;
    }
    .badge:hover {
        background-color: #0056b3;
    }
    .badge img {
        vertical-align: middle;
        margin-right: 5px;
    }
    .source {
        color: #555;
    }
</style>
<div class="quote-container">
    <p>
        This project leverages code from
        <a class="badge" href="https://github.com/otroshi/edgeface">
            <img src="https://img.shields.io/badge/Built%20on-otroshi%2Fedgeface-blue?style=flat&logo=github" alt="Built on edgeface">
        </a>
        by
        <a class="badge" href="https://github.com/otroshi">
            <img src="https://img.shields.io/badge/GitHub-Hatef_Otroshi-blue?style=flat&logo=github" alt="Hatef Otroshi">
        </a>,
        with our own bug fixes and enhancements available at
        <a class="badge" href="https://github.com/danhtran2mind/edgeface/tree/main/face_alignment">
            <img src="https://img.shields.io/badge/GitHub-danhtran2mind%2Fedgeface-blue?style=flat&logo=github" alt="Edgeface Enhancements">
        </a>.
    </p>
</div>
<p class="source">
    For more information, you can follow the links below:<br>
    Source code:
    <a class="badge" href="https://github.com/danhtran2mind/SlimFace">
        <img src="https://img.shields.io/badge/GitHub-danhtran2mind%2FSlimFace-blue?style=flat" alt="GitHub Repo">
    </a>,
    Author:
    <a class="badge" href="https://github.com/danhtran2mind">
        <img src="https://img.shields.io/badge/GitHub-danhtran2mind-blue?style=flat" alt="GitHub Profile">
    </a>,
    PyTorch Docs:
    <a class="badge" href="https://docs.pytorch.org/vision/main/models.html">
        <img src="https://img.shields.io/badge/PyTorch-Pretrain%20Model%20Docs-blue?style=flat" alt="PyTorch Docs">
    </a>
</p>
"""

CONTENT_OUT = """
## More Information about SlimFace

SlimFace empowers developers to build high-accuracy face classification models using transfer learning, leveraging TorchVision's powerful pre-trained architectures. 🌟 It provides a flexible, efficient, and scalable solution for facial recognition, delivering top-tier performance for custom applications.

**Supported Architectures:**
- **EfficientNet**: B0-B7 and V2 (Small, Medium, Large) for balanced performance and efficiency. 📸
- **RegNet**: X/Y series (400MF to 128GF) for optimized computation across diverse hardware. 💻
- **Vision Transformers (ViT)**: B_16, B_32, H_14, L_16, L_32 for cutting-edge feature extraction. 🚀
"""
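Editor's note: the four listing helpers above differ only in the directory they scan and the extension they filter on. If they ever need to grow, they could be collapsed into a single parameterized helper; a sketch of that refactor follows (a suggestion, not the committed code):

```python
import os

def list_files(directory: str, extension: str):
    """Return matching file paths, or a one-item message list on failure.

    Mirrors the behaviour of list_reference_files / list_mapping_files /
    list_classifier_files / list_edgeface_files above.
    """
    try:
        files = [
            os.path.join(directory, f)
            for f in os.listdir(directory)
            if f.endswith(extension)
        ]
        return files if files else [f"No {extension} files found in {directory}"]
    except FileNotFoundError:
        return [f"Directory {directory} not found"]
    except Exception as e:  # keep the dropdowns populated even on unexpected errors
        return [f"Error listing files: {str(e)}"]

def list_reference_files():
    return list_files("data/reference_data/", ".json")

def list_mapping_files():
    return list_files("ckpts/", ".json")
```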
apps/gradio_app/inference.py
ADDED
@@ -0,0 +1,93 @@
import os
import sys
from PIL import Image

# Append the path to the inference script's directory
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'slimface', 'inference')))

from end2end_inference import inference_and_confirm

def run_inference(image, reference_dict_path, index_to_class_mapping_path, model_path,
                  edgeface_model_path="ckpts/idiap/edgeface_base.pt",
                  algorithm="yolo", accelerator="auto", resolution=224, similarity_threshold=0.6):

    # Validate image input
    if image is None:
        return '<div class="error-message">Error: No image provided. Please upload an image.</div>'

    # Define temporary image path
    temp_image_path = os.path.join(os.path.dirname(__file__), "temp_data", "temp_image.jpg")
    os.makedirs(os.path.dirname(temp_image_path), exist_ok=True)

    # Save the image
    try:
        image.save(temp_image_path)
    except Exception as e:
        return f'<div class="error-message">Error saving image: {str(e)}</div>'

    # Create args object to mimic command-line arguments
    class Args:
        def __init__(self):
            self.unknown_image_path = temp_image_path
            self.reference_dict_path = reference_dict_path.name if hasattr(reference_dict_path, 'name') else reference_dict_path
            self.index_to_class_mapping_path = index_to_class_mapping_path.name if hasattr(index_to_class_mapping_path, 'name') else index_to_class_mapping_path
            self.model_path = model_path.name if hasattr(model_path, 'name') else model_path
            self.edgeface_model_path = edgeface_model_path.name if hasattr(edgeface_model_path, 'name') else edgeface_model_path
            self.algorithm = algorithm
            self.accelerator = accelerator
            self.resolution = resolution
            self.similarity_threshold = similarity_threshold

    args = Args()

    # Validate inputs
    if not all([args.reference_dict_path, args.index_to_class_mapping_path, args.model_path]):
        return '<div class="error-message">Error: Please provide all required files (reference dict, index-to-class mapping, and model).</div>'

    try:
        # Call the inference function from end2end_inference.py
        results = inference_and_confirm(args)

        # Format output as HTML for Gradio
        output = '<div class="results-container">'
        output += '<h2 class="result-title">Inference Results</h2>'

        if not results:
            output += '<div class="error-message">No results returned from inference.</div>'
        else:
            for idx, result in enumerate(results, 1):
                output += '<div class="result-card">'
                output += f'<h3 class="result-title">Result {idx}</h3>'

                # Person Name
                person_name = result.get('predicted_class', 'N/A')
                output += f'<div class="result-item"><span class="label">Person Name</span><span class="value">{person_name}</span></div>'

                # Confidence
                confidence = result.get('confidence', 'N/A')
                confidence_str = f'{confidence:.4f}' if isinstance(confidence, (int, float)) else 'N/A'
                output += f'<div class="result-item"><span class="label">Confidence</span><span class="value">{confidence_str}</span></div>'

                # Similarity with Reference Image
                similarity = result.get('similarity', 'N/A')
                similarity_str = f'{similarity:.4f}' if isinstance(similarity, (int, float)) else 'N/A'
                output += f'<div class="result-item"><span class="label">Similarity with<br>Reference Image</span><span class="value">{similarity_str}</span></div>'

                # Confirmed Person
                confirmed = result.get('confirmed', 'N/A')
                confirmed_class = 'confirmed-true' if confirmed is True else 'confirmed-false' if confirmed is False else ''
                confirmed_str = str(confirmed) if confirmed is not None else 'N/A'
                output += f'<div class="result-item"><span class="label">Confirmed Person</span><span class="value {confirmed_class}">{confirmed_str}</span></div>'

                output += '</div>'

        output += '</div>'
        return output

    except Exception as e:
        return f'<div class="error-message">Error during inference: {str(e)}</div>'

    finally:
        # Clean up temporary image
        if os.path.exists(temp_image_path):
            os.remove(temp_image_path)
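Editor's note: `run_inference` can also be driven outside Gradio. A minimal usage sketch, assuming checkpoints and reference data have already been prepared as in the README and that it is run from the repository root; the paths below mirror the defaults used by the app, but any valid paths work:

```python
# Illustrative only; not part of the commit. Run from the repository root after
# downloading checkpoints and building data/reference_data/reference_image_data.json.
import sys
from PIL import Image

sys.path.insert(0, "apps")  # lets gradio_app.* resolve the same way as in apps/gradio_app.py
from gradio_app.inference import run_inference

html = run_inference(
    Image.open("tests/test_images/Tom Cruise.jpg"),
    reference_dict_path="data/reference_data/reference_image_data.json",
    index_to_class_mapping_path="ckpts/index_to_class_mapping.json",
    model_path="ckpts/SlimFace_efficientnet_b3_full_model.pth",
    edgeface_model_path="ckpts/idiap/edgeface_s_gamma_05.pt",
    algorithm="yolo",
    resolution=300,
    similarity_threshold=0.3,
)
print(html)  # the same HTML fragment the Gradio app renders
```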
apps/gradio_app/static/script.js
ADDED
@@ -0,0 +1,3 @@
// Placeholder for future JavaScript functionality
// Currently, no JavaScript is required for the Gradio app as interactions are handled by Gradio
console.log("SlimFace Gradio App JavaScript loaded");
apps/gradio_app/static/style.css
ADDED
@@ -0,0 +1,229 @@
body {
    font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
    background: linear-gradient(145deg, #e2e8f0 0%, #b8c6db 100%);
    margin: 0;
    padding: 0;
    min-height: 100vh;
    color: #1a202c;
}

.gradio-container {
    max-width: 1280px;
    margin: 0 auto;
    padding: 2.5rem 1.5rem;
    box-sizing: border-box;
}

h1 {
    color: #1a202c;
    font-size: 2.75rem;
    font-weight: 800;
    text-align: center;
    margin-bottom: 2.5rem;
    letter-spacing: -0.025em;
    background: linear-gradient(to right, #2b6cb0, #4a90e2);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
}

.section-title {
    color: #1a202c;
    font-size: 1.5rem;
    font-weight: 700;
    margin-bottom: 1rem;
    border-bottom: 2px solid #4a90e2;
    padding-bottom: 0.5rem;
    letter-spacing: -0.015em;
}

.section-group {
    background: rgba(255, 255, 255, 0.95);
    border-radius: 0.5rem;
    padding: 1.5rem;
    border: 1px solid rgba(226, 232, 240, 0.5);
    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
}

.results-container {
    display: flex;
    flex-direction: column;
    gap: 1.75rem;
    padding: 2rem;
    background: rgba(255, 255, 255, 0.95);
    border-radius: 1.25rem;
    box-shadow: 0 10px 20px rgba(0, 0, 0, 0.15), 0 4px 6px rgba(0, 0, 0, 0.1);
    border: 1px solid rgba(226, 232, 240, 0.5);
    backdrop-filter: blur(8px);
}

.result-card {
    background: linear-gradient(145deg, #f7fafc, #edf2f7);
    border-radius: 1rem;
    padding: 2.25rem;
    box-shadow: 0 6px 12px rgba(0, 0, 0, 0.1);
    transition: transform 0.3s ease, box-shadow 0.3s ease, background 0.3s ease;
    position: relative;
    overflow: hidden;
}

.result-card:hover {
    transform: translateY(-5px);
    box-shadow: 0 10px 24px rgba(0, 0, 0, 0.15);
    background: linear-gradient(145deg, #ffffff, #e6eefa);
}

.result-card::before {
    content: '';
    position: absolute;
    top: 0;
    left: 0;
    width: 100%;
    height: 4px;
    background: linear-gradient(to right, #4a90e2, #63b3ed);
    transition: height 0.3s ease;
}

.result-card:hover::before {
    height: 8px;
}

.result-title {
    color: #1a202c;
    font-size: 1.875rem;
    font-weight: 700;
    margin-bottom: 1.5rem;
    border-bottom: 3px solid #4a90e2;
    padding-bottom: 0.75rem;
    letter-spacing: -0.015em;
}

.result-item {
    display: flex;
    justify-content: space-between;
    align-items: center;
    margin: 1rem 0;
    font-size: 1.125rem;
    color: #2d3748;
    line-height: 1.6;
}

.label {
    font-weight: 600;
    color: #2b6cb0;
    text-align: left;
    text-transform: uppercase;
    font-size: 0.95rem;
    letter-spacing: 0.05em;
    flex: 0 0 auto;
}

.value {
    color: #1a202c;
    font-weight: 500;
    text-align: right;
    flex: 0 0 auto;
}

.value.confirmed-true {
    color: #2f855a;
    font-weight: 600;
    background: #c6f6d5;
    padding: 0.25rem 0.5rem;
    border-radius: 0.375rem;
}

.value.confirmed-false {
    color: #c53030;
    font-weight: 600;
    background: #fed7d7;
    padding: 0.25rem 0.5rem;
    border-radius: 0.375rem;
}

.error-message {
    background: #fef2f2;
    color: #9b2c2c;
    padding: 1.75rem;
    border-radius: 0.875rem;
    margin: 1.25rem 0;
    font-size: 1.125rem;
    font-weight: 500;
    border: 1px solid #e53e3e;
    box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
}

.centered-button {
    display: block;
    margin: 1rem auto;
    background: #4a90e2;
    color: white;
    padding: 0.75rem 1.5rem;
    border-radius: 0.5rem;
    border: none;
    font-size: 1rem;
    font-weight: 600;
    cursor: pointer;
    transition: background 0.3s ease;
    position: relative;
    padding-left: 2.5rem;
    width: 30%;
}

.centered-button:hover {
    background: #2b6cb0;
}

.centered-button::after {
    content: '🤔';
    position: absolute;
    left: 0.75rem;
    top: 50%;
    transform: translateY(-50%);
    font-size: 1.2rem;
}

@media (max-width: 768px) {
    .gradio-container {
        padding: 1.5rem;
    }

    h1 {
        font-size: 2rem;
    }

    .results-container {
        padding: 1.5rem;
    }

    .result-card {
        padding: 1.5rem;
    }

    .result-title {
        font-size: 1.5rem;
    }

    .result-item {
        font-size: 1rem;
        flex-direction: column;
        align-items: flex-start;
        gap: 0.5rem;
    }

    .label, .value {
        text-align: left;
    }

    .section-title {
        font-size: 1.25rem;
    }

    .section-group {
        padding: 1rem;
    }

    .centered-button {
        padding: 0.5rem 1rem;
        font-size: 0.9rem;
    }
}
assets/comparision.md
ADDED
@@ -0,0 +1,11 @@
slim_face_vit_b_16
Train loss: 0.0074, Train acc: 0.9980, Val loss: 0.2179, Val acc: 0.9336

efficientnet_b3
Train loss: 0.0014, Train acc: 1.0000, Val loss: 0.1931, Val acc: 0.9427

efficientnet_v2_s
Train loss: 0.0016, Train acc: 1.0000, Val loss: 0.2374, Val acc: 0.9375

regnet_y_800mf
Train loss: 0.0033, Train acc: 0.9997, Val loss: 0.3766, Val acc: 0.8906
assets/examples/.gitkeep
ADDED
File without changes
ckpts/.gitignore
ADDED
@@ -0,0 +1,4 @@
# Ignore everything in this directory
*
# Except this .gitignore file
!.gitignore
configs/accelerate_config.yaml
ADDED
@@ -0,0 +1,6 @@
compute_environment: LOCAL_MACHINE
distributed_type: FSDP
num_processes: 4
mixed_precision: fp16
fsdp_config:
  fsdp_offload_params: true
configs/image_classification_models_config.yaml
ADDED
@@ -0,0 +1,249 @@
# For more details on models, see https://pytorch.org/vision/main/models.html

# EfficientNet models: Designed for efficiency with compound scaling of depth, width, and resolution.
# These models balance accuracy and computational efficiency, ideal for resource-constrained environments.
efficientnet_b0:
  metrics:
    Acc@1: 77.692  # Top-1 accuracy on ImageNet
    Acc@5: 93.532  # Top-5 accuracy on ImageNet
    GFLOPS: 0.39   # Computational complexity
    Params: 5.3M   # Number of parameters
  model_fn: models.efficientnet_b0
  resolution: 224  # Input image resolution
  weights: models.EfficientNet_B0_Weights.IMAGENET1K_V1  # Pretrained weights on ImageNet

efficientnet_b1:
  metrics:
    Acc@1: 78.642
    Acc@5: 94.186
    GFLOPS: 0.69
    Params: 7.8M
  model_fn: models.efficientnet_b1
  resolution: 240
  weights: models.EfficientNet_B1_Weights.IMAGENET1K_V1

efficientnet_b2:
  metrics:
    Acc@1: 80.608
    Acc@5: 95.31
    GFLOPS: 1.09
    Params: 9.1M
  model_fn: models.efficientnet_b2
  resolution: 260
  weights: models.EfficientNet_B2_Weights.IMAGENET1K_V1

efficientnet_b3:
  metrics:
    Acc@1: 82.008
    Acc@5: 96.054
    GFLOPS: 1.83
    Params: 12.2M
  model_fn: models.efficientnet_b3
  resolution: 300
  weights: models.EfficientNet_B3_Weights.IMAGENET1K_V1

efficientnet_b4:
  metrics:
    Acc@1: 83.384
    Acc@5: 96.594
    GFLOPS: 4.39
    Params: 19.3M
  model_fn: models.efficientnet_b4
  resolution: 380
  weights: models.EfficientNet_B4_Weights.IMAGENET1K_V1

efficientnet_b5:
  metrics:
    Acc@1: 83.444
    Acc@5: 96.628
    GFLOPS: 10.27
    Params: 30.4M
  model_fn: models.efficientnet_b5
  resolution: 456
  weights: models.EfficientNet_B5_Weights.IMAGENET1K_V1

efficientnet_b6:
  metrics:
    Acc@1: 84.008
    Acc@5: 96.916
    GFLOPS: 19.07
    Params: 43.0M
  model_fn: models.efficientnet_b6
  resolution: 528
  weights: models.EfficientNet_B6_Weights.IMAGENET1K_V1

efficientnet_b7:
  metrics:
    Acc@1: 84.122
    Acc@5: 96.908
    GFLOPS: 37.75
    Params: 66.3M
  model_fn: models.efficientnet_b7
  resolution: 600
  weights: models.EfficientNet_B7_Weights.IMAGENET1K_V1

# EfficientNet V2 models: Improved training efficiency and performance over V1.
# These models use progressive learning and optimized scaling for better accuracy.
efficientnet_v2_l:
  metrics:
    Acc@1: 85.808
    Acc@5: 97.788
    GFLOPS: 56.08
    Params: 118.5M
  model_fn: models.efficientnet_v2_l
  resolution: 480
  weights: models.EfficientNet_V2_L_Weights.IMAGENET1K_V1

efficientnet_v2_m:
  metrics:
    Acc@1: 85.112
    Acc@5: 97.156
    GFLOPS: 24.58
    Params: 54.1M
  model_fn: models.efficientnet_v2_m
  resolution: 480
  weights: models.EfficientNet_V2_M_Weights.IMAGENET1K_V1

efficientnet_v2_s:
  metrics:
    Acc@1: 84.228
    Acc@5: 96.878
    GFLOPS: 8.37
    Params: 21.5M
  model_fn: models.efficientnet_v2_s
  resolution: 384
  weights: models.EfficientNet_V2_S_Weights.IMAGENET1K_V1

# RegNet models: Designed for scalability and efficiency with a focus on network design.
# These models optimize for both accuracy and computational efficiency.
regnet_y_128gf:
  metrics:
    Acc@1: 86.068  # High accuracy but computationally expensive
    Acc@5: 97.844
    GFLOPS: 127.52
    Params: 644.8M
  model_fn: models.regnet_y_128gf
  resolution: 224
  weights: models.RegNet_Y_128GF_Weights.IMAGENET1K_SWAG_LINEAR_V1

regnet_y_16gf:
  metrics:
    Acc@1: 82.886
    Acc@5: 96.328
    GFLOPS: 15.91
    Params: 83.6M
  model_fn: models.regnet_y_16gf
  resolution: 224
  weights: models.RegNet_Y_16GF_Weights.IMAGENET1K_V2

regnet_y_1_6gf:
  metrics:
    Acc@1: 80.876
    Acc@5: 95.444
    GFLOPS: 1.61
    Params: 11.2M
  model_fn: models.regnet_y_1_6gf
  resolution: 224
  weights: models.RegNet_Y_1_6GF_Weights.IMAGENET1K_V2

regnet_y_32gf:
  metrics:
    Acc@1: 83.368
    Acc@5: 96.498
    GFLOPS: 32.28
    Params: 145.0M
  model_fn: models.regnet_y_32gf
  resolution: 224
  weights: models.RegNet_Y_32GF_Weights.IMAGENET1K_V2

regnet_y_3_2gf:
  metrics:
    Acc@1: 81.982
    Acc@5: 95.972
    GFLOPS: 3.18
    Params: 19.4M
  model_fn: models.regnet_y_3_2gf
  resolution: 224
  weights: models.RegNet_Y_3_2GF_Weights.IMAGENET1K_V2

regnet_y_400mf:
  metrics:
    Acc@1: 75.804
    Acc@5: 92.742
    GFLOPS: 0.4
    Params: 4.3M
  model_fn: models.regnet_y_400mf
  resolution: 224
  weights: models.RegNet_Y_400MF_Weights.IMAGENET1K_V2

regnet_y_800mf:
  metrics:
    Acc@1: 78.828
    Acc@5: 94.502
    GFLOPS: 0.83
    Params: 6.4M
  model_fn: models.regnet_y_800mf
  resolution: 224
  weights: models.RegNet_Y_800MF_Weights.IMAGENET1K_V2

regnet_y_8gf:
  metrics:
    Acc@1: 82.828
    Acc@5: 96.33
    GFLOPS: 8.47
    Params: 39.4M
  model_fn: models.regnet_y_8gf
  resolution: 224
  weights: models.RegNet_Y_8GF_Weights.IMAGENET1K_V2

# Vision Transformer (ViT) models: Transformer-based architecture for image classification.
# These models excel in capturing long-range dependencies but require significant compute for larger variants.
vit_b_16:
  metrics:
    Acc@1: 81.072  # Base ViT model with balanced accuracy and efficiency
    Acc@5: 95.318
    GFLOPS: 17.56
    Params: 86.6M
  model_fn: models.vit_b_16
  resolution: 224
  weights: models.ViT_B_16_Weights.IMAGENET1K_V1

vit_b_32:
  metrics:
    Acc@1: 75.912  # Larger patch size version of ViT, lower accuracy but fewer computations
    Acc@5: 92.466
    GFLOPS: 4.41
    Params: 88.2M
  model_fn: models.vit_b_32
  resolution: 224
  weights: models.ViT_B_32_Weights.IMAGENET1K_V1

vit_h_14:
  metrics:
    Acc@1: 88.552  # High-performance ViT model with very high accuracy and computational cost
    Acc@5: 98.694
    GFLOPS: 1016.72
    Params: 633.5M
  model_fn: models.vit_h_14
  resolution: 224
  weights: models.ViT_H_14_Weights.IMAGENET1K_SWAG_E2E_V1

vit_l_16:
  metrics:
    Acc@1: 79.662  # Larger ViT model with improved accuracy over base models
    Acc@5: 94.638
    GFLOPS: 61.55
    Params: 304.3M
  model_fn: models.vit_l_16
  resolution: 224
  weights: models.ViT_L_16_Weights.IMAGENET1K_V1

vit_l_32:
  metrics:
    Acc@1: 76.972  # Larger ViT with larger patch size, trading accuracy for reduced compute
    Acc@5: 93.07
    GFLOPS: 15.38
    Params: 306.5M
  model_fn: models.vit_l_32
  resolution: 224
  weights: models.ViT_L_32_Weights.IMAGENET1K_V1
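Editor's note: to make the role of the `model_fn`, `weights`, and `resolution` fields concrete, here is a minimal sketch of how one entry could be turned into a torchvision backbone with a new classification head for transfer learning. It is an illustration only; the repository's actual training code lives in src/slimface/training/accelerate_train.py, which is not shown in this 50-file view, so the helper below is an assumption, not that script.

```python
# Illustrative sketch (not the repository's training code): resolve one entry of
# configs/image_classification_models_config.yaml into a torchvision model.
import yaml
import torch.nn as nn
import torchvision.models as models

with open("configs/image_classification_models_config.yaml") as f:
    cfg = yaml.safe_load(f)

entry = cfg["efficientnet_b3"]

# "models.efficientnet_b3" -> torchvision.models.efficientnet_b3
builder = getattr(models, entry["model_fn"].removeprefix("models."))
# "models.EfficientNet_B3_Weights.IMAGENET1K_V1" -> resolved weight enum
weights = models.get_weight(entry["weights"].removeprefix("models."))

model = builder(weights=weights)

# Swap the ImageNet head for an N-way face-classification head
# (torchvision's EfficientNet keeps its final Linear layer at classifier[-1]).
num_classes = 10  # hypothetical: the number of identities in your dataset
in_features = model.classifier[-1].in_features
model.classifier[-1] = nn.Linear(in_features, num_classes)

print(entry["resolution"])  # images should be resized to this resolution before training
```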
data/raw/.gitignore
ADDED
@@ -0,0 +1,4 @@
# Ignore everything in this directory
*
# Except this .gitignore file
!.gitignore
data/reference_data/images/.gitkeep: ADDED (file without changes)
data/reference_data/images/Akshay Kumar.jpg: ADDED (image, Git LFS)
data/reference_data/images/Alexandra Daddario.jpg: ADDED (image, Git LFS)
data/reference_data/images/Alia Bhatt.jpg: ADDED (image, Git LFS)
data/reference_data/images/Amitabh Bachchan.jpg: ADDED (image, Git LFS)
data/reference_data/images/Andy Samberg.jpg: ADDED (image, Git LFS)
data/reference_data/images/Anushka Sharma.jpg: ADDED (image, Git LFS)
data/reference_data/images/Billie Eilish.jpg: ADDED (image, Git LFS)
data/reference_data/images/Brad Pitt.jpg: ADDED (image, Git LFS)
data/reference_data/images/Camila Cabello.png: ADDED (image, Git LFS)
data/reference_data/images/Charlize Theron.jpg: ADDED (image, Git LFS)
data/reference_data/images/Claire Holt.png: ADDED (image, Git LFS)
data/reference_data/images/Courtney Cox.jpg: ADDED (image, Git LFS)
data/reference_data/images/Dwayne Johnson.jpg: ADDED (image, Git LFS)
data/reference_data/images/Elizabeth Olsen.jpg: ADDED (image, Git LFS)
data/reference_data/images/Ellen Degeneres.jpg: ADDED (image, Git LFS)
data/reference_data/images/Henry Cavill.jpg: ADDED (image, Git LFS)
data/reference_data/images/Hrithik Roshan.jpg: ADDED (image, Git LFS)
data/reference_data/images/Hugh Jackman.jpg: ADDED (image, Git LFS)
data/reference_data/images/Jessica Alba.jpg: ADDED (image, Git LFS)
data/reference_data/images/Kashyap.jpg: ADDED (image, Git LFS)
data/reference_data/images/Lisa Kudrow.jpg: ADDED (image, Git LFS)
data/reference_data/images/Margot Robbie.jpg: ADDED (image, Git LFS)
data/reference_data/images/Marmik.jpg: ADDED (image)
data/reference_data/images/Natalie Portman.jpg: ADDED (image, Git LFS)
data/reference_data/images/Priyanka Chopra.jpg: ADDED (image, Git LFS)
data/reference_data/images/Robert Downey Jr.jpg: ADDED (image)
data/reference_data/images/Roger Federer.jpg: ADDED (image, Git LFS)
data/reference_data/images/Tom Cruise.jpg: ADDED (image, Git LFS)
data/reference_data/images/Vijay Deverakonda.jpg: ADDED (image, Git LFS)
data/reference_data/images/Virat Kohli.jpg: ADDED (image, Git LFS)
data/reference_data/images/Zac Efron.jpg: ADDED (image, Git LFS)
data/reference_data/reference_image_data.json: ADDED (file without changes)