import gradio as gr
import os
import yaml
import pandas as pd
from src.face_texture import GetFaceTexture
from src.face_symmetry import GetFaceSymmetry
from src.face_demographics import GetFaceDemographics
from src.face_proportions import GetFaceProportions
from PIL import Image as PILImage
from typing import Any, Tuple


def get_results(image_input: PILImage.Image) -> Tuple[Any, ...]:
    # Run each analysis module on the uploaded face and collect its metrics and
    # annotated images.
    demographics_dict = GetFaceDemographics().main(image_input)
    (
        ratios_dict,
        face_landmarks_image,
    ) = GetFaceProportions().main(image_input)
    face_symmetry_image, symmetry_dict = GetFaceSymmetry().main(image_input)
    face_image, face_texture_image, texture_dict = GetFaceTexture().main(image_input)

    results = {
        "Demographic predictions": demographics_dict,
        "Face proportions": ratios_dict,
        "Face symmetry metrics": symmetry_dict,
        "Face texture metrics": texture_dict,
    }

    return (
        results,
        face_image,
        face_landmarks_image,
        face_symmetry_image,
        face_texture_image,
    )
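
# Minimal usage sketch (mirrors how output_fn calls get_results below; "face.jpg"
# is a hypothetical path used only for illustration):
#   metrics, face, landmarks, symmetry, texture = get_results(PILImage.open("face.jpg"))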


def concatenate_image(
    image_1: PILImage.Image, image_2: PILImage.Image
) -> PILImage.Image:
    # Paste the two images side by side on a new canvas (sized to image_1's height).
    image = PILImage.new("RGB", (image_1.width + image_2.width, image_1.height))
    image.paste(image_1, (0, 0))
    image.paste(image_2, (image_1.width, 0))
    return image
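
# Illustration with hypothetical sizes: concatenating a 200x300 and a 250x300 image
# yields a single 450x300 canvas with the two faces side by side.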


def get_dict_child_data(results_image: dict, image_number: int) -> dict:
    # Flatten the nested per-category results into a single row keyed by metric name.
    flattened_data = {"image": f"Face {image_number}"}
    for sub_dict in results_image.values():
        for sub_key, value in sub_dict.items():
            flattened_data[sub_key] = value
    return flattened_data
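
# Illustration with hypothetical values: a nested results dict such as
#   {"Demographic predictions": {"age": 27}, "Face symmetry metrics": {"score": 0.91}}
# flattens, for image_number=1, to
#   {"image": "Face 1", "age": 27, "score": 0.91}
# and becomes one row of the results DataFrame assembled in output_fn.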


def output_fn(
    image_input_1: PILImage.Image, image_input_2: PILImage.Image
) -> Tuple[Any, ...]:
    # Load the human-readable explanation of each metric from parameters.yml.
    with open("parameters.yml", "r") as file:
        data = yaml.safe_load(file)
        results_interpretation = data["results_interpretation"]

    if image_input_1 is not None and image_input_2 is not None:
        (
            results_image_1,
            face_image_1,
            face_landmarks_image_1,
            face_symmetry_image_1,
            face_texture_image_1,
        ) = get_results(image_input_1)
        (
            results_image_2,
            face_image_2,
            face_landmarks_image_2,
            face_symmetry_image_2,
            face_texture_image_2,
        ) = get_results(image_input_2)
        results_image_1 = get_dict_child_data(results_image_1, 1)
        results_image_2 = get_dict_child_data(results_image_2, 2)
        results_df = pd.DataFrame([results_image_1, results_image_2])
        face_image = concatenate_image(face_image_1, face_image_2)
        face_landmarks_image = concatenate_image(
            face_landmarks_image_1, face_landmarks_image_2
        )
        face_symmetry_image = concatenate_image(
            face_symmetry_image_1, face_symmetry_image_2
        )
        face_texture_image = concatenate_image(
            face_texture_image_1, face_texture_image_2
        )

    # Only one image provided: analyse that image alone. (If both inputs are left
    # empty, neither branch binds the outputs and Gradio reports the resulting error.)
    elif image_input_1 is None and image_input_2 is not None:
        (
            results,
            face_image,
            face_landmarks_image,
            face_symmetry_image,
            face_texture_image,
        ) = get_results(image_input_2)
        results_df = pd.DataFrame([get_dict_child_data(results, 2)])

    elif image_input_2 is None and image_input_1 is not None:
        (
            results,
            face_image,
            face_landmarks_image,
            face_symmetry_image,
            face_texture_image,
        ) = get_results(image_input_1)
        results_df = pd.DataFrame([get_dict_child_data(results, 1)])

    return (
        results_df,
        results_interpretation,
        face_image,
        face_landmarks_image,
        face_symmetry_image,
        face_texture_image,
    )


# Example image shown by default in the first upload slot.
gigi_hadid = os.path.join(os.path.dirname(__file__), "data/gigi_hadid.webp")

# Wire the analysis pipeline into a two-image Gradio interface.
iface = gr.Interface(
    fn=output_fn,
    inputs=[
        gr.Image(type="pil", label="Upload Face 1", value=gigi_hadid),
        gr.Image(type="pil", label="Upload Face 2"),
    ],
    outputs=[
        gr.DataFrame(label="Results"),
        gr.JSON(label="Results explainer"),
        gr.Image(type="pil", label="Extracted face"),
        gr.Image(type="pil", label="Face landmarks"),
        gr.Image(type="pil", label="Face symmetry"),
        gr.Image(type="pil", label="Extracted face texture"),
    ],
    title="Advanced Facial Feature Detector",
    description="""
    <div style="margin-bottom: 20px;">
        <h3><center>Turn your selfie into insights! Discover age and gender predictions, symmetry evaluations, and detailed proportion and texture analyses with our app.</center></h3>
        <hr style="margin-top: 20px; margin-bottom: 20px;">
        <p><strong>Instructions:</strong> Upload up to 2 photos. For optimal results, upload a clear, front-facing image (see example). Either drag and drop your photo or click <i>Upload Face</i>, then press <i>Submit</i>.</p>
        <p><strong>Other information:</strong></p>
        <ul>
            <li>Computing the output takes roughly 5 to 30 seconds.</li>
            <li>No uploaded photo is stored.</li>
            <li>If an error occurs, try again or use a different photo or angle.</li>
            <li>Once submitted, a section with the results and the associated images is displayed.</li>
        </ul>
    </div>
    """,
    theme=gr.themes.Soft(),
    live=False,
)

iface.launch()
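
# Note: launch() starts a local Gradio server; a temporary public link can be
# requested with iface.launch(share=True) if needed (not used here).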