jagilley committed on
Commit
d65283f
1 Parent(s): e8b02b5

add app py

Browse files
Files changed (1) hide show
  1. app.py +55 -0
app.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import clip
3
+ from PIL import Image
4
+ import gradio as gr
5
+
6
+ device = "cuda" if torch.cuda.is_available() else "cpu"
7
+ model, preprocess = clip.load("ViT-B/32", device=device)
8
+
9
+ def hotornot(image, gender):
10
+ image = Image.fromarray(image.astype("uint8"), "RGB")
11
+ image = preprocess(image).unsqueeze(0).to(device)
12
+ positive_terms = [f'a hot {gender}', f'a beautiful {gender}', f'an attractive {gender}']
13
+ negative_terms = [f'a gross {gender}', f'an ugly {gender}', f'a hideous {gender}']
14
+
15
+ pairs = list(zip(positive_terms, negative_terms))
16
+
17
+ def evaluate(terms):
18
+ text = clip.tokenize(terms).to(device)
19
+
20
+ with torch.no_grad():
21
+ logits_per_image, logits_per_text = model(image, text)
22
+ probs = logits_per_image.softmax(dim=-1).cpu().numpy()
23
+ return probs[0]
24
+
25
+ probs = [evaluate(pair) for pair in pairs]
26
+
27
+ positive_probs = [prob[0] for prob in probs]
28
+ negative_probs = [prob[1] for prob in probs]
29
+
30
+ print("+:", positive_probs)
31
+ print("-:", negative_probs)
32
+
33
+ hot_score = sum(positive_probs)/len(positive_probs)
34
+ ugly_score = sum(negative_probs)/len(negative_probs)
35
+ print(hot_score, ugly_score)
36
+ composite = ((hot_score - ugly_score)+1) * 50
37
+ composite = round(composite, 2)
38
+ return composite
39
+
40
+ iface = gr.Interface(
41
+ fn=hotornot,
42
+ inputs=[
43
+ gr.inputs.Image(label="Image"),
44
+ gr.inputs.Dropdown(
45
+ [
46
+ 'person', 'man', 'woman'
47
+ ],
48
+ default='person',
49
+ )
50
+ ],
51
+ outputs="number",
52
+ title="Hot or Not",
53
+ description="A simple hot or not app using OpenAI's CLIP model.",
54
+ )
55
+ iface.launch()