mrrahul011 committed
Commit fcf06b7 · verified · 1 Parent(s): fc37121

Create app.py

Files changed (1):
  app.py +59 -0
app.py ADDED
@@ -0,0 +1,59 @@
+ import assignment23
+ from assignment23 import make_train_valid_dfs
+ from assignment23 import get_image_embeddings
+ from assignment23 import inference_CLIP
+
+ import gradio as gr
+ import zipfile
+ import os
+ import pandas as pd
+ import subprocess
+
+ image_path = "./Images"
+ captions_path = "."
+ data_source = 'flickr8k.zip'
+
+ # Unpack the Flickr8k images and captions into the working directory
+ with zipfile.ZipFile(data_source, 'r') as zip_ref:
+     zip_ref.extractall('.')
+
+ # Debug: print the working directory and its contents
+ cmd = "pwd"
+ output1 = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ print(output1)
+
+ cmd = "ls -l"
+ output1 = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ print(output1)
+
+ # Flickr8k provides five captions per image; assign a shared id to each
+ # group of five rows so captions can be matched back to their image,
+ # then persist and reload the result as captions.csv
+ df = pd.read_csv("captions.txt")
+ df['id'] = [id_ for id_ in range(df.shape[0] // 5) for _ in range(5)]
+ df.to_csv("captions.csv", index=False)
+ df = pd.read_csv("captions.csv")
+
+ # Build the validation split and precompute its image embeddings
+ # with the trained CLIP weights
+ _, valid_df = make_train_valid_dfs()
+ model, image_embeddings = get_image_embeddings(valid_df, "best.pt")
+
+ examples = ["man and woman on road"]
+
+ def greet(query_text):
+     print("Going to invoke inference_CLIP")
+     return inference_CLIP(query_text)
+
+ # Gallery to display the images CLIP retrieves for the text query
+ gallery = gr.Gallery(
+     label="CLIP result images", show_label=True, elem_id="gallery",
+     columns=[3], rows=[3], object_fit="contain", height="auto")
+
+ demo = gr.Interface(fn=greet,
+                     inputs=gr.Dropdown(choices=examples, label="Search Image by text prompt"),
+                     outputs=gallery,
+                     title="Open AI CLIP")
+
+ demo.launch()
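
For context, the three helpers imported from assignment23 are not part of this commit. The sketch below is a hypothetical stand-in illustrating the contract app.py appears to rely on; the signatures are inferred from the call sites above, and the real module (which implements CLIP training and retrieval) may differ.

# Hypothetical stand-in for the assignment23 module (not in this commit);
# signatures are inferred from how app.py calls these helpers.
from typing import List, Tuple

import pandas as pd


def make_train_valid_dfs() -> Tuple[pd.DataFrame, pd.DataFrame]:
    # Assumed: split captions.csv into train/validation frames by image id.
    df = pd.read_csv("captions.csv")
    ids = df["id"].unique()
    valid_ids = set(ids[: max(1, len(ids) // 5)])  # e.g. ~20% validation
    valid_df = df[df["id"].isin(valid_ids)].reset_index(drop=True)
    train_df = df[~df["id"].isin(valid_ids)].reset_index(drop=True)
    return train_df, valid_df


def get_image_embeddings(valid_df: pd.DataFrame, model_path: str):
    # Assumed: load trained CLIP weights from model_path and return
    # (model, image_embeddings) for the validation images.
    raise NotImplementedError


def inference_CLIP(query_text: str) -> List[str]:
    # Assumed: embed query_text, rank the precomputed image embeddings by
    # similarity, and return the best-matching images for the gallery.
    raise NotImplementedError

The one hard requirement app.py imposes is that inference_CLIP returns something gr.Gallery can render, such as a list of image file paths, PIL images, or (image, caption) pairs.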