Upyaya committed on
Commit 587ee18
Parent: 874ef64

Change to allow testing with the provided sample images

Files changed (1): app.py (+28 -14)
app.py CHANGED
@@ -9,8 +9,20 @@ import os
 preprocess_ckp = "Salesforce/blip2-opt-2.7b" #Checkpoint path used to preprocess the image
 base_model_ckp = "./model/blip2-opt-2.7b-fp16-sharded" #Base model checkpoint path
 peft_model_ckp = "./model/blip2_peft" #PEFT model checkpoint path
-sample_img_path = "./sample_images/"
-
+sample_img_path = "./sample_images"
+
+map_sampleid_name = {
+    'dress': '00fe223d-9d1f-4bd3-a556-7ece9d28e6fb.jpeg',
+    'earrings': '0b3862ae-f89e-419c-bc1e-57418abd4180.jpeg',
+    'sweater': '0c21ba7b-ceb6-4136-94a4-1d4394499986.jpeg',
+    'sunglasses': '0e44ec10-e53b-473a-a77f-ac8828bb5e01.jpeg',
+    'shoe': '4cd37d6d-e7ea-4c6e-aab2-af700e480bc1.jpeg',
+    'hat': '69aeb517-c66c-47b8-af7d-bdf1fde57ed0.jpeg',
+    'heels': '447abc42-6ac7-4458-a514-bdcd570b1cd1.jpeg',
+    'socks': 'd188836c-b734-4031-98e5-423d5ff1239d.jpeg',
+    'tee': 'e2d8637a-5478-429d-a2a8-3d5859dbc64d.jpeg',
+    'bracelet': 'e78518ac-0f54-4483-a233-fad6511f0b86.jpeg'
+}
 #init_model_required = True
 
 def init_model():
@@ -32,31 +44,33 @@ def init_model():
     #init_model_required = False
 
     return processor, model
-
 
 def main():
 
     st.title("Fashion Image Caption using BLIP2")
 
-    #processor, model = init_model()
+    processor, model = init_model()
 
     #Select a few sample images for the category of clothes
-    option = st.selectbox('Select from sample an images', ('None','cap', 'tee', 'dress'), index = 0)
+    st.text("Select image:")
+    option = st.selectbox('From sample', ('None', 'dress', 'earrings', 'sweater', 'sunglasses', 'shoe', 'hat', 'heels', 'socks', 'tee', 'bracelet'), index = 0)
     st.text("OR")
     file_name = st.file_uploader("Upload an image")
-    st.text(option)
-
-    """
-    if file_name is None and option is not None:
+
+    image = None
+    if file_name is not None:
 
-        file_name = os.path.join(sample_img_path, option)
+        image = Image.open(file_name)
 
-    if file_name is not None:
+    elif option != 'None':
 
-        image_col, caption_text = st.columns(2)
+        file_name = os.path.join(sample_img_path, map_sampleid_name[option])
+        image = Image.open(file_name)
+
+    if image is not None:
 
+        image_col, caption_text = st.columns(2)
         image_col.header("Image")
-        image = Image.open(file_name)
         image_col.image(image, use_column_width = True)
 
         #Preprocess the image
@@ -75,7 +89,7 @@ def main():
         #Output the predicted text
         caption_text.header("Generated Caption")
         caption_text.text(generated_caption)
-    """
+
 
 if __name__ == "__main__":
     main()
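Note: the second hunk shows only the tail of init_model(), so the model-loading logic is not part of this diff. Below is a minimal sketch of how the processor, base model, and PEFT adapter could be wired together from the three checkpoint paths above; the loader classes, the st.cache_resource decorator, and the device_map argument are assumptions, not code from this commit:

import streamlit as st
from transformers import AutoProcessor, Blip2ForConditionalGeneration
from peft import PeftModel

@st.cache_resource  #Assumed: cache so Streamlit reruns don't reload the weights
def init_model():
    #Processor (tokenizer + image processor) from the public BLIP2 checkpoint
    processor = AutoProcessor.from_pretrained(preprocess_ckp)
    #Sharded fp16 base model from the local path, with the PEFT adapter applied on top
    model = Blip2ForConditionalGeneration.from_pretrained(base_model_ckp, device_map="auto")
    model = PeftModel.from_pretrained(model, peft_model_ckp)
    return processor, model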
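The third hunk skips from "#Preprocess the image" (old line 62 / new line 76) to the caption output at line 89, so the generation step is also elided. A sketch of the usual BLIP2 captioning call that would produce generated_caption inside the "if image is not None:" block; the generation parameters are assumptions:

#Preprocess the PIL image and generate a caption with the PEFT-tuned BLIP2 model
inputs = processor(images=image, return_tensors="pt").to(model.device)
generated_ids = model.generate(**inputs, max_new_tokens=25)
generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()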