Upyaya committed
Commit
f29da3c
1 Parent(s): 1cf7cf7

Reset Variable

Files changed (1)
  1. app.py +8 -3
app.py CHANGED
@@ -52,17 +52,18 @@ st.caption("So, when customer find the right product they are mostly going to ad
 st.caption("Accurate and enchanting descriptions of clothes on shopping websites can help customers without fashion knowledge to better understand the features (attributes, style, functionality, etc.) of the items and increase online sales by enticing more customers.")
 st.caption("Also, most of the time when any customer visits shopping websites, they are looking for a certain style or type of clothes that wish to purchase, they search for the item by providing a description of the item and the system finds the relevant items that match the search query by computing the similarity score between the query and the item caption.")
 st.caption("Given the clothes image provide a short caption that describes the item. In general, in image captioning datasets (e.g., COCO, Fliker), the descriptions of fashion items have three unique features, which makes the automatic generation of captions a challenging task. First, fashion captioning needs to describe the attributes of an item, while image captioning generally narrates the objects and their relations in the image.")
-st.caption("Solution: Used Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models [(BLIP-2)](https://huggingface.co/Salesforce/blip2-opt-2.7b) by Salesforce")
+st.caption("Solution: Used Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models [(BLIP-2)](https://huggingface.co/Salesforce/blip2-opt-2.7b) by Salesforce. The original model size was too large. It was quite challenging to fit and fine-tune the model on the 16GB GPU.")
+st.caption("So, for this project have downloaded the pre-trained model [ybelkada/blip2-opt-2.7b-fp16-sharded](https://huggingface.co/ybelkada/blip2-opt-2.7b-fp16-sharded). This model uses OPT-2.7b LLM model with reduced precision to float16.")
+
 st.caption("For more detail: [Github link](https://github.com/SmithaUpadhyaya/fashion_image_caption)") #write
 
 #Select few sample images for the catagory of cloths
 st.caption("Select image:")
-option = 'None'
 option = st.selectbox('From sample', ('None', 'dress', 'earrings', 'sweater', 'sunglasses', 'shoe', 'hat', 'heels', 'socks', 'tee', 'bracelet'), index = 0)
 st.text("Or")
 file_name = st.file_uploader(label = "Upload an image", accept_multiple_files = False)
 
-image = None
+
 btn_click = st.button('Generate')
 st.caption("Application deployed on CPU basic with 16GB RAM")
 
@@ -117,6 +118,10 @@ if btn_click:
 #Output the predict text
 caption_text.text(generated_caption)
 
+#Reset the variable
+option = 'None'
+image = None
+file_name = None
 
 #if __name__ == "__main__":
 # main()
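
For context on the model swap described in the new captions: the fp16-sharded checkpoint halves the download and memory footprint relative to the float32 Salesforce/blip2-opt-2.7b weights, which is presumably what made the 16GB budget workable. Below is a minimal, hedged sketch of loading that checkpoint and generating a caption with the transformers library; the image path, dtype handling, and generation settings are illustrative assumptions, since the commit does not show app.py's actual loading code.

```python
# Sketch only: load ybelkada/blip2-opt-2.7b-fp16-sharded (the checkpoint
# named in the diff) and caption one image. The file name and generation
# arguments are assumptions, not taken from app.py.
import torch
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

model_id = "ybelkada/blip2-opt-2.7b-fp16-sharded"
processor = Blip2Processor.from_pretrained(model_id)
model = Blip2ForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # the shards are already stored in float16
)

# On a CPU-only host (like the Space's "CPU basic" tier), torch.float32
# may be the safer dtype; float16 shown here to match the checkpoint.
image = Image.open("sample_dress.jpg").convert("RGB")  # hypothetical sample image
inputs = processor(images=image, return_tensors="pt").to(model.device, torch.float16)
generated_ids = model.generate(**inputs, max_new_tokens=30)
caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
print(caption)
```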
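On the "Reset Variable" hunk itself: Streamlit re-executes app.py top to bottom on every widget interaction, so the module-level assignments added after the output step clear the names only for the remainder of the current run. A small sketch of that flow, with the captioning call stubbed out; widget labels mirror the diff, everything else is an assumption about the surrounding app.py:

```python
# Hedged sketch of the rerun/reset flow this commit targets.
import streamlit as st

option = st.selectbox('From sample', ('None', 'dress', 'earrings'), index=0)
file_name = st.file_uploader(label='Upload an image', accept_multiple_files=False)
image = None

if st.button('Generate'):
    # ... load the image from `option` or `file_name`, run the model ...
    st.caption("generated caption would appear here")  # stub, not app.py's output

    # The commit's reset: leave this run in a clean state. Note that the
    # widgets rebuild these values on the next rerun anyway; state that must
    # persist across reruns would normally live in st.session_state instead.
    option = 'None'
    image = None
    file_name = None
```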