Soumen committed on
Commit 1730199
1 Parent(s): 4b17ef5

Update app.py

Files changed (1)
app.py  +2 -1
app.py CHANGED
@@ -8,6 +8,7 @@ def load_models():
     feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
     tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
     return model, feature_extractor, tokenizer
+model, feature_extractor, tokenizer = load_models()
 #pickle.load(open('energy_model.pkl', 'rb'))
 #vocab = np.load('w2i.p', allow_pickle=True)
 st.title("Image_Captioning_App")
@@ -24,9 +25,9 @@ def load_image(img):
     return im
 uploaded_photo = c2.file_uploader("Upload Image",type=['jpg','png','jpeg'], on_change=change_photo_state)
 camera_photo = c2.camera_input("Take a photo", on_change=change_photo_state)
+
 #st.subheader("Detection")
 if st.checkbox("Generate_Caption"):
-    model, feature_extractor, tokenizer = load_models()
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     model.to(device)
     max_length = 16
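
What the commit changes: the call model, feature_extractor, tokenizer = load_models() moves out of the if st.checkbox("Generate_Caption"): branch and up to module level, right after the load_models() definition, so the model, feature extractor, and tokenizer exist before the caption branch runs. Below is a minimal sketch of how the resulting app.py could fit together. Only the lines visible in the diff above come from the actual file; the imports, the body of load_models(), the c1/c2 columns, the change_photo_state() helper, and the caption-generation step at the end are assumptions added for illustration.

# Minimal sketch of app.py after this commit (assumptions noted inline).
import streamlit as st
import torch
from PIL import Image
from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer

@st.cache_resource  # assumed: cache the heavy objects so they load once per session, not on every rerun
def load_models():
    # assumed line for the model itself; the diff only shows the feature extractor and tokenizer
    model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
    feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
    tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
    return model, feature_extractor, tokenizer

# The line this commit adds: the models are created at module level,
# not inside the "Generate_Caption" checkbox branch.
model, feature_extractor, tokenizer = load_models()

st.title("Image_Captioning_App")

def change_photo_state():
    # assumed helper referenced by on_change=; its body is not shown in the diff
    st.session_state["photo"] = "done"

def load_image(img):
    im = Image.open(img)
    return im

c1, c2 = st.columns(2)  # assumed layout; the diff only references c2
uploaded_photo = c2.file_uploader("Upload Image", type=['jpg', 'png', 'jpeg'], on_change=change_photo_state)
camera_photo = c2.camera_input("Take a photo", on_change=change_photo_state)

#st.subheader("Detection")
if st.checkbox("Generate_Caption"):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    max_length = 16
    # everything below is an assumed caption step, not shown in the diff
    photo = uploaded_photo or camera_photo
    if photo is not None:
        image = load_image(photo).convert("RGB")
        pixel_values = feature_extractor(images=[image], return_tensors="pt").pixel_values.to(device)
        output_ids = model.generate(pixel_values, max_length=max_length, num_beams=4)
        caption = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        st.success(caption)

Note that Streamlit reruns the whole script on every widget interaction, so a module-level load_models() call by itself would still reload the weights on each rerun; wrapping it in a cache decorator as in the sketch is the usual way to keep the load to once per session, but the diff does not show whether the real app does this.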