Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -11,7 +11,7 @@ from engine import inference
 
 
 model_trained = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
-model_trained.load_state_dict(torch.load('model_trained.pth',map_location=torch.device('cpu')))
+model_trained.load_state_dict(torch.load('model_trained.pth',map_location=torch.device('cpu')),strict=False)
 image_processor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
 tokenizer = GPT2TokenizerFast.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
 
@@ -38,12 +38,12 @@ prefix_length = 10
 
 model = ClipCaptionModel(prefix_length)
 
-model.load_state_dict(torch.load('model.h5',map_location=torch.device('cpu')))
+model.load_state_dict(torch.load('model.h5',map_location=torch.device('cpu')),strict=False)
 
 model = model.eval()
 
 coco_model = ClipCaptionModel(prefix_length)
-coco_model.load_state_dict(torch.load('COCO_model.h5',map_location=torch.device('cpu')))
+coco_model.load_state_dict(torch.load('COCO_model.h5',map_location=torch.device('cpu')),strict=False)
 # model = model.eval()
 
 
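The only functional change is passing strict=False to the three load_state_dict calls, so a checkpoint whose keys do not exactly match the module's keys is loaded partially instead of raising a RuntimeError. A minimal sketch of that behavior, using the first call from app.py (the printed key report at the end is an illustration of a sanity check, not part of this change):

import torch
from transformers import VisionEncoderDecoderModel

# Same base model and checkpoint path as in app.py.
model_trained = VisionEncoderDecoderModel.from_pretrained(
    "nlpconnect/vit-gpt2-image-captioning"
)

# With strict=False, keys present in the checkpoint but absent from the
# model (and vice versa) are skipped rather than raising a RuntimeError.
result = model_trained.load_state_dict(
    torch.load("model_trained.pth", map_location=torch.device("cpu")),
    strict=False,
)

# load_state_dict returns a NamedTuple listing what was skipped; logging
# it confirms the mismatch is limited to the expected keys.
print("missing keys:", result.missing_keys)
print("unexpected keys:", result.unexpected_keys)

Inspecting missing_keys and unexpected_keys after a non-strict load is the usual way to verify that only intentionally renamed or extra parameters were skipped, since strict=False will otherwise silently ignore a wrong or corrupted checkpoint.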