amosyou committed on
Commit
d4f762a
β€’
1 Parent(s): 145deb3

fix: more imports + loading files

Browse files
app.py CHANGED
@@ -22,10 +22,10 @@ map_loc = None if torch.cuda.is_available() and use_gpu else 'cpu'
22
 
23
  # Inverse Cooking
24
  ingrs_vocab = pickle.load(
25
- hf_hub_download(REPO_ID, 'data/ingr_vocab.pkl', HF_TOKEN), 'rb'
26
  )
27
  vocab = pickle.load(
28
- hf_hub_download(REPO_ID, 'data/instr_vocab.pkl', HF_TOKEN), 'rb'
29
  )
30
 
31
  ingr_vocab_size = len(ingrs_vocab)
@@ -56,7 +56,7 @@ args.ingrs_only = False
56
  # Load the trained model parameters
57
  model = get_model(args, ingr_vocab_size, instrs_vocab_size)
58
  model.load_state_dict(torch.load(
59
- hf_hub_download(REPO_ID, 'data/modelbest.ckpt', HF_TOKEN), map_location=map_loc)
60
  )
61
  model = model.to(device)
62
  model.eval()
 
22
 
23
  # Inverse Cooking
24
  ingrs_vocab = pickle.load(
25
+ open(hf_hub_download(REPO_ID, 'data/ingr_vocab.pkl', token=HF_TOKEN), 'rb')
26
  )
27
  vocab = pickle.load(
28
+ open(hf_hub_download(REPO_ID, 'data/instr_vocab.pkl', token=HF_TOKEN), 'rb')
29
  )
30
 
31
  ingr_vocab_size = len(ingrs_vocab)
 
56
  # Load the trained model parameters
57
  model = get_model(args, ingr_vocab_size, instrs_vocab_size)
58
  model.load_state_dict(torch.load(
59
+ hf_hub_download(REPO_ID, 'data/modelbest.ckpt', token=HF_TOKEN), map_location=map_loc)
60
  )
61
  model = model.to(device)
62
  model.eval()
src/model.py CHANGED
@@ -4,10 +4,10 @@ import torch
4
  import torch.nn as nn
5
  import random
6
  import numpy as np
7
- from modules.encoder import EncoderCNN, EncoderLabels
8
- from modules.transformer_decoder import DecoderTransformer
9
- from modules.multihead_attention import MultiheadAttention
10
- from utils.metrics import softIoU, MaskedCrossEntropyCriterion
11
  import pickle
12
  import os
13
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
4
  import torch.nn as nn
5
  import random
6
  import numpy as np
7
+ from src.modules.encoder import EncoderCNN, EncoderLabels
8
+ from src.modules.transformer_decoder import DecoderTransformer
9
+ from src.modules.multihead_attention import MultiheadAttention
10
+ from src.utils.metrics import softIoU, MaskedCrossEntropyCriterion
11
  import pickle
12
  import os
13
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
src/modules/multihead_attention.py CHANGED
@@ -11,7 +11,7 @@ from torch import nn
11
  from torch.nn import Parameter
12
  import torch.nn.functional as F
13
 
14
- from modules.utils import fill_with_neg_inf, get_incremental_state, set_incremental_state
15
 
16
 
17
  class MultiheadAttention(nn.Module):
 
11
  from torch.nn import Parameter
12
  import torch.nn.functional as F
13
 
14
+ from src.modules.utils import fill_with_neg_inf, get_incremental_state, set_incremental_state
15
 
16
 
17
  class MultiheadAttention(nn.Module):
src/modules/transformer_decoder.py CHANGED
@@ -13,8 +13,8 @@ import torch
13
  import torch.nn as nn
14
  import torch.nn.functional as F
15
  from torch.nn.modules.utils import _single
16
- import modules.utils as utils
17
- from modules.multihead_attention import MultiheadAttention
18
  import numpy as np
19
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
20
  import copy
 
13
  import torch.nn as nn
14
  import torch.nn.functional as F
15
  from torch.nn.modules.utils import _single
16
+ import src.modules.utils as utils
17
+ from src.modules.multihead_attention import MultiheadAttention
18
  import numpy as np
19
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
20
  import copy