taka-yamakoshi committed on
Commit
e919fae
1 Parent(s): 2641f9d
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -49,9 +49,9 @@ def load_css(file_name):
49
 
50
  @st.cache(show_spinner=True,allow_output_mutation=True)
51
  def load_model():
52
- tokenizer = AlbertTokenizer.from_pretrained('albert-xxlarge-v2')
53
  #model = CustomFlaxAlbertForMaskedLM.from_pretrained('albert-xxlarge-v2',from_pt=True)
54
- model = AlbertForMaskedLM.from_pretrained('albert-xxlarge-v2')
55
  return tokenizer,model
56
 
57
  def clear_data():
@@ -163,7 +163,8 @@ if __name__=='__main__':
163
  wide_setup()
164
  #load_css('style.css')
165
  tokenizer,model = load_model()
166
- num_layers, num_heads = 12, 64
 
167
  mask_id = tokenizer('[MASK]').input_ids[1:-1][0]
168
 
169
  main_area = st.empty()
 
49
 
50
  @st.cache(show_spinner=True,allow_output_mutation=True)
51
  def load_model():
52
+ tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
53
  #model = CustomFlaxAlbertForMaskedLM.from_pretrained('albert-xxlarge-v2',from_pt=True)
54
+ model = AlbertForMaskedLM.from_pretrained('albert-base-v2')
55
  return tokenizer,model
56
 
57
  def clear_data():
 
163
  wide_setup()
164
  #load_css('style.css')
165
  tokenizer,model = load_model()
166
+ num_layers, num_heads = model.config.num_hidden_layers, model.config.num_attention_heads
167
+ st.write(num_layers,num_heads)
168
  mask_id = tokenizer('[MASK]').input_ids[1:-1][0]
169
 
170
  main_area = st.empty()