emanuelaboros committed
Commit bcb9d94
1 Parent(s): 8e6390c

Upload model

config.json CHANGED
@@ -16,7 +16,7 @@
   "intermediate_size": 2048,
   "layer_norm_eps": 1e-12,
   "max_position_embeddings": 512,
-  "model_type": "extended_multitask",
+  "model_type": "stacked_bert",
   "num_attention_heads": 8,
   "num_hidden_layers": 8,
   "pad_token_id": 0,
configuration_extended_multitask.py CHANGED
@@ -2,7 +2,7 @@ from transformers import PretrainedConfig
 
 
 class ImpressoConfig(PretrainedConfig):
-    model_type = "extended_multitask"
+    model_type = "stacked_bert"
 
     def __init__(
         self,
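Since the checkpoint now declares a model_type that stock transformers does not know, one way to make AutoConfig resolve it is to register the custom class under the new name. A hedged sketch, assuming ImpressoConfig is importable locally and using a placeholder checkpoint path:

from transformers import AutoConfig
from configuration_extended_multitask import ImpressoConfig

# Map the new "stacked_bert" model_type to ImpressoConfig so AutoConfig can
# resolve checkpoints declaring this type. register() requires
# ImpressoConfig.model_type to equal the registered string, which the
# change above guarantees.
AutoConfig.register("stacked_bert", ImpressoConfig)

config = AutoConfig.from_pretrained("path/to/checkpoint")  # placeholder path
assert isinstance(config, ImpressoConfig)

If the repository instead relies on trust_remote_code when loading from the Hub, the same mapping would normally come from an auto_map entry in config.json rather than an explicit register() call.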