Anas Awadalla committed
Commit f070e65
1 Parent(s): f8da1f9
all_results.json ADDED
@@ -0,0 +1 @@
+ {"F1": 78.42817588033974, "EM": 68.59035004730369}
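all_results.json records SQuAD-style dev-set scores for this checkpoint: 78.43 F1 and 68.59 exact match. The evaluation script is not part of this commit; the sketch below only illustrates the standard SQuAD v1.1 definitions of these two metrics (function names are illustrative, not taken from this repo).

```python
# Minimal sketch of the standard SQuAD v1.1 metrics (EM and token-level F1).
# Not the evaluation code used for this checkpoint; it only illustrates what
# the "EM" and "F1" fields in all_results.json conventionally measure.
import re
import string
from collections import Counter

def normalize_answer(s: str) -> str:
    """Lowercase, strip punctuation and articles, collapse whitespace."""
    s = s.lower()
    s = "".join(ch for ch in s if ch not in set(string.punctuation))
    s = re.sub(r"\b(a|an|the)\b", " ", s)
    return " ".join(s.split())

def exact_match(prediction: str, ground_truth: str) -> float:
    return float(normalize_answer(prediction) == normalize_answer(ground_truth))

def f1_score(prediction: str, ground_truth: str) -> float:
    pred_tokens = normalize_answer(prediction).split()
    gold_tokens = normalize_answer(ground_truth).split()
    common = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)

# Corpus-level scores average these per-example values (taking the max over
# the reference answers for each question) and are reported as percentages.
```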
args.txt ADDED
@@ -0,0 +1,32 @@
+ model_name_or_path: facebook/opt-125m
+ config_name: None
+ tokenizer_name: None
+ use_slow_tokenizer: False
+ per_device_train_batch_size: 32
+ per_device_eval_batch_size: 32
+ learning_rate: 5e-05
+ weight_decay: 0.1
+ num_train_epochs: 5
+ patience: 100
+ max_train_steps: None
+ gradient_accumulation_steps: 1
+ lr_scheduler_type: SchedulerType.LINEAR
+ num_warmup_steps: 0
+ output_dir: opt_125m_squad_decay_0.1
+ seed: None
+ finetune_type: vanilla
+ beta: 0.1
+ model_type: None
+ max_seq_length: 384
+ max_context_length: 600
+ num_beams: 1
+ preprocessing_num_workers: 16
+ overwrite_cache: False
+ no_keep_linebreaks: False
+ push_to_hub: False
+ hub_model_id: None
+ hub_token: None
+ checkpointing_steps: epoch
+ resume_from_checkpoint: None
+ with_tracking: False
+ local_rank: -1
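args.txt captures the fine-tuning hyperparameters: facebook/opt-125m, batch size 32 per device, learning rate 5e-5, weight decay 0.1, 5 epochs, a linear schedule with no warmup, and a max sequence length of 384. The training script itself is not included in the commit; the snippet below is only a sketch of an AdamW/scheduler setup consistent with those values (the step count and the decision to apply weight decay to all parameters are assumptions).

```python
# Sketch of an optimizer/scheduler setup matching args.txt; the actual
# training script is not part of this commit, so treat names as illustrative.
import torch
from transformers import AutoModelForCausalLM, get_scheduler

model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")

# learning_rate: 5e-05, weight_decay: 0.1 (applied to all parameters here;
# the original script may have excluded biases/LayerNorm weights).
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5, weight_decay=0.1)

# num_train_epochs: 5, gradient_accumulation_steps: 1, num_warmup_steps: 0,
# lr_scheduler_type: linear. The steps-per-epoch value depends on the SQuAD
# dataloader, which is not shown here; this is a placeholder.
num_update_steps_per_epoch = 1000
max_train_steps = 5 * num_update_steps_per_epoch

lr_scheduler = get_scheduler(
    name="linear",
    optimizer=optimizer,
    num_warmup_steps=0,
    num_training_steps=max_train_steps,
)
```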
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "_name_or_path": "facebook/opt-125m",
+ "activation_dropout": 0.0,
+ "activation_function": "relu",
+ "architectures": [
+ "OPTForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 0,
+ "do_layer_norm_before": true,
+ "dropout": 0.1,
+ "eos_token_id": 2,
+ "ffn_dim": 3072,
+ "hidden_size": 768,
+ "init_std": 0.02,
+ "layerdrop": 0.0,
+ "max_position_embeddings": 2048,
+ "model_type": "opt",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "prefix": "</s>",
+ "torch_dtype": "float32",
+ "transformers_version": "4.19.2",
+ "use_cache": true,
+ "vocab_size": 50265,
+ "word_embed_proj_dim": 768
+ }
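config.json pins the architecture to the stock OPT-125m layout (12 layers, 12 attention heads, hidden size 768, vocab size 50265) exported as OPTForCausalLM in float32 under transformers 4.19.2. A generic loading sketch is shown below; the local checkpoint path is a placeholder, not something defined in this commit.

```python
# Generic loading sketch for this checkpoint directory; requires
# transformers >= 4.19 (OPT support). The path below is a placeholder for
# wherever config.json, pytorch_model.bin, and the tokenizer files live.
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

checkpoint_dir = "opt_125m_squad_decay_0.1"  # assumed local path

config = AutoConfig.from_pretrained(checkpoint_dir)
assert config.model_type == "opt" and config.num_hidden_layers == 12

tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)
model = AutoModelForCausalLM.from_pretrained(checkpoint_dir)
model.eval()
```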
merges.txt ADDED
The diff for this file is too large to render. See raw diff
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41befe28059dea6329557ab6340ec2e5153de5b221685084da500216b4686015
+ size 500993341
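pytorch_model.bin is tracked with Git LFS, so the diff shows only the pointer file: the spec version, the SHA-256 object id, and the size (500,993,341 bytes, roughly 478 MiB). After downloading the actual weights, the pointer values can be used as a quick integrity check; the path below is an assumed local location.

```python
# Verify a downloaded pytorch_model.bin against the Git LFS pointer above.
# The path is an assumption about where the file was saved locally.
import hashlib
import os

path = "opt_125m_squad_decay_0.1/pytorch_model.bin"  # assumed local path
expected_oid = "41befe28059dea6329557ab6340ec2e5153de5b221685084da500216b4686015"
expected_size = 500993341

assert os.path.getsize(path) == expected_size, "size mismatch"

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)
assert sha256.hexdigest() == expected_oid, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")
```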
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "errors": "replace", "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_bos_token": true, "special_tokens_map_file": null, "name_or_path": "facebook/opt-125m", "tokenizer_class": "GPT2Tokenizer"}
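special_tokens_map.json and tokenizer_config.json configure a GPT-2 style BPE tokenizer (tokenizer_class GPT2Tokenizer) in which bos, eos, and unk all map to "</s>", padding uses "<pad>", and add_bos_token is enabled. The snippet below is a small usage sketch showing how those settings surface once the tokenizer is loaded; the checkpoint path is a placeholder.

```python
# Inspect the special-token setup declared in special_tokens_map.json and
# tokenizer_config.json; the checkpoint path is a placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("opt_125m_squad_decay_0.1")

print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)  # all "</s>"
print(tokenizer.pad_token)                                            # "<pad>"

# add_bos_token: true, so encoding prepends the "</s>" token id, as with the
# stock facebook/opt-125m tokenizer this repo is derived from.
ids = tokenizer("What is the capital of France?").input_ids
print(ids[:3])  # first id should correspond to tokenizer.bos_token_id
```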
vocab.json ADDED
The diff for this file is too large to render. See raw diff