Anas Awadalla committed
Commit 689c3ce
1 Parent(s): 26c22e5

init commit

all_results.json ADDED
@@ -0,0 +1 @@
+ {"F1": 88.34753277550725, "EM": 80.4635761589404}
args.txt ADDED
@@ -0,0 +1,32 @@
+ model_name_or_path: gpt2-xl
+ config_name: None
+ tokenizer_name: None
+ use_slow_tokenizer: False
+ per_device_train_batch_size: 1
+ per_device_eval_batch_size: 1
+ learning_rate: 5e-06
+ weight_decay: 0.1
+ num_train_epochs: 5
+ patience: 2
+ max_train_steps: None
+ gradient_accumulation_steps: 1
+ lr_scheduler_type: SchedulerType.LINEAR
+ num_warmup_steps: 0
+ output_dir: gpt_xl_squad_decay_0.1
+ seed: None
+ finetune_type: vanilla
+ beta: 0.1
+ model_type: None
+ max_seq_length: 600
+ max_context_length: 600
+ num_beams: 1
+ preprocessing_num_workers: 16
+ overwrite_cache: False
+ no_keep_linebreaks: False
+ push_to_hub: False
+ hub_model_id: None
+ hub_token: None
+ checkpointing_steps: epoch
+ resume_from_checkpoint: None
+ with_tracking: False
+ local_rank: -1
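These flags resemble a Hugging Face Accelerate-style fine-tuning script, but the script itself is not in this commit. As a hedged sketch (all names illustrative), the optimizer and scheduler settings above would typically wire up like this:

# Illustrative only: how these arguments usually map onto an AdamW
# optimizer and a linear warmup schedule. The real training script is
# not included in this commit.
import torch
from transformers import AutoModelForCausalLM, get_scheduler

model = AutoModelForCausalLM.from_pretrained("gpt2-xl")  # model_name_or_path

optimizer = torch.optim.AdamW(
    model.parameters(),
    lr=5e-6,           # learning_rate: 5e-06
    weight_decay=0.1,  # weight_decay: 0.1
)

steps_per_epoch = 10_000  # placeholder for len(train_dataloader)
num_training_steps = 5 * steps_per_epoch  # num_train_epochs: 5

lr_scheduler = get_scheduler(
    name="linear",         # lr_scheduler_type: SchedulerType.LINEAR
    optimizer=optimizer,
    num_warmup_steps=0,    # num_warmup_steps: 0
    num_training_steps=num_training_steps,
)

With per_device_train_batch_size: 1 and gradient_accumulation_steps: 1 the effective batch size is 1 per device; patience: 2 suggests early stopping after two epochs without improvement on the dev metric.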
config.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "_name_or_path": "gpt2-xl",
+   "activation_function": "gelu_new",
+   "adapters": {
+     "adapters": {},
+     "config_map": {},
+     "fusion_config_map": {},
+     "fusions": {}
+   },
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 1600,
+   "n_head": 25,
+   "n_inner": null,
+   "n_layer": 48,
+   "n_positions": 1024,
+   "output_past": true,
+   "pad_token": "<|endoftext|>",
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.17.0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
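This is a stock GPT-2 XL configuration (48 layers, n_embd 1600, 25 heads, ~1.5B parameters). The empty "adapters" block indicates the checkpoint was saved with the adapter-transformers fork installed, but no adapters are configured, so plain transformers should load it as a standard GPT2LMHeadModel. A minimal loading sketch, assuming a local clone of this repo (directory name is an assumption):

# Load the fine-tuned checkpoint from a local clone of this repo;
# the path below is illustrative.
from transformers import GPT2LMHeadModel, GPT2Tokenizer

repo_dir = "./gpt_xl_squad_decay_0.1"
tokenizer = GPT2Tokenizer.from_pretrained(repo_dir)
model = GPT2LMHeadModel.from_pretrained(repo_dir)
model.eval()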
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b184e03b36ff0fff68cf6b7a280bbe02675a19f5ed1d890ecb684a3ffc079bde
+ size 6281056077
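The weights are stored via Git LFS; the pointer records the object's sha256 and size (~6.28 GB, consistent with ~1.5B float32 parameters). A sketch for verifying a downloaded copy against the pointer:

# Verify a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib
import os

EXPECTED_SHA256 = "b184e03b36ff0fff68cf6b7a280bbe02675a19f5ed1d890ecb684a3ffc079bde"
EXPECTED_SIZE = 6281056077  # bytes, from the pointer

path = "pytorch_model.bin"
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == EXPECTED_SHA256, "checksum mismatch"
print("checksum OK")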
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>", "pad_token": "<|endoftext|>"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "gpt2-xl", "tokenizer_class": "GPT2Tokenizer"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff