Tags: Text Generation · Transformers · PyTorch · code · gpt2 · custom_code · Eval Results · text-generation-inference
Asankhaya Sharma committed · Commit 7b84712 · 1 parent: 4d62099

update finetuned model

Files changed (3):
  1. config.json (+1 −1)
  2. pytorch_model.bin (+1 −1)
  3. tokenizer_config.json (+2 −1)
config.json CHANGED
@@ -31,7 +31,7 @@
   "summary_type": "cls_index",
   "summary_use_proj": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.27.0.dev0",
+  "transformers_version": "4.28.0.dev0",
   "use_cache": true,
   "vocab_size": 49280
 }
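
The only change in config.json is the transformers_version metadata field, which records the library version that serialized the finetuned checkpoint; it does not alter model behavior. A minimal sketch of inspecting the field after download (the repo id below is a placeholder, not the actual Hub id of this model):

```python
import json
from huggingface_hub import hf_hub_download

# Placeholder repo id -- substitute the Hub id of this finetuned model.
path = hf_hub_download(repo_id="user/finetuned-santacoder", filename="config.json")

with open(path) as f:
    config = json.load(f)

# After this commit the recorded version should read "4.28.0.dev0".
print(config["transformers_version"])
```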
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:70a48e9b09c58a359c810c89dfed3e670d0d75ac1733ff6abd2cd58108f0cb77
+oid sha256:788efc2aff3a2581cb6899374206b59337d6607d47ec363018267b9928649e16
 size 4600333857
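
Only the Git LFS pointer changes here: the oid is swapped for the new finetuned weights while the byte size stays identical. Since the oid is the SHA-256 of the stored file's contents, a downloaded checkpoint can be checked against it; a minimal sketch, assuming the file is already present locally at the path shown:

```python
import hashlib

# Assumed local path -- e.g. the result of huggingface_hub.hf_hub_download.
WEIGHTS_PATH = "pytorch_model.bin"
EXPECTED_OID = "788efc2aff3a2581cb6899374206b59337d6607d47ec363018267b9928649e16"

sha256 = hashlib.sha256()
with open(WEIGHTS_PATH, "rb") as f:
    # Hash in 1 MiB chunks so the ~4.6 GB checkpoint never has to fit in memory.
    for chunk in iter(lambda: f.read(1024 * 1024), b""):
        sha256.update(chunk)

# The LFS pointer's oid is the SHA-256 of the file contents.
assert sha256.hexdigest() == EXPECTED_OID, "checkpoint does not match the LFS pointer"
print("weights match the new LFS pointer")
```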
tokenizer_config.json CHANGED
@@ -1,10 +1,11 @@
 {
   "add_prefix_space": false,
   "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": true,
   "eos_token": "<|endoftext|>",
   "errors": "replace",
   "model_max_length": 2048,
-  "special_tokens_map_file": "/root/.cache/huggingface/hub/models--bigcode--santacoder/snapshots/e4640726d600d55c806648f73e455be105d50af8/special_tokens_map.json",
+  "special_tokens_map_file": "/root/.cache/huggingface/hub/models--bigcode--santacoder/snapshots/bb63c0e145ad465df0a97dec285a949c9042523c/special_tokens_map.json",
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|endoftext|>"
 }
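
The tokenizer config gains an explicit "clean_up_tokenization_spaces": true and points special_tokens_map_file at a newer bigcode/santacoder snapshot. The flag controls whether decode() strips tokenization artifacts such as spaces before punctuation. A minimal sketch of its effect (the repo id below is a placeholder for this finetuned model):

```python
from transformers import AutoTokenizer

# Placeholder repo id -- substitute the Hub id of this finetuned model.
tok = AutoTokenizer.from_pretrained("user/finetuned-santacoder")

ids = tok.encode("def hello_world():")

# Passing the flag explicitly shows both behaviours; the updated
# tokenizer_config.json pins the cleaned-up variant as the stored default.
print(tok.decode(ids, clean_up_tokenization_spaces=True))
print(tok.decode(ids, clean_up_tokenization_spaces=False))
```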