Galuh
committed on
Commit
•
b0a8a80
1
Parent(s):
95cfb7a
Add pytorch and tokenizer
Browse files- added_tokens.json +1 -0
- jax2torch.py +15 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
- vocab.json +0 -0
added_tokens.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"<|endoftext|>": 50257}
|
jax2torch.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Convert the Flax/JAX GPT-2 checkpoint in the current directory to PyTorch.

Loads the Flax weights from ".", converts them to a PyTorch
``GPT2LMHeadModel``, saves the PyTorch checkpoint back to ".", and
re-saves the tokenizer so all files live next to the converted model.

Run from the repository root that contains the Flax checkpoint files.
"""

from transformers import AutoTokenizer, GPT2LMHeadModel

# from_flax=True tells transformers to read the Flax weights and convert
# them to PyTorch tensors in one step.
model = GPT2LMHeadModel.from_pretrained(".", from_flax=True)
# Writes pytorch_model.bin (plus config) into the current directory.
model.save_pretrained(".")

# Round-trip the tokenizer so its files are (re)written alongside the
# converted model.
tokenizer = AutoTokenizer.from_pretrained(".")
tokenizer.save_pretrained(".")
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8aa8f92e4afdbbb4c26a49803b7db96d7705c9b24d3406ab49fdc3ccfc8b69c7
|
3 |
+
size 510401385
|
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>"}
|
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "special_tokens_map_file": null, "name_or_path": ".", "errors": "replace", "tokenizer_class": "GPT2Tokenizer"}
|
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|