PereLluis13 committed
Commit f6a1983
1 Parent(s): 38dbc80

add tokenizer

added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<triplet>": 128112, "<relation>": 128113, "el_EL": 128114, "ca_XX": 128115, "tp_XX": 128116, "<loc>": 128117, "<misc>": 128118, "<per>": 128119, "<num>": 128120, "<time>": 128121, "<org>": 128122, "<date>": 128123, "<eve>": 128124, "<cel>": 128125, "<media>": 128126, "<dis>": 128127, "<concept>": 128128}
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8f7c76ed2a5e0822be39f0a4f95a55eb19c78f4593ce609e2edbc2aea4d380a
+ size 2423393
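The committed file is only a Git LFS pointer; the actual SentencePiece model is materialized by `git lfs pull`. A minimal sketch for verifying a downloaded copy against the pointer's oid and size (both values copied from the diff above):

```python
import hashlib
import os

EXPECTED_SHA256 = "d8f7c76ed2a5e0822be39f0a4f95a55eb19c78f4593ce609e2edbc2aea4d380a"
EXPECTED_SIZE = 2423393  # bytes, from the pointer file

path = "sentencepiece.bpe.model"  # local path after `git lfs pull`
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED_SHA256, "oid mismatch"
print("pointer and file match")
```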
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "additional_special_tokens": ["<triplet>", "<relation>", "el_EL", "ca_XX", "tp_XX", "<loc>", "<misc>", "<per>", "<num>", "<time>", "<org>", "<date>", "<eve>", "<cel>", "<media>", "<dis>", "<unk>", "<concept>", "<loc>", "<misc>", "<per>", "<num>", "<time>", "<org>", "<date>", "<eve>", "<cel>", "<media>", "<dis>", "<unk>", "<concept>", "__af__", "__am__", "__ar__", "__ast__", "__az__", "__ba__", "__be__", "__bg__", "__bn__", "__br__", "__bs__", "__ca__", "__ceb__", "__cs__", "__cy__", "__da__", "__de__", "__el__", "__en__", "__es__", "__et__", "__fa__", "__ff__", "__fi__", "__fr__", "__fy__", "__ga__", "__gd__", "__gl__", "__gu__", "__ha__", "__he__", "__hi__", "__hr__", "__ht__", "__hu__", "__hy__", "__id__", "__ig__", "__ilo__", "__is__", "__it__", "__ja__", "__jv__", "__ka__", "__kk__", "__km__", "__kn__", "__ko__", "__lb__", "__lg__", "__ln__", "__lo__", "__lt__", "__lv__", "__mg__", "__mk__", "__ml__", "__mn__", "__mr__", "__ms__", "__my__", "__ne__", "__nl__", "__no__", "__ns__", "__oc__", "__or__", "__pa__", "__pl__", "__ps__", "__pt__", "__ro__", "__ru__", "__sd__", "__si__", "__sk__", "__sl__", "__so__", "__sq__", "__sr__", "__ss__", "__su__", "__sv__", "__sw__", "__ta__", "__th__", "__tl__", "__tn__", "__tr__", "__uk__", "__ur__", "__uz__", "__vi__", "__wo__", "__xh__", "__yi__", "__yo__", "__zh__", "__zu__"]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"src_lang": null, "tgt_lang": "en", "bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "language_codes": "m2m100", "sp_model_kwargs": {}, "num_madeup_words": 8, "special_tokens_map_file": "m2m_100_1.2B_v2/special_tokens_map.json", "tokenizer_file": null, "name_or_path": "/home/huguetcabot/mrebel/rebel/model/mrebel-base", "additional_special_tokens": ["<triplet>", "<relation>", "el_EL", "ca_XX", "tp_XX", "<loc>", "<misc>", "<per>", "<num>", "<time>", "<org>", "<date>", "<eve>", "<cel>", "<media>", "<dis>", "<unk>", "<concept>", "<loc>", "<misc>", "<per>", "<num>", "<time>", "<org>", "<date>", "<eve>", "<cel>", "<media>", "<dis>", "<unk>", "<concept>", "__af__", "__am__", "__ar__", "__ast__", "__az__", "__ba__", "__be__", "__bg__", "__bn__", "__br__", "__bs__", "__ca__", "__ceb__", "__cs__", "__cy__", "__da__", "__de__", "__el__", "__en__", "__es__", "__et__", "__fa__", "__ff__", "__fi__", "__fr__", "__fy__", "__ga__", "__gd__", "__gl__", "__gu__", "__ha__", "__he__", "__hi__", "__hr__", "__ht__", "__hu__", "__hy__", "__id__", "__ig__", "__ilo__", "__is__", "__it__", "__ja__", "__jv__", "__ka__", "__kk__", "__km__", "__kn__", "__ko__", "__lb__", "__lg__", "__ln__", "__lo__", "__lt__", "__lv__", "__mg__", "__mk__", "__ml__", "__mn__", "__mr__", "__ms__", "__my__", "__ne__", "__nl__", "__no__", "__ns__", "__oc__", "__or__", "__pa__", "__pl__", "__ps__", "__pt__", "__ro__", "__ru__", "__sd__", "__si__", "__sk__", "__sl__", "__so__", "__sq__", "__sr__", "__ss__", "__su__", "__sv__", "__sw__", "__ta__", "__th__", "__tl__", "__tn__", "__tr__", "__uk__", "__ur__", "__uz__", "__vi__", "__wo__", "__xh__", "__yi__", "__yo__", "__zh__", "__zu__"], "model_max_length": 1024, "tokenizer_class": "M2M100Tokenizer"}
vocab.json ADDED
The diff for this file is too large to render.
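vocab.json holds the base token-to-id table for the SentencePiece model; the 17 entries in added_tokens.json extend it rather than appearing here. A quick local sanity check, assuming the usual slow-tokenizer layout:

```python
import json

with open("vocab.json") as f:
    vocab = json.load(f)

print(len(vocab))            # size of the base vocabulary
print("<triplet>" in vocab)  # False: new tokens live in added_tokens.json
```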