Text2Text Generation
Transformers
PyTorch
t5
codet5
text-generation-inference
nielsr (HF staff) committed on
Commit 4c2e66e
1 Parent(s): 825eae1

Add tokenizer files

added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<extra_id_99>": 32000, "<extra_id_98>": 32001, "<extra_id_97>": 32002, "<extra_id_96>": 32003, "<extra_id_95>": 32004, "<extra_id_94>": 32005, "<extra_id_93>": 32006, "<extra_id_92>": 32007, "<extra_id_91>": 32008, "<extra_id_90>": 32009, "<extra_id_89>": 32010, "<extra_id_88>": 32011, "<extra_id_87>": 32012, "<extra_id_86>": 32013, "<extra_id_85>": 32014, "<extra_id_84>": 32015, "<extra_id_83>": 32016, "<extra_id_82>": 32017, "<extra_id_81>": 32018, "<extra_id_80>": 32019, "<extra_id_79>": 32020, "<extra_id_78>": 32021, "<extra_id_77>": 32022, "<extra_id_76>": 32023, "<extra_id_75>": 32024, "<extra_id_74>": 32025, "<extra_id_73>": 32026, "<extra_id_72>": 32027, "<extra_id_71>": 32028, "<extra_id_70>": 32029, "<extra_id_69>": 32030, "<extra_id_68>": 32031, "<extra_id_67>": 32032, "<extra_id_66>": 32033, "<extra_id_65>": 32034, "<extra_id_64>": 32035, "<extra_id_63>": 32036, "<extra_id_62>": 32037, "<extra_id_61>": 32038, "<extra_id_60>": 32039, "<extra_id_59>": 32040, "<extra_id_58>": 32041, "<extra_id_57>": 32042, "<extra_id_56>": 32043, "<extra_id_55>": 32044, "<extra_id_54>": 32045, "<extra_id_53>": 32046, "<extra_id_52>": 32047, "<extra_id_51>": 32048, "<extra_id_50>": 32049, "<extra_id_49>": 32050, "<extra_id_48>": 32051, "<extra_id_47>": 32052, "<extra_id_46>": 32053, "<extra_id_45>": 32054, "<extra_id_44>": 32055, "<extra_id_43>": 32056, "<extra_id_42>": 32057, "<extra_id_41>": 32058, "<extra_id_40>": 32059, "<extra_id_39>": 32060, "<extra_id_38>": 32061, "<extra_id_37>": 32062, "<extra_id_36>": 32063, "<extra_id_35>": 32064, "<extra_id_34>": 32065, "<extra_id_33>": 32066, "<extra_id_32>": 32067, "<extra_id_31>": 32068, "<extra_id_30>": 32069, "<extra_id_29>": 32070, "<extra_id_28>": 32071, "<extra_id_27>": 32072, "<extra_id_26>": 32073, "<extra_id_25>": 32074, "<extra_id_24>": 32075, "<extra_id_23>": 32076, "<extra_id_22>": 32077, "<extra_id_21>": 32078, "<extra_id_20>": 32079, "<extra_id_19>": 32080, "<extra_id_18>": 32081, "<extra_id_17>": 32082, "<extra_id_16>": 32083, "<extra_id_15>": 32084, "<extra_id_14>": 32085, "<extra_id_13>": 32086, "<extra_id_12>": 32087, "<extra_id_11>": 32088, "<extra_id_10>": 32089, "<extra_id_9>": 32090, "<extra_id_8>": 32091, "<extra_id_7>": 32092, "<extra_id_6>": 32093, "<extra_id_5>": 32094, "<extra_id_4>": 32095, "<extra_id_3>": 32096, "<extra_id_2>": 32097, "<extra_id_1>": 32098, "<extra_id_0>": 32099}
config.json DELETED
@@ -1,65 +0,0 @@
- {
-   "_name_or_path": "/content/drive/MyDrive/CodeT5/pretrained_models/codet5_base",
-   "architectures": [
-     "T5ForConditionalGeneration"
-   ],
-   "bos_token_id": 1,
-   "d_ff": 3072,
-   "d_kv": 64,
-   "d_model": 768,
-   "decoder_start_token_id": 0,
-   "dropout_rate": 0.1,
-   "eos_token_id": 2,
-   "feed_forward_proj": "relu",
-   "gradient_checkpointing": false,
-   "id2label": {
-     "0": "LABEL_0"
-   },
-   "initializer_factor": 1.0,
-   "is_encoder_decoder": true,
-   "label2id": {
-     "LABEL_0": 0
-   },
-   "layer_norm_epsilon": 1e-06,
-   "model_type": "t5",
-   "n_positions": 512,
-   "num_decoder_layers": 12,
-   "num_heads": 12,
-   "num_layers": 12,
-   "output_past": true,
-   "pad_token_id": 0,
-   "relative_attention_num_buckets": 32,
-   "task_specific_params": {
-     "summarization": {
-       "early_stopping": true,
-       "length_penalty": 2.0,
-       "max_length": 200,
-       "min_length": 30,
-       "no_repeat_ngram_size": 3,
-       "num_beams": 4,
-       "prefix": "summarize: "
-     },
-     "translation_en_to_de": {
-       "early_stopping": true,
-       "max_length": 300,
-       "num_beams": 4,
-       "prefix": "translate English to German: "
-     },
-     "translation_en_to_fr": {
-       "early_stopping": true,
-       "max_length": 300,
-       "num_beams": 4,
-       "prefix": "translate English to French: "
-     },
-     "translation_en_to_ro": {
-       "early_stopping": true,
-       "max_length": 300,
-       "num_beams": 4,
-       "prefix": "translate English to Romanian: "
-     }
-   },
-   "torch_dtype": "float32",
-   "transformers_version": "4.10.2",
-   "use_cache": true,
-   "vocab_size": 32100
- }
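Note: the deleted config describes a T5-base-sized encoder-decoder (12 layers each, d_model 768, 12 heads, vocab 32,100 = 32,000 BPE entries + 100 sentinels). A sketch of rebuilding that shape from scratch; the T5Config field names follow the JSON above, and the parameter count is an estimate:

```python
from transformers import T5Config, T5ForConditionalGeneration

# Shape taken from the deleted config.json above.
config = T5Config(
    vocab_size=32100,
    d_model=768,
    d_kv=64,
    d_ff=3072,
    num_layers=12,
    num_decoder_layers=12,
    num_heads=12,
    feed_forward_proj="relu",
    relative_attention_num_buckets=32,
)
model = T5ForConditionalGeneration(config)  # randomly initialized weights

# ~223M parameters, consistent with the float32 pytorch_model.bin removed
# later in this commit (891641279 bytes / 4 bytes per weight).
print(sum(p.numel() for p in model.parameters()))
```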
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:053fbafd36f4011e13fe10f45d588102ffa7448f338443ff787e85360cd5e13c
- size 891641279
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}, "additional_special_tokens": ["<extra_id_99>", "<extra_id_98>", "<extra_id_97>", "<extra_id_96>", "<extra_id_95>", "<extra_id_94>", "<extra_id_93>", "<extra_id_92>", "<extra_id_91>", "<extra_id_90>", "<extra_id_89>", "<extra_id_88>", "<extra_id_87>", "<extra_id_86>", "<extra_id_85>", "<extra_id_84>", "<extra_id_83>", "<extra_id_82>", "<extra_id_81>", "<extra_id_80>", "<extra_id_79>", "<extra_id_78>", "<extra_id_77>", "<extra_id_76>", "<extra_id_75>", "<extra_id_74>", "<extra_id_73>", "<extra_id_72>", "<extra_id_71>", "<extra_id_70>", "<extra_id_69>", "<extra_id_68>", "<extra_id_67>", "<extra_id_66>", "<extra_id_65>", "<extra_id_64>", "<extra_id_63>", "<extra_id_62>", "<extra_id_61>", "<extra_id_60>", "<extra_id_59>", "<extra_id_58>", "<extra_id_57>", "<extra_id_56>", "<extra_id_55>", "<extra_id_54>", "<extra_id_53>", "<extra_id_52>", "<extra_id_51>", "<extra_id_50>", "<extra_id_49>", "<extra_id_48>", "<extra_id_47>", "<extra_id_46>", "<extra_id_45>", "<extra_id_44>", "<extra_id_43>", "<extra_id_42>", "<extra_id_41>", "<extra_id_40>", "<extra_id_39>", "<extra_id_38>", "<extra_id_37>", "<extra_id_36>", "<extra_id_35>", "<extra_id_34>", "<extra_id_33>", "<extra_id_32>", "<extra_id_31>", "<extra_id_30>", "<extra_id_29>", "<extra_id_28>", "<extra_id_27>", "<extra_id_26>", "<extra_id_25>", "<extra_id_24>", "<extra_id_23>", "<extra_id_22>", "<extra_id_21>", "<extra_id_20>", "<extra_id_19>", "<extra_id_18>", "<extra_id_17>", "<extra_id_16>", "<extra_id_15>", "<extra_id_14>", "<extra_id_13>", "<extra_id_12>", "<extra_id_11>", "<extra_id_10>", "<extra_id_9>", "<extra_id_8>", "<extra_id_7>", "<extra_id_6>", "<extra_id_5>", "<extra_id_4>", "<extra_id_3>", "<extra_id_2>", "<extra_id_1>", "<extra_id_0>"]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"errors": "replace", "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "tokenizer_class": "RobertaTokenizer"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff