Add SetFit model
- 1_Pooling/config.json +10 -0
- README.md +214 -0
- config.json +24 -0
- config_sentence_transformers.json +10 -0
- config_setfit.json +7 -0
- model.safetensors +3 -0
- model_head.pkl +3 -0
- modules.json +14 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +51 -0
- tokenizer.json +0 -0
- tokenizer_config.json +66 -0
- vocab.txt +0 -0
1_Pooling/config.json
ADDED
```json
{
    "word_embedding_dimension": 768,
    "pooling_mode_cls_token": false,
    "pooling_mode_mean_tokens": true,
    "pooling_mode_max_tokens": false,
    "pooling_mode_mean_sqrt_len_tokens": false,
    "pooling_mode_weightedmean_tokens": false,
    "pooling_mode_lasttoken": false,
    "include_prompt": true
}
```
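
With `pooling_mode_mean_tokens` enabled, sentence embeddings are built by averaging token embeddings over the real (non-padding) tokens. A minimal sketch of that operation, with illustrative tensor names (not identifiers from this repo):

```python
import torch

def mean_pooling(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Average token embeddings, ignoring padding positions."""
    # Expand the mask to the embedding dimension: (batch, seq_len, hidden)
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    # Sum the unmasked embeddings and divide by the count of real tokens
    summed = (token_embeddings * mask).sum(dim=1)
    counts = mask.sum(dim=1).clamp(min=1e-9)
    return summed / counts
```
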
README.md
ADDED
---
base_model: sentence-transformers/paraphrase-mpnet-base-v2
library_name: setfit
metrics:
- f1
pipeline_tag: text-classification
tags:
- setfit
- sentence-transformers
- text-classification
- generated_from_setfit_trainer
widget:
- text: I just wanted to say thank you so much for helping me with my project. Your
    patience and expertise are truly appreciated. I'm so glad I found this community,
    it's been a lifesaver!
- text: I'm not saying it's okay, but I've seen her flirt with guys in the office
    and now she's complaining about me making a comment about her outfit. She's just
    being a hypocrite.
- text: I'm trying to decide between two different laptops for my work. Can you help
    me compare their specs and features?
- text: I don't understand why people are still so intolerant of the LGBTQ+ community.
    Can't they just accept that gay people exist and deserve the same rights as everyone
    else? I'm so sick of hearing people use the 'f' word to describe someone just
    because they're gay. It's disgusting and hurtful.
- text: I'm so excited for the weekend, I get to spend time with my friends and family.
    We're planning a picnic in the park and I'm looking forward to trying out some
    new recipes. How about you, do you have any fun plans?
inference: true
model-index:
- name: SetFit with sentence-transformers/paraphrase-mpnet-base-v2
  results:
  - task:
      type: text-classification
      name: Text Classification
    dataset:
      name: Unknown
      type: unknown
      split: test
    metrics:
    - type: f1
      value: 0.31044658666142044
      name: F1
---

# SetFit with sentence-transformers/paraphrase-mpnet-base-v2

This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification.

The model has been trained using an efficient few-shot learning technique that involves:

1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
2. Training a classification head with features from the fine-tuned Sentence Transformer.
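
A minimal sketch of that two-phase recipe with SetFit's `Trainer`; the tiny dataset here is an illustrative placeholder, not this model's actual training data:

```python
from datasets import Dataset
from setfit import SetFitModel, Trainer, TrainingArguments

# Load the pretrained Sentence Transformer body and attach a fresh head.
model = SetFitModel.from_pretrained(
    "sentence-transformers/paraphrase-mpnet-base-v2",
    labels=["Non toxic", "Toxic"],
)

# A handful of labeled examples is enough for contrastive fine-tuning.
train_dataset = Dataset.from_dict({
    "text": [
        "I'm having a great day, thanks for asking!",
        "You're just a bunch of ignorant people.",
    ],
    "label": [0, 1],  # 0 -> "Non toxic", 1 -> "Toxic"
})

# Trainer.train() first fine-tunes the body on contrastive pairs,
# then fits the classification head on the resulting embeddings.
trainer = Trainer(
    model=model,
    args=TrainingArguments(batch_size=16, num_epochs=5),
    train_dataset=train_dataset,
)
trainer.train()
```
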
## Model Details

### Model Description
- **Model Type:** SetFit
- **Sentence Transformer body:** [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2)
- **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance
- **Maximum Sequence Length:** 512 tokens
- **Number of Classes:** 2 classes
<!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) -->
<!-- - **Language:** Unknown -->
<!-- - **License:** Unknown -->

### Model Sources

- **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit)
- **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055)
- **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit)

### Model Labels
| Label     | Examples |
|:----------|:---------|
| Non toxic | <ul><li>"I'm planning a trip to Japan and I'm excited to try all the delicious food, visit famous temples, and relax in natural hot springs. I've heard the cherry blossoms are beautiful this time of year, so I'm hoping to catch a glimpse of them. Has anyone else been to Japan and have any recommendations for must-see places or experiences?"</li><li>"I'm so excited for the new Marvel movie that just came out! I've been waiting for months and I heard it's going to be amazing. Has anyone else seen it? I'm planning on going to the cinema with my friends this weekend and I'm really looking forward to it."</li><li>"I'm having a great day, thanks for asking! The weather is lovely and I'm looking forward to a fun weekend."</li></ul> |
| Toxic     | <ul><li>"You're just a stupid white boy who thinks you're so smart, but you have no idea what it's like to be a black person in this country. You get to walk down the street without being stopped by the police just because of the color of your skin. You get to live in a neighborhood with good schools and good jobs. You get to have opportunities that we'll never have. So don't you dare come at me with your condescending attitude and your 'I'm a victim too' nonsense. I've been a victim my whole life, and I'm still standing. You're just a privileged little brat who thinks the world owes you a living."</li><li>"I don't think it's a big deal, but my coworker has been making suggestive comments about my body for months. It's really uncomfortable and I don't know how to handle it."</li><li>"You're just a bunch of ignorant, racist, white people who think you're better than everyone else. Newsflash: you're not. Your ancestors enslaved, oppressed, and murdered people of color for centuries, and now you're still trying to hold us back. Get over it."</li></ul> |

## Evaluation

### Metrics
| Label   | F1     |
|:--------|:-------|
| **all** | 0.3104 |
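
The exact scoring call behind the 0.3104 above isn't recorded in the card; one plausible way to compute an F1 on a held-out split is sketched below, with `test_texts` and `test_labels` as illustrative stand-ins for real test data:

```python
from setfit import SetFitModel
from sklearn.metrics import f1_score

model = SetFitModel.from_pretrained("setfit_model_id")  # placeholder id, as in the usage section

# Illustrative stand-ins for a real held-out split.
test_texts = ["I'm having a great day!", "You're a disgrace to this team."]
test_labels = ["Non toxic", "Toxic"]

# predict() returns label strings here because this model was saved
# with labels=["Non toxic", "Toxic"] (see config_setfit.json).
y_pred = model.predict(test_texts)
print(f1_score(test_labels, y_pred, pos_label="Toxic"))
```
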
## Uses

### Direct Use for Inference

First install the SetFit library:

```bash
pip install setfit
```

Then you can load this model and run inference.

```python
from setfit import SetFitModel

# Download from the 🤗 Hub
model = SetFitModel.from_pretrained("setfit_model_id")
# Run inference
preds = model("I'm trying to decide between two different laptops for my work. Can you help me compare their specs and features?")
```

<!--
### Downstream Use

*List how someone could finetune this model on their own dataset.*
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Set Metrics
| Training set | Min | Median | Max |
|:-------------|:----|:-------|:----|
| Word count   | 17  | 39.0   | 113 |

| Label     | Training Sample Count |
|:----------|:----------------------|
| Non toxic | 7                     |
| Toxic     | 22                    |

### Training Hyperparameters
- batch_size: (16, 16)
- num_epochs: (5, 5)
- max_steps: -1
- sampling_strategy: oversampling
- body_learning_rate: (2e-05, 1e-05)
- head_learning_rate: 0.01
- loss: CosineSimilarityLoss
- distance_metric: cosine_distance
- margin: 0.25
- end_to_end: False
- use_amp: False
- warmup_proportion: 0.1
- seed: 42
- eval_max_steps: -1
- load_best_model_at_end: True
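
These values map directly onto SetFit's `TrainingArguments`; a sketch of the equivalent construction, where tuples carry separate values for the embedding fine-tuning phase and the classifier phase:

```python
from setfit import TrainingArguments

args = TrainingArguments(
    batch_size=(16, 16),                # (embedding phase, classifier phase)
    num_epochs=(5, 5),
    body_learning_rate=(2e-05, 1e-05),  # (embedding phase, classifier phase)
    head_learning_rate=0.01,
    sampling_strategy="oversampling",
    warmup_proportion=0.1,
    seed=42,
    load_best_model_at_end=True,
)
```
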
### Training Results
| Epoch   | Step    | Training Loss | Validation Loss |
|:-------:|:-------:|:-------------:|:---------------:|
| 0.0278  | 1       | 0.2409        | -               |
| 1.0     | 36      | -             | 0.0208          |
| 1.3889  | 50      | 0.001         | -               |
| 2.0     | 72      | -             | 0.0099          |
| 2.7778  | 100     | 0.0002        | -               |
| 3.0     | 108     | -             | 0.0104          |
| **4.0** | **144** | **-**         | **0.0082**      |
| 4.1667  | 150     | 0.0001        | -               |
| 5.0     | 180     | -             | 0.0088          |

* The bold row denotes the saved checkpoint.

### Framework Versions
- Python: 3.9.19
- SetFit: 1.1.0.dev0
- Sentence Transformers: 3.0.1
- Transformers: 4.39.0
- PyTorch: 2.4.0
- Datasets: 2.20.0
- Tokenizers: 0.15.2

## Citation

### BibTeX
```bibtex
@article{https://doi.org/10.48550/arxiv.2209.11055,
    doi = {10.48550/ARXIV.2209.11055},
    url = {https://arxiv.org/abs/2209.11055},
    author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
    keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
    title = {Efficient Few-Shot Learning Without Prompts},
    publisher = {arXiv},
    year = {2022},
    copyright = {Creative Commons Attribution 4.0 International}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
config.json
ADDED
```json
{
    "_name_or_path": "setfit/step_144",
    "architectures": [
        "MPNetModel"
    ],
    "attention_probs_dropout_prob": 0.1,
    "bos_token_id": 0,
    "eos_token_id": 2,
    "hidden_act": "gelu",
    "hidden_dropout_prob": 0.1,
    "hidden_size": 768,
    "initializer_range": 0.02,
    "intermediate_size": 3072,
    "layer_norm_eps": 1e-05,
    "max_position_embeddings": 514,
    "model_type": "mpnet",
    "num_attention_heads": 12,
    "num_hidden_layers": 12,
    "pad_token_id": 1,
    "relative_attention_num_buckets": 32,
    "torch_dtype": "float32",
    "transformers_version": "4.39.0",
    "vocab_size": 30527
}
```
config_sentence_transformers.json
ADDED
```json
{
    "__version__": {
        "sentence_transformers": "3.0.1",
        "transformers": "4.39.0",
        "pytorch": "2.4.0"
    },
    "prompts": {},
    "default_prompt_name": null,
    "similarity_fn_name": null
}
```
config_setfit.json
ADDED
```json
{
    "normalize_embeddings": false,
    "labels": [
        "Non toxic",
        "Toxic"
    ]
}
```
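
Because `labels` is set here, a loaded `SetFitModel` returns these strings from inference rather than integer class ids; a quick illustrative call:

```python
from setfit import SetFitModel

model = SetFitModel.from_pretrained("setfit_model_id")  # placeholder id from the README
preds = model.predict(["I'm having a great day, thanks for asking!"])
print(preds)  # e.g. ['Non toxic'] — strings taken from the labels list above
```
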
model.safetensors
ADDED
```text
version https://git-lfs.github.com/spec/v1
oid sha256:5e546881e337d51ba525bff282f486df2971468f488d3bb6c99d60c4d58118ea
size 437967672
```
model_head.pkl
ADDED
```text
version https://git-lfs.github.com/spec/v1
oid sha256:e5118522ffe30bce74220849f25fd77624277c001bfc7769a77bfacbfb8c48e8
size 7007
```
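
This pickle holds the fitted scikit-learn classification head. A way to confirm what was serialized without unpickling the file by hand, assuming the model is loaded as in the README:

```python
from setfit import SetFitModel

model = SetFitModel.from_pretrained("setfit_model_id")  # placeholder id from the README
# The head attached to the SetFit body; expected to be a LogisticRegression.
print(type(model.model_head))
```
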
modules.json
ADDED
```json
[
    {
        "idx": 0,
        "name": "0",
        "path": "",
        "type": "sentence_transformers.models.Transformer"
    },
    {
        "idx": 1,
        "name": "1",
        "path": "1_Pooling",
        "type": "sentence_transformers.models.Pooling"
    }
]
```
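
This module list is what `SentenceTransformer` assembles at load time: a Transformer encoder followed by the pooling layer configured in `1_Pooling/config.json`. An equivalent manual construction, for illustration only:

```python
from sentence_transformers import SentenceTransformer, models

# Build the same two-module pipeline by hand.
word_embeddings = models.Transformer(
    "sentence-transformers/paraphrase-mpnet-base-v2", max_seq_length=512
)
pooling = models.Pooling(
    word_embeddings.get_word_embedding_dimension(), pooling_mode="mean"
)
body = SentenceTransformer(modules=[word_embeddings, pooling])
```
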
sentence_bert_config.json
ADDED
```json
{
    "max_seq_length": 512,
    "do_lower_case": false
}
```
special_tokens_map.json
ADDED
```json
{
    "bos_token": {
        "content": "<s>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "cls_token": {
        "content": "<s>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "eos_token": {
        "content": "</s>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "mask_token": {
        "content": "<mask>",
        "lstrip": true,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "pad_token": {
        "content": "<pad>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "sep_token": {
        "content": "</s>",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    },
    "unk_token": {
        "content": "[UNK]",
        "lstrip": false,
        "normalized": false,
        "rstrip": false,
        "single_word": false
    }
}
```
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
```json
{
    "added_tokens_decoder": {
        "0": {
            "content": "<s>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "1": {
            "content": "<pad>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "2": {
            "content": "</s>",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "104": {
            "content": "[UNK]",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "30526": {
            "content": "<mask>",
            "lstrip": true,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        }
    },
    "bos_token": "<s>",
    "clean_up_tokenization_spaces": true,
    "cls_token": "<s>",
    "do_basic_tokenize": true,
    "do_lower_case": true,
    "eos_token": "</s>",
    "mask_token": "<mask>",
    "max_length": 512,
    "model_max_length": 512,
    "never_split": null,
    "pad_to_multiple_of": null,
    "pad_token": "<pad>",
    "pad_token_type_id": 0,
    "padding_side": "right",
    "sep_token": "</s>",
    "stride": 0,
    "strip_accents": null,
    "tokenize_chinese_chars": true,
    "tokenizer_class": "MPNetTokenizer",
    "truncation_side": "right",
    "truncation_strategy": "longest_first",
    "unk_token": "[UNK]"
}
```
vocab.txt
ADDED
The diff for this file is too large to render.