Upload 10 files
Browse files
- .gitattributes +36 -35
- README.md +90 -0
- config.json +73 -0
- model.onnx +3 -0
- onnx/model_optimized.onnx +3 -0
- onnx/model_quantized.onnx +3 -0
- sentencepiece.bpe.model +3 -0
- special_tokens_map.json +15 -0
- tokenizer.json +3 -0
- tokenizer_config.json +54 -0
.gitattributes
CHANGED
@@ -1,35 +1,36 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
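The only substantive change is the new final line: `tokenizer.json` (about 17 MB in this upload, see the LFS pointer below) is now routed through Git LFS like the other large artifacts. As a rough sketch of how such patterns decide which paths go through the LFS filter, using only the Python standard library (`fnmatch` only approximates gitattributes matching; `**` and `/` handling differ, so this is illustrative, not exact):

```python
from fnmatch import fnmatch

# A few patterns from the file above.
LFS_PATTERNS = ["*.onnx", "*.model", "*tfevents*", "tokenizer.json"]

def routed_through_lfs(path: str) -> bool:
    # gitattributes patterns without "/" match against the basename.
    basename = path.rsplit("/", 1)[-1]
    return any(fnmatch(basename, pattern) for pattern in LFS_PATTERNS)

for path in ["onnx/model_quantized.onnx", "tokenizer.json", "config.json"]:
    print(path, routed_through_lfs(path))  # True, True, False
```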
README.md
ADDED
@@ -0,0 +1,90 @@
---
language:
- multilingual
- ar
- bg
- de
- el
- en
- es
- fr
- hi
- it
- ja
- nl
- pl
- pt
- ru
- sw
- th
- tr
- ur
- vi
- zh
license: mit
inference: false
tags:
- language
- language-detection
metrics:
- accuracy
- f1
base_model: papluca/xlm-roberta-base-language-detection
model-index:
- name: xlm-roberta-base-language-detection
  results: []
pipeline_tag: text-classification
---

# ONNX version of papluca/xlm-roberta-base-language-detection

**This model is a conversion of [papluca/xlm-roberta-base-language-detection](https://huggingface.co/papluca/xlm-roberta-base-language-detection) to ONNX format**, produced with the [🤗 Optimum](https://huggingface.co/docs/optimum/index) library.

## Model description

This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the [Language Identification](https://huggingface.co/datasets/papluca/language-identification#additional-information) dataset. It is an XLM-RoBERTa transformer with a classification head on top (i.e. a linear layer on top of the pooled output). For additional information, please refer to the [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) model card or to the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Conneau et al.

## Intended uses & limitations

You can use this model directly as a language detector, i.e. for sequence-classification tasks. It currently supports the following 20 languages:

`arabic (ar), bulgarian (bg), german (de), modern greek (el), english (en), spanish (es), french (fr), hindi (hi), italian (it), japanese (ja), dutch (nl), polish (pl), portuguese (pt), russian (ru), swahili (sw), thai (th), turkish (tr), urdu (ur), vietnamese (vi), and chinese (zh)`

## Usage

### Optimum

Loading the model requires the [🤗 Optimum](https://huggingface.co/docs/optimum/index) library to be installed.

```python
from optimum.onnxruntime import ORTModelForSequenceClassification
from transformers import AutoTokenizer, pipeline


tokenizer = AutoTokenizer.from_pretrained("laiyer/xlm-roberta-base-language-detection-onnx")
model = ORTModelForSequenceClassification.from_pretrained("laiyer/xlm-roberta-base-language-detection-onnx")
classifier = pipeline(
    task="text-classification",
    model=model,
    tokenizer=tokenizer,
    top_k=None,  # return scores for all 20 languages, not just the top label
)

classifier_output = classifier("Brevity is the soul of wit.")
print(classifier_output)
```
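The upload also ships optimized and quantized variants under `onnx/`. A minimal sketch for loading the quantized graph instead of the default `model.onnx`, assuming the `file_name` argument of Optimum's `from_pretrained` resolves paths inside the repo (check your Optimum version if it does not):

```python
from optimum.onnxruntime import ORTModelForSequenceClassification
from transformers import AutoTokenizer, pipeline

model_id = "laiyer/xlm-roberta-base-language-detection-onnx"
tokenizer = AutoTokenizer.from_pretrained(model_id)

# file_name selects a specific ONNX file from the repo; the path below matches
# this upload's layout (onnx/model_quantized.onnx is ~279 MB versus ~1.1 GB
# for the full-precision model.onnx, per the LFS pointers further down).
model = ORTModelForSequenceClassification.from_pretrained(
    model_id,
    file_name="onnx/model_quantized.onnx",
)

classifier = pipeline("text-classification", model=model, tokenizer=tokenizer, top_k=None)
print(classifier("Bonjour tout le monde"))
```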
### LLM Guard

See LLM Guard's [Language scanner](https://llm-guard.com/input_scanners/language/), which uses this model.

## Community

Join our Slack to give us feedback, connect with the maintainers and fellow users, ask questions, or engage in discussions about LLM security!

<a href="https://join.slack.com/t/laiyerai/shared_invite/zt-28jv3ci39-sVxXrLs3rQdaN3mIl9IT~w"><img src="https://github.com/laiyer-ai/llm-guard/blob/main/docs/assets/join-our-slack-community.png?raw=true" width="200"></a>
config.json
ADDED
@@ -0,0 +1,73 @@
{
  "_name_or_path": "papluca/xlm-roberta-base-language-detection",
  "architectures": [
    "XLMRobertaForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "ja",
    "1": "nl",
    "2": "ar",
    "3": "pl",
    "4": "de",
    "5": "it",
    "6": "pt",
    "7": "tr",
    "8": "es",
    "9": "hi",
    "10": "el",
    "11": "ur",
    "12": "bg",
    "13": "en",
    "14": "fr",
    "15": "zh",
    "16": "ru",
    "17": "th",
    "18": "sw",
    "19": "vi"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "ar": 2,
    "bg": 12,
    "de": 4,
    "el": 10,
    "en": 13,
    "es": 8,
    "fr": 14,
    "hi": 9,
    "it": 5,
    "ja": 0,
    "nl": 1,
    "pl": 3,
    "pt": 6,
    "ru": 16,
    "sw": 18,
    "th": 17,
    "tr": 7,
    "ur": 11,
    "vi": 19,
    "zh": 15
  },
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "xlm-roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "output_past": true,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "problem_type": "single_label_classification",
  "torch_dtype": "float32",
  "transformers_version": "4.35.0",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 250002
}
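The `id2label` map above is what turns the classifier's argmax index into a language code. A minimal sketch of that final decoding step (the stand-in logits are illustrative only; in practice they come from the ONNX session or the pipeline shown earlier):

```python
import numpy as np
from transformers import AutoConfig

config = AutoConfig.from_pretrained("laiyer/xlm-roberta-base-language-detection-onnx")

# Stand-in scores, one per label id 0..19.
logits = np.zeros(20)
logits[14] = 5.0  # pretend "fr" won
print(config.id2label[int(np.argmax(logits))])  # -> "fr"
```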
model.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:71b2a7bcc15397859700e8b8e4fe94050b972b23cf02f419d95ef70a63866bb8
size 1112518035
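`model.onnx` and the other large files below are stored as Git LFS pointers: the repo holds only this three-line stub, while the pointer's `oid` and `size` identify the actual blob. A small sketch for checking a downloaded blob against its pointer (the local path is hypothetical):

```python
import hashlib
from pathlib import Path

def verify_lfs_blob(pointer_text: str, blob_path: str) -> bool:
    """Compare a downloaded file against the oid/size in a Git LFS pointer."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].split(":", 1)[1]  # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    blob = Path(blob_path)
    if blob.stat().st_size != expected_size:
        return False

    digest = hashlib.sha256()
    with blob.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:71b2a7bcc15397859700e8b8e4fe94050b972b23cf02f419d95ef70a63866bb8
size 1112518035"""
print(verify_lfs_blob(pointer, "model.onnx"))
```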
onnx/model_optimized.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8393411b9c0bcc965b9e7431c911ebe73df240f7334f773d979b549b0d33a02a
size 1112276547
onnx/model_quantized.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:abdc1b266843170cedc57a6a1206e6309a8eb9c58bc67130ab9678753cbc2086
size 278939489
sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
size 5069051
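`sentencepiece.bpe.model` is the raw SentencePiece model behind the XLM-R tokenizer. A sketch of inspecting it directly, assuming the file has been fetched locally (e.g. via `huggingface_hub`); note that the 🤗 tokenizer's ids are offset from raw SentencePiece ids by the special tokens listed below, so use `AutoTokenizer` for model inputs:

```python
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="sentencepiece.bpe.model")
print(sp.encode("This is a test", out_type=str))  # subword pieces
print(sp.encode("This is a test"))                # raw piece ids, no specials
```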
special_tokens_map.json
ADDED
@@ -0,0 +1,15 @@
{
  "bos_token": "<s>",
  "cls_token": "<s>",
  "eos_token": "</s>",
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "unk_token": "<unk>"
}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:19232daba569c004c5fbe45f373ccaf2ee505055da00969f78a99e5754db8f25
size 17082688
tokenizer_config.json
ADDED
@@ -0,0 +1,54 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "250001": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "mask_token": "<mask>",
  "model_max_length": 512,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "tokenizer_class": "XLMRobertaTokenizer",
  "unk_token": "<unk>"
}
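As a quick sanity check that these tokenizer files fit together, the special-token ids in `added_tokens_decoder` can be compared against what `AutoTokenizer` reports; a sketch, assuming the same repo id as in the README:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("laiyer/xlm-roberta-base-language-detection-onnx")

# Ids taken from added_tokens_decoder above.
expected = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "<mask>": 250001}
for token, token_id in expected.items():
    assert tokenizer.convert_tokens_to_ids(token) == token_id, token

# Sequences are wrapped as <s> ... </s>, per the bos/eos tokens in this config.
print(tokenizer("Hello world")["input_ids"])
```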