Commit 125f618 by Xenova (HF staff)
Parent: 4130b6e

Upload folder using huggingface_hub

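For context, an "Upload folder using huggingface_hub" commit is what the huggingface_hub Python client produces when a local folder is pushed with its upload_folder helper. A minimal sketch of that kind of call (the repo id and local path below are placeholders, not taken from this commit):

    from huggingface_hub import upload_folder

    # Push every file in a local folder as a single commit; files matching the
    # repo's .gitattributes LFS patterns are uploaded as LFS objects.
    upload_folder(
        repo_id="user/model-repo",        # placeholder target repository
        folder_path="./converted-model",  # placeholder local folder
        commit_message="Upload folder using huggingface_hub",
    )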
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+onnx/model.onnx_data filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+{
+  "<<ENT>>": 250003,
+  "<<SEP>>": 250004,
+  "[FLERT]": 250002
+}
gliner_config.json ADDED
@@ -0,0 +1,35 @@
+{
+  "class_token_index": 250003,
+  "dropout": 0.4,
+  "encoder_config": null,
+  "ent_token": "<<ENT>>",
+  "eval_every": 900000,
+  "fine_tune": true,
+  "has_rnn": true,
+  "hidden_size": 768,
+  "log_dir": "logs",
+  "lr_encoder": "1e-5",
+  "lr_others": "5e-5",
+  "max_len": 384,
+  "max_neg_type_ratio": 1,
+  "max_types": 50,
+  "max_width": 10,
+  "model_name": "FacebookAI/xlm-roberta-large",
+  "model_type": "gliner",
+  "name": "large",
+  "num_steps": 1,
+  "prev_path": "none",
+  "random_drop": true,
+  "root_dir": "ablation_backbone",
+  "sep_token": "<<SEP>>",
+  "shuffle_types": true,
+  "size_sup": -1,
+  "span_mode": "markerV0",
+  "subtoken_pooling": "first",
+  "train_batch_size": 8,
+  "train_data": "dataset/train.json",
+  "transformers_version": "4.38.2",
+  "vocab_size": 250005,
+  "warmup_ratio": 0.1,
+  "words_splitter_type": "whitespace"
+}
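For anyone inspecting the new GLiNER config, a small sketch that just reads gliner_config.json with the standard library and prints the fields most relevant at inference time (the local path is an assumption; the keys are the ones shown in the diff above):

    import json

    # Load the config added in this commit and show the inference-relevant fields.
    with open("gliner_config.json", encoding="utf-8") as f:
        cfg = json.load(f)

    print(cfg["model_name"])         # backbone encoder: FacebookAI/xlm-roberta-large
    print(cfg["max_width"])          # maximum span width considered per entity (10)
    print(cfg["class_token_index"])  # vocabulary id of the <<ENT>> marker (250003)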
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6384dc84f9aab985612f03859aec05fe7bbfcada808bc2995f1b0122cbf34181
+size 490385767
onnx/model.onnx_data ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:264fc59ab2ed8abb1ca0c79ada5862ab9d738285674d422326c1f981c6c93858
+size 1848201216
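Note that onnx/model.onnx stores its weights externally in onnx/model.onnx_data, so the two files must sit next to each other before the model can be loaded. A hedged sketch with onnxruntime (paths and provider are assumptions, not taken from this repo):

    import onnxruntime as ort

    # model.onnx references model.onnx_data as external data; keep both files
    # in the same onnx/ directory when creating the session.
    session = ort.InferenceSession("onnx/model.onnx", providers=["CPUExecutionProvider"])
    print([inp.name for inp in session.get_inputs()])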
onnx/model_bnb4.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:898bbd592f79d2a802db3695557f1992818fb130e2b627b77ffbac34a2b66f56
+size 703991605
onnx/model_fp16.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e50fb42c05698cb8a4b86f22bdff5723c2c8a5c7fe4b1a5b823d4295d659ec65
+size 1169733476
onnx/model_int8.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6e8ed12af891360fb8babf1fa943b13f52cb36c8776099fa1ff18d516f25099
+size 586904720
onnx/model_q4.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:898bbd592f79d2a802db3695557f1992818fb130e2b627b77ffbac34a2b66f56
+size 703991605
onnx/model_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c920c1bbc5bd20381d39a950cbb595a3bb4029a058d53c1a68ae80ed184c9b43
+size 704258259
onnx/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6e8ed12af891360fb8babf1fa943b13f52cb36c8776099fa1ff18d516f25099
+size 586904720
onnx/model_uint8.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee595a711784f1268e278b5e0aa27a2801c60985609e4670bf2dc1c1d9453a52
+size 586904793
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d839b6b724f4fb44f57c02b2c83223c298e0b37b3faa90033c618547b69c63c
+size 17083308
tokenizer_config.json ADDED
@@ -0,0 +1,78 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "250001": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "250002": {
+      "content": "[FLERT]",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "250003": {
+      "content": "<<ENT>>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "250004": {
+      "content": "<<SEP>>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": "<mask>",
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "XLMRobertaTokenizer",
+  "unk_token": "<unk>"
+}
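Taken together, sentencepiece.bpe.model, tokenizer.json, added_tokens.json, special_tokens_map.json, and this tokenizer_config.json describe a complete XLMRobertaTokenizer. A quick sketch that loads the files from a local checkout (the path is an assumption) and confirms the GLiNER marker tokens resolve to the ids listed in added_tokens.json:

    from transformers import AutoTokenizer

    # Load the tokenizer files added in this commit from a local clone of the repo.
    tok = AutoTokenizer.from_pretrained(".")

    print(tok.convert_tokens_to_ids("<<ENT>>"))   # expected: 250003
    print(tok.convert_tokens_to_ids("<<SEP>>"))   # expected: 250004
    print(tok.convert_tokens_to_ids("[FLERT]"))   # expected: 250002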