system HF staff committed on
Commit b3324bd
1 Parent(s): ebfa325

Commit From AutoTrain

.gitattributes CHANGED
@@ -29,3 +29,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ tags:
+ - autotrain
+ - text-classification
+ language:
+ - unk
+ widget:
+ - text: "I love AutoTrain 🤗"
+ datasets:
+ - jfan-98/autotrain-data-LegalLong_on_contracts
+ co2_eq_emissions:
+   emissions: 47.64808387548789
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Multi-class Classification
+ - Model ID: 1230246837
+ - CO2 Emissions (in grams): 47.6481
+
+ ## Validation Metrics
+
+ - Loss: 0.265
+ - Accuracy: 0.943
+ - Macro F1: 0.944
+ - Micro F1: 0.943
+ - Weighted F1: 0.943
+ - Macro Precision: 0.947
+ - Micro Precision: 0.943
+ - Weighted Precision: 0.944
+ - Macro Recall: 0.943
+ - Micro Recall: 0.943
+ - Weighted Recall: 0.943
+
+
+ ## Usage
+
+ You can use cURL to access this model:
+
+ ```
+ $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/jfan-98/autotrain-LegalLong_on_contracts-1230246837
+ ```
+
+ Or Python API:
+
+ ```
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+ model = AutoModelForSequenceClassification.from_pretrained("jfan-98/autotrain-LegalLong_on_contracts-1230246837", use_auth_token=True)
+
+ tokenizer = AutoTokenizer.from_pretrained("jfan-98/autotrain-LegalLong_on_contracts-1230246837", use_auth_token=True)
+
+ inputs = tokenizer("I love AutoTrain", return_tensors="pt")
+
+ outputs = model(**inputs)
+ ```
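
The README's Python example stops at the raw `outputs`. A minimal follow-up sketch, continuing that snippet and assuming the `id2label` map shown in config.json below, for turning the logits into a predicted label:

```python
import torch

# Continues the README snippet above: outputs.logits has shape (batch_size, 17)
# for this 17-label classifier.
probs = torch.softmax(outputs.logits, dim=-1)
pred_id = int(probs.argmax(dim=-1))
# Prints the predicted label (e.g. "dt_zh_03") and its probability.
print(model.config.id2label[pred_id], float(probs[0, pred_id]))
```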
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "</s>": 21129,
+   "<s>": 21128
+ }
config.json ADDED
@@ -0,0 +1,92 @@
+ {
+   "_name_or_path": "AutoTrain",
+   "_num_labels": 17,
+   "architectures": [
+     "LongformerForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "attention_window": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "dt_zh_00",
+     "1": "dt_zh_01",
+     "2": "dt_zh_02",
+     "3": "dt_zh_03",
+     "4": "dt_zh_04",
+     "5": "dt_zh_05",
+     "6": "dt_zh_06",
+     "7": "dt_zh_07",
+     "8": "dt_zh_08",
+     "9": "dt_zh_09",
+     "10": "dt_zh_10",
+     "11": "dt_zh_11",
+     "12": "dt_zh_12",
+     "13": "dt_zh_13",
+     "14": "dt_zh_14",
+     "15": "dt_zh_15",
+     "16": "dt_zh_16"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "dt_zh_00": 0,
+     "dt_zh_01": 1,
+     "dt_zh_02": 2,
+     "dt_zh_03": 3,
+     "dt_zh_04": 4,
+     "dt_zh_05": 5,
+     "dt_zh_06": 6,
+     "dt_zh_07": 7,
+     "dt_zh_08": 8,
+     "dt_zh_09": 9,
+     "dt_zh_10": 10,
+     "dt_zh_11": 11,
+     "dt_zh_12": 12,
+     "dt_zh_13": 13,
+     "dt_zh_14": 14,
+     "dt_zh_15": 15,
+     "dt_zh_16": 16
+   },
+   "layer_norm_eps": 1e-12,
+   "max_length": 192,
+   "max_position_embeddings": 4098,
+   "model_type": "longformer",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "padding": "max_length",
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "sep_token_id": 2,
+   "torch_dtype": "float32",
+   "transformers_version": "4.20.0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 21128
+ }
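
config.json defines a 12-layer Longformer with a 512-token attention window per layer, 4098 position embeddings, and 17 labels (dt_zh_00 through dt_zh_16). A small sketch, assuming the repo id from the README (and a token if the repo is gated, as the README's use_auth_token suggests), for checking those fields after loading:

```python
from transformers import AutoConfig

# Sketch only: load the config added in this commit and inspect a few fields.
config = AutoConfig.from_pretrained("jfan-98/autotrain-LegalLong_on_contracts-1230246837")
print(config.model_type)               # "longformer"
print(config.num_labels)               # 17
print(config.attention_window)         # [512, 512, ..., 512], one entry per layer
print(config.max_position_embeddings)  # 4098
```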
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ffd3ab7a0be7b55c51ff53b9153a6c290a41fe4b1dfa8c11d9a7ad772e0c758
+ size 505310557
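
pytorch_model.bin is stored as a Git LFS pointer: the file committed here records only the sha256 oid and byte size of the real weights. A hedged sketch for verifying a locally downloaded copy against that pointer (the local path is an assumption):

```python
import hashlib
import os

# Sketch only: compare a local pytorch_model.bin against the LFS pointer above.
path = "pytorch_model.bin"  # assumed download location
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print(os.path.getsize(path) == 505310557)
print(sha.hexdigest() == "1ffd3ab7a0be7b55c51ff53b9153a6c290a41fe4b1dfa8c11d9a7ad772e0c758")
```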
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "[CLS]",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<s>",
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "eos_token": "</s>",
+   "errors": "replace",
+   "mask_token": "[MASK]",
+   "name_or_path": "AutoTrain",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "special_tokens_map_file": "/home/xiaochaojun/.cache/huggingface/transformers/d521373fc7ac35f63d56cf303de74a202403dcf1aaa792cd01f653694be59563.dd8bd9bfd3664b530ea4e645105f557769387b3da9f79bdb55ed556bdd80611d",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "LongformerTokenizer",
+   "trim_offsets": true,
+   "unk_token": "[UNK]"
+ }
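
The tokenizer files pair a BERT-style Chinese setup ([CLS]/[SEP]/[MASK], tokenize_chinese_chars, vocab.txt) with the <s>/</s> ids registered in added_tokens.json (21128/21129), served through LongformerTokenizer. A hedged check, assuming the tokenizer loads from the README's repo id as configured here:

```python
from transformers import AutoTokenizer

# Sketch only: confirm the special tokens from special_tokens_map.json and the
# extra ids from added_tokens.json survive loading.
tokenizer = AutoTokenizer.from_pretrained("jfan-98/autotrain-LegalLong_on_contracts-1230246837")
print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.mask_token)  # [CLS] [SEP] [MASK]
print(tokenizer.convert_tokens_to_ids(["<s>", "</s>"]))                # expected [21128, 21129]
```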
vocab.txt ADDED
The diff for this file is too large to render. See raw diff