system HF staff committed on
Commit 9740216
1 Parent(s): 5c42e1e

Commit From AutoTrain

.gitattributes CHANGED
@@ -32,3 +32,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
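The three added lines route any file matching those glob patterns through Git LFS instead of plain Git. As a minimal sketch (the file list is hypothetical, and gitattributes matching is only approximated here), Python's `fnmatch` mirrors how such patterns are applied:

```python
from fnmatch import fnmatch

# The three patterns added in this commit.
new_patterns = ["*.bin.*", "*.tar.gz", "tokenizer.json"]
# Hypothetical file names from a repo like this one.
files = ["pytorch_model.bin", "tokenizer.json", "model.tar.gz", "config.json"]

for name in files:
    if any(fnmatch(name, pattern) for pattern in new_patterns):
        print(name, "-> tracked by Git LFS")
# Prints tokenizer.json and model.tar.gz; pytorch_model.bin is typically
# covered by an earlier "*.bin" rule not shown in this hunk.
```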
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ tags:
+ - autotrain
+ - text-classification
+ language:
+ - en
+ widget:
+ - text: "I love AutoTrain 🤗"
+ datasets:
+ - gjbooth2/autotrain-data-glenn_epa_second_pooled_25
+ co2_eq_emissions:
+   emissions: 0.02021601897058404
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Multi-class Classification
+ - Model ID: 3519195196
+ - CO2 Emissions (in grams): 0.0202
+
+ ## Validation Metrics
+
+ - Loss: 1.733
+ - Accuracy: 0.534
+ - Macro F1: 0.343
+ - Micro F1: 0.534
+ - Weighted F1: 0.473
+ - Macro Precision: 0.371
+ - Micro Precision: 0.534
+ - Weighted Precision: 0.477
+ - Macro Recall: 0.375
+ - Micro Recall: 0.534
+ - Weighted Recall: 0.534
+
+ ## Usage
+
+ You can use cURL to access this model:
+
+ ```bash
+ $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/gjbooth2/autotrain-glenn_epa_second_pooled_25-3519195196
+ ```
+
+ Or you can use the Python API:
+
+ ```python
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+ model = AutoModelForSequenceClassification.from_pretrained("gjbooth2/autotrain-glenn_epa_second_pooled_25-3519195196", use_auth_token=True)
+ tokenizer = AutoTokenizer.from_pretrained("gjbooth2/autotrain-glenn_epa_second_pooled_25-3519195196", use_auth_token=True)
+
+ inputs = tokenizer("I love AutoTrain", return_tensors="pt")
+ outputs = model(**inputs)
+ ```
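The README's Python snippet stops at the raw model outputs. As a minimal sketch (not part of the committed README), the logits can be mapped to one of the 25 EPA labels via the id2label table defined in config.json below:

```python
import torch

# Continues from the README snippet above; assumes `model`, `tokenizer`,
# and `inputs` are already defined as shown there.
with torch.no_grad():
    logits = model(**inputs).logits       # shape: [1, 25]
probs = logits.softmax(dim=-1)
pred_id = int(probs.argmax(dim=-1))
print(model.config.id2label[pred_id])     # a label such as "EPA.1"
print(float(probs[0, pred_id]))           # probability of the predicted label
```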
config.json ADDED
@@ -0,0 +1,89 @@
+ {
+   "_name_or_path": "AutoTrain",
+   "_num_labels": 25,
+   "architectures": [
+     "DebertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "EPA.1",
+     "1": "EPA.10",
+     "2": "EPA.11",
+     "3": "EPA.12",
+     "4": "EPA.13",
+     "5": "EPA.14",
+     "6": "EPA.15",
+     "7": "EPA.16",
+     "8": "EPA.17",
+     "9": "EPA.18",
+     "10": "EPA.19",
+     "11": "EPA.2",
+     "12": "EPA.20",
+     "13": "EPA.21",
+     "14": "EPA.22",
+     "15": "EPA.23",
+     "16": "EPA.24",
+     "17": "EPA.25",
+     "18": "EPA.3",
+     "19": "EPA.4",
+     "20": "EPA.5",
+     "21": "EPA.6",
+     "22": "EPA.7",
+     "23": "EPA.8",
+     "24": "EPA.9"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "EPA.1": 0,
+     "EPA.10": 1,
+     "EPA.11": 2,
+     "EPA.12": 3,
+     "EPA.13": 4,
+     "EPA.14": 5,
+     "EPA.15": 6,
+     "EPA.16": 7,
+     "EPA.17": 8,
+     "EPA.18": 9,
+     "EPA.19": 10,
+     "EPA.2": 11,
+     "EPA.20": 12,
+     "EPA.21": 13,
+     "EPA.22": 14,
+     "EPA.23": 15,
+     "EPA.24": 16,
+     "EPA.25": 17,
+     "EPA.3": 18,
+     "EPA.4": 19,
+     "EPA.5": 20,
+     "EPA.6": 21,
+     "EPA.7": 22,
+     "EPA.8": 23,
+     "EPA.9": 24
+   },
+   "layer_norm_eps": 1e-07,
+   "max_length": 384,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "padding": "max_length",
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 768,
+   "pos_att_type": [
+     "c2p",
+     "p2c"
+   ],
+   "position_biased_input": false,
+   "relative_attention": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.25.1",
+   "type_vocab_size": 0,
+   "vocab_size": 50265
+ }
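The 25-label classification head defined above can be inspected without downloading the weights. A minimal sketch, assuming the repository is accessible (pass use_auth_token=True as in the README if it is private):

```python
from transformers import AutoConfig

# Fetches only config.json and exposes the label mapping defined above.
config = AutoConfig.from_pretrained("gjbooth2/autotrain-glenn_epa_second_pooled_25-3519195196")
print(config.num_labels)               # 25
print(config.id2label[0])              # "EPA.1" (keys become ints on load)
print(config.max_position_embeddings)  # 512
```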
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b773336a1f7630ed8758ee3a9af4203f8133774d068362c2071fdd43c4a19c0
+ size 556919345
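The three lines above are a Git LFS pointer, not the weights themselves; the actual ~557 MB file is fetched on checkout. As a minimal sketch (the local path is hypothetical), a downloaded copy can be verified against the pointer's oid:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a large file through SHA-256 without loading it into memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

# "pytorch_model.bin" is assumed to be the locally downloaded weights file.
expected = "2b773336a1f7630ed8758ee3a9af4203f8133774d068362c2071fdd43c4a19c0"
assert sha256_of("pytorch_model.bin") == expected
```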
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59b1df23073dc6fd08170b31370ab2c9da91a396ad232a0647d55db5a20d2579
+ size 2109848
tokenizer_config.json ADDED
@@ -0,0 +1,67 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "__type": "AddedToken",
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "do_lower_case": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 512,
+   "name_or_path": "AutoTrain",
+   "pad_token": {
+     "__type": "AddedToken",
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "__type": "AddedToken",
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "special_tokens_map_file": null,
+   "tokenizer_class": "DebertaTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "vocab_type": "gpt2"
+ }
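The config above declares a DebertaTokenizer that pairs BERT-style special tokens ([CLS], [SEP], [MASK]) with a GPT-2 byte-level vocabulary ("vocab_type": "gpt2"). A minimal sketch of loading it and confirming those settings (pass use_auth_token=True as in the README if the repo is private):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gjbooth2/autotrain-glenn_epa_second_pooled_25-3519195196")
print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.mask_token)  # [CLS] [SEP] [MASK]
print(tokenizer.model_max_length)  # 512, matching "model_max_length" above
```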
vocab.json ADDED
The diff for this file is too large to render. See raw diff