fhamborg committed
Commit: d3bcbf1
Parent: d8527d0

Push model using huggingface_hub.

Files changed (5):
  1. README.md +2 -2
  2. config.json +1 -1
  3. model_head.pkl +1 -1
  4. pytorch_model.bin +1 -1
  5. tokenizer_config.json +7 -0
README.md CHANGED
@@ -7,7 +7,7 @@ tags:
 pipeline_tag: text-classification
 ---
 
-# /var/folders/qg/vmj6zq4s7hb2pbkp3b8kstvh0000gn/T/tmpwt2xprax/fhamborg/newsframes-aff
+# /var/folders/qg/vmj6zq4s7hb2pbkp3b8kstvh0000gn/T/tmpk_iail3l/fhamborg/newsframes-aff
 
 This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves:
 
@@ -28,7 +28,7 @@ You can then run inference as follows:
 from setfit import SetFitModel
 
 # Download from Hub and run inference
-model = SetFitModel.from_pretrained("/var/folders/qg/vmj6zq4s7hb2pbkp3b8kstvh0000gn/T/tmpwt2xprax/fhamborg/newsframes-aff")
+model = SetFitModel.from_pretrained("/var/folders/qg/vmj6zq4s7hb2pbkp3b8kstvh0000gn/T/tmpk_iail3l/fhamborg/newsframes-aff")
 # Run inference
 preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"])
 ```
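
Note that the auto-generated README embeds the temporary local export path in both the title and the snippet. Once the push completes, the model would normally be loaded by its Hub repo id instead; a minimal sketch, assuming the model is published as fhamborg/newsframes-aff (inferred from the path above, not confirmed by this diff):

```python
from setfit import SetFitModel

# Load by Hub repo id rather than the temporary local export path
# (assumption: the repo id is fhamborg/newsframes-aff).
model = SetFitModel.from_pretrained("fhamborg/newsframes-aff")

# Run inference on the README's example inputs
preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"])
print(preds)
```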
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/Users/felix/.cache/torch/sentence_transformers/sentence-transformers_paraphrase-mpnet-base-v2/",
+  "_name_or_path": "models/sentence-transformers-all-mpnet-base-v2-dataset_systematic_random_compare_with_absent.json-AFF_4-False-0.15-5e-06-10-3-0.7804744264931363/",
   "architectures": [
     "MPNetModel"
   ],
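
The updated `_name_or_path` merely records the local checkpoint directory the model was exported from; it is informational metadata and not required for loading. A minimal sketch of reading the stored value straight from the Hub, again assuming the repo id is fhamborg/newsframes-aff:

```python
import json

from huggingface_hub import hf_hub_download

# Fetch the raw config.json and inspect the stored metadata
# (assumption: the repo id is fhamborg/newsframes-aff).
path = hf_hub_download(repo_id="fhamborg/newsframes-aff", filename="config.json")
with open(path) as f:
    config = json.load(f)

print(config["_name_or_path"])  # local training directory recorded at export time
print(config["architectures"])  # ["MPNetModel"]
```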
model_head.pkl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:32c50397f166f35251240ae16521662e76e45a8f435313f40a2c98c9baf82e20
+oid sha256:9b8545b33e210f6339c6519741242ca227c41570d12b4f1a719736ddf68cbc27
 size 25479
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:215f95d6dc04cd341218925c30cfc04d6225e299c53d53bc2805fde72bbefd4d
+oid sha256:4398568cf06a68bf8c5809f257f70b03d275e9f0886b3059fbd61d4bc87adfd2
 size 438009257
tokenizer_config.json CHANGED
@@ -34,8 +34,10 @@
     "rstrip": false,
     "single_word": false
   },
+  "max_length": 512,
   "model_max_length": 512,
   "never_split": null,
+  "pad_to_multiple_of": null,
   "pad_token": {
     "__type": "AddedToken",
     "content": "<pad>",
@@ -44,6 +46,8 @@
     "rstrip": false,
     "single_word": false
   },
+  "pad_token_type_id": 0,
+  "padding_side": "right",
   "sep_token": {
     "__type": "AddedToken",
     "content": "</s>",
@@ -52,9 +56,12 @@
     "rstrip": false,
     "single_word": false
   },
+  "stride": 0,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "MPNetTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
   "unk_token": {
     "__type": "AddedToken",
     "content": "[UNK]",