alexkueck committed
Commit f0ecd4e
1 Parent(s): 57b329f

Add SetFit model

1_Pooling/config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "word_embedding_dimension": 768,
+   "pooling_mode_cls_token": false,
+   "pooling_mode_mean_tokens": true,
+   "pooling_mode_max_tokens": false,
+   "pooling_mode_mean_sqrt_len_tokens": false
+ }
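With only `pooling_mode_mean_tokens` enabled, this Pooling module averages the backbone's token embeddings (masking out padding) into a 768-dimensional sentence vector. A minimal sketch of that operation, with illustrative tensor names that are not taken from the repository:

```python
import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Masked mean over the sequence dimension, as configured in 1_Pooling/config.json.

    token_embeddings: (batch, seq_len, 768)  # 768 = word_embedding_dimension
    attention_mask:   (batch, seq_len)       # 1 for real tokens, 0 for padding
    """
    mask = attention_mask.unsqueeze(-1).float()        # (batch, seq_len, 1)
    summed = (token_embeddings * mask).sum(dim=1)      # sum of non-padding token vectors
    counts = mask.sum(dim=1).clamp(min=1e-9)           # number of non-padding tokens per sentence
    return summed / counts                             # (batch, 768) sentence embedding
```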
README.md CHANGED
@@ -1,52 +1,49 @@
  ---
- license: mit
  tags:
- - generated_from_trainer
- model-index:
- - name: li-tis-tuned-2
-   results: []
  ---

- <!-- This model card has been generated automatically according to the information the Trainer had access to. You
- should probably proofread and complete it, then remove this comment. -->

- # li-tis-tuned-2

- This model is a fine-tuned version of [EleutherAI/gpt-neo-1.3B](https://huggingface.co/EleutherAI/gpt-neo-1.3B) on the None dataset.

- ## Model description

- More information needed

- ## Intended uses & limitations

- More information needed

- ## Training and evaluation data

- More information needed

- ## Training procedure

- ### Training hyperparameters
-
- The following hyperparameters were used during training:
- - learning_rate: 2e-05
- - train_batch_size: 2
- - eval_batch_size: 2
- - seed: 42
- - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- - lr_scheduler_type: linear
- - lr_scheduler_warmup_steps: 1
- - num_epochs: 5
-
- ### Training results
-
-
- ### Framework versions
-
- - Transformers 4.29.1
- - Pytorch 2.0.1+cu117
- - Datasets 2.14.4
- - Tokenizers 0.13.3

  ---
+ license: apache-2.0
  tags:
+ - setfit
+ - sentence-transformers
+ - text-classification
+ pipeline_tag: text-classification
  ---

+ # alexkueck/li-tis-tuned-2

+ This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves:

+ 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
+ 2. Training a classification head with features from the fine-tuned Sentence Transformer.

+ ## Usage

+ To use this model for inference, first install the SetFit library:

+ ```bash
+ python -m pip install setfit
+ ```

+ You can then run inference as follows:

+ ```python
+ from setfit import SetFitModel

+ # Download from Hub and run inference
+ model = SetFitModel.from_pretrained("alexkueck/li-tis-tuned-2")
+ # Run inference
+ preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"])
+ ```

+ ## BibTeX entry and citation info

+ ```bibtex
+ @article{https://doi.org/10.48550/arxiv.2209.11055,
+   doi = {10.48550/ARXIV.2209.11055},
+   url = {https://arxiv.org/abs/2209.11055},
+   author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
+   keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
+   title = {Efficient Few-Shot Learning Without Prompts},
+   publisher = {arXiv},
+   year = {2022},
+   copyright = {Creative Commons Attribution 4.0 International}
+ }
+ ```
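The card above describes the two-step SetFit recipe but does not include a training snippet. As a rough sketch only, using the pre-1.0 `setfit` API with a made-up toy dataset (the actual data and hyperparameters behind this checkpoint are not recorded in the commit), the procedure looks roughly like this:

```python
from datasets import Dataset
from sentence_transformers.losses import CosineSimilarityLoss
from setfit import SetFitModel, SetFitTrainer

# Hypothetical toy dataset -- not the data this checkpoint was trained on.
train_dataset = Dataset.from_dict({
    "text": ["i loved the spiderman movie!", "pineapple on pizza is the worst"],
    "label": [1, 0],
})

# Step 1 starts from the backbone named in config.json and fine-tunes it contrastively.
model = SetFitModel.from_pretrained("sentence-transformers/paraphrase-mpnet-base-v2")

trainer = SetFitTrainer(
    model=model,
    train_dataset=train_dataset,
    loss_class=CosineSimilarityLoss,  # contrastive loss over generated sentence pairs
    num_iterations=20,                # pairs generated per labelled example
    num_epochs=1,
)
trainer.train()  # Step 2 (fitting the classification head) runs after the contrastive phase.

# Optionally publish the result to the Hub.
model.push_to_hub("alexkueck/li-tis-tuned-2")
```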
 
 
 
 
 
 
 
 
 
 
config.json CHANGED
@@ -1,75 +1,24 @@
  {
-   "_name_or_path": "EleutherAI/gpt-neo-1.3B",
-   "activation_function": "gelu_new",
    "architectures": [
-     "GPTNeoForCausalLM"
    ],
-   "attention_dropout": 0,
-   "attention_layers": [
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local"
-   ],
-   "attention_types": [
-     [
-       [
-         "global",
-         "local"
-       ],
-       12
-     ]
-   ],
-   "bos_token_id": 50256,
-   "classifier_dropout": 0.1,
-   "embed_dropout": 0,
-   "eos_token_id": 50256,
-   "gradient_checkpointing": false,
-   "hidden_size": 2048,
    "initializer_range": 0.02,
-   "intermediate_size": null,
-   "layer_norm_epsilon": 1e-05,
-   "max_position_embeddings": 2048,
-   "model_type": "gpt_neo",
-   "num_heads": 16,
-   "num_layers": 24,
-   "resid_dropout": 0,
-   "summary_activation": null,
-   "summary_first_dropout": 0.1,
-   "summary_proj_to_labels": true,
-   "summary_type": "cls_index",
-   "summary_use_proj": true,
-   "task_specific_params": {
-     "text-generation": {
-       "do_sample": true,
-       "max_length": 50,
-       "temperature": 0.9
-     }
-   },
-   "tokenizer_class": "GPT2Tokenizer",
-   "torch_dtype": "float16",
    "transformers_version": "4.29.1",
-   "use_cache": true,
-   "vocab_size": 50257,
-   "window_size": 256
  }

  {
+   "_name_or_path": "/home/user/.cache/torch/sentence_transformers/sentence-transformers_paraphrase-mpnet-base-v2/",
    "architectures": [
+     "MPNetModel"
    ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
    "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "mpnet",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "relative_attention_num_buckets": 32,
+   "torch_dtype": "float32",
    "transformers_version": "4.29.1",
+   "vocab_size": 30527
  }
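For reference, a small illustrative snippet (not part of this repository) showing how the new backbone configuration can be inspected with 🤗 Transformers; the printed fields should match the values above, and the 768-dimensional hidden size is what `1_Pooling/config.json` consumes:

```python
from transformers import AutoConfig, AutoModel

# Load the configuration of the MPNet backbone this repo is derived from.
config = AutoConfig.from_pretrained("sentence-transformers/paraphrase-mpnet-base-v2")
print(config.model_type)               # "mpnet"
print(config.hidden_size)              # 768, matches word_embedding_dimension in 1_Pooling/config.json
print(config.num_hidden_layers)        # 12
print(config.max_position_embeddings)  # 514

# The backbone itself can be instantiated the same way (float32 weights, roughly the 438 MB seen below).
backbone = AutoModel.from_pretrained("sentence-transformers/paraphrase-mpnet-base-v2")
```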
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "__version__": {
+     "sentence_transformers": "2.0.0",
+     "transformers": "4.7.0",
+     "pytorch": "1.9.0+cu102"
+   }
+ }
model_head.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26274d5599db8610add5ff7a5a0fd85db61333a7c6fe7726a0cbde0b12d704d8
+ size 7007
modules.json ADDED
@@ -0,0 +1,14 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "sentence_transformers.models.Transformer"
+   },
+   {
+     "idx": 1,
+     "name": "1",
+     "path": "1_Pooling",
+     "type": "sentence_transformers.models.Pooling"
+   }
+ ]
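`modules.json` wires the two Sentence Transformers modules together: module 0 is the MPNet encoder, module 1 is the pooling layer configured in `1_Pooling/`. A rough, purely illustrative sketch of how the equivalent pipeline can be assembled or loaded with the `sentence-transformers` library:

```python
from sentence_transformers import SentenceTransformer, models

# Equivalent of modules.json: module 0 (Transformer) followed by module 1 (Pooling).
word_embedding_model = models.Transformer(
    "sentence-transformers/paraphrase-mpnet-base-v2",
    max_seq_length=512,  # from sentence_bert_config.json
)
pooling_model = models.Pooling(
    word_embedding_model.get_word_embedding_dimension(),  # 768
    pooling_mode_mean_tokens=True,                        # from 1_Pooling/config.json
)
encoder = SentenceTransformer(modules=[word_embedding_model, pooling_model])

# Or simply load this repository, which ships the same two modules.
encoder = SentenceTransformer("alexkueck/li-tis-tuned-2")
embeddings = encoder.encode(["i loved the spiderman movie!"])
print(embeddings.shape)  # (1, 768)
```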
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7c202b00de073af0ff097f7d4d42b49f5631796e6a550f943b373886d28d39ad
- size 2731935805

  version https://git-lfs.github.com/spec/v1
+ oid sha256:339005c0ac8dc02880924e687addc56e88a28360e42babb5a3306c2730357c0e
+ size 438013677
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_seq_length": 512,
+   "do_lower_case": false
+ }
special_tokens_map.json CHANGED
@@ -1,6 +1,15 @@
  {
-   "bos_token": "<|endoftext|>",
-   "eos_token": "<|endoftext|>",
-   "pad_token": "<|endoftext|>",
-   "unk_token": "<|endoftext|>"
  }

  {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "[UNK]"
  }
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,30 +1,63 @@
  {
-   "add_bos_token": false,
-   "add_prefix_space": false,
    "bos_token": {
      "__type": "AddedToken",
-     "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false
    },
    "clean_up_tokenization_spaces": true,
    "eos_token": {
      "__type": "AddedToken",
-     "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false
    },
-   "errors": "replace",
-   "model_max_length": 2048,
-   "pad_token": null,
-   "tokenizer_class": "GPT2Tokenizer",
    "unk_token": {
      "__type": "AddedToken",
-     "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,

  {
    "bos_token": {
      "__type": "AddedToken",
+     "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false
    },
    "clean_up_tokenization_spaces": true,
+   "cls_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
    "eos_token": {
      "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 512,
+   "never_split": null,
+   "pad_token": {
+     "__type": "AddedToken",
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false
    },
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "MPNetTokenizer",
    "unk_token": {
      "__type": "AddedToken",
+     "content": "[UNK]",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
vocab.txt ADDED
The diff for this file is too large to render. See raw diff