GeneStory committed
Commit 97427c6
1 parent: bbb9edb

End of training

Files changed (6)
  1. README.md +17 -13
  2. config.json +32 -14
  3. model.safetensors +2 -2
  4. tokenizer.json +6 -1
  5. tokenizer_config.json +2 -2
  6. training_args.bin +1 -1
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
-license: mit
-base_model: thenlper/gte-large
+license: apache-2.0
+base_model: Alibaba-NLP/gte-base-en-v1.5
 tags:
 - generated_from_trainer
 metrics:
@@ -15,10 +15,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # Classify-model-v1.2
 
-This model is a fine-tuned version of [thenlper/gte-large](https://huggingface.co/thenlper/gte-large) on an unknown dataset.
+This model is a fine-tuned version of [Alibaba-NLP/gte-base-en-v1.5](https://huggingface.co/Alibaba-NLP/gte-base-en-v1.5) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.3536
-- Accuracy: 0.9107
+- Loss: 0.2915
+- Accuracy: 0.9375
 
 ## Model description
 
@@ -37,24 +37,28 @@
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 9.5e-05
+- learning_rate: 0.0001
 - train_batch_size: 128
 - eval_batch_size: 128
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs: 6
+- num_epochs: 10
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss | Accuracy |
 |:-------------:|:-----:|:----:|:---------------:|:--------:|
-| 1.3025 | 1.0 | 11 | 0.7724 | 0.7411 |
-| 0.5536 | 2.0 | 22 | 0.4194 | 0.9018 |
-| 0.2468 | 3.0 | 33 | 0.4068 | 0.875 |
-| 0.111 | 4.0 | 44 | 0.3455 | 0.8839 |
-| 0.0531 | 5.0 | 55 | 0.3453 | 0.8929 |
-| 0.0305 | 6.0 | 66 | 0.3536 | 0.9107 |
+| 1.0241 | 1.0 | 11 | 0.3903 | 0.8482 |
+| 0.2103 | 2.0 | 22 | 0.2814 | 0.8929 |
+| 0.0637 | 3.0 | 33 | 0.2157 | 0.9196 |
+| 0.023 | 4.0 | 44 | 0.2686 | 0.9375 |
+| 0.0147 | 5.0 | 55 | 0.3088 | 0.9286 |
+| 0.0091 | 6.0 | 66 | 0.2554 | 0.9375 |
+| 0.0055 | 7.0 | 77 | 0.2785 | 0.9464 |
+| 0.0054 | 8.0 | 88 | 0.2852 | 0.9464 |
+| 0.0033 | 9.0 | 99 | 0.2899 | 0.9375 |
+| 0.0034 | 10.0 | 110 | 0.2915 | 0.9375 |
 
 
 ### Framework versions
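The hyperparameters listed above map directly onto Transformers' `TrainingArguments`; the Adam betas and epsilon shown are the library defaults, so they need no explicit flags. A minimal sketch, assuming an epoch-level evaluation schedule (suggested by the per-epoch results table) and a placeholder output directory; the actual training script is not part of this commit:

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="Classify-model-v1.2",  # placeholder, not from this commit
    learning_rate=1e-4,                # 0.0001, up from 9.5e-05 in the previous run
    per_device_train_batch_size=128,
    per_device_eval_batch_size=128,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=10,               # up from 6
    evaluation_strategy="epoch",       # assumed from the per-epoch eval table
)
```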
config.json CHANGED
@@ -1,14 +1,22 @@
 {
-  "_name_or_path": "thenlper/gte-large",
+  "_name_or_path": "Alibaba-NLP/gte-base-en-v1.5",
   "architectures": [
-    "BertForSequenceClassification"
+    "NewForSequenceClassification"
   ],
-  "attention_probs_dropout_prob": 0.1,
+  "attention_probs_dropout_prob": 0.0,
+  "auto_map": {
+    "AutoConfig": "Alibaba-NLP/new-impl--configuration.NewConfig",
+    "AutoModel": "Alibaba-NLP/new-impl--modeling.NewModel",
+    "AutoModelForMaskedLM": "Alibaba-NLP/new-impl--modeling.NewForMaskedLM",
+    "AutoModelForMultipleChoice": "Alibaba-NLP/new-impl--modeling.NewForMultipleChoice",
+    "AutoModelForQuestionAnswering": "Alibaba-NLP/new-impl--modeling.NewForQuestionAnswering",
+    "AutoModelForSequenceClassification": "Alibaba-NLP/new-impl--modeling.NewForSequenceClassification",
+    "AutoModelForTokenClassification": "Alibaba-NLP/new-impl--modeling.NewForTokenClassification"
+  },
   "classifier_dropout": null,
-  "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
-  "hidden_size": 1024,
+  "hidden_size": 768,
   "id2label": {
     "0": "LABEL_0",
     "1": "LABEL_1",
@@ -18,7 +26,7 @@
     "5": "LABEL_5"
   },
   "initializer_range": 0.02,
-  "intermediate_size": 4096,
+  "intermediate_size": 3072,
   "label2id": {
     "LABEL_0": 0,
     "LABEL_1": 1,
@@ -28,16 +36,26 @@
     "LABEL_5": 5
   },
   "layer_norm_eps": 1e-12,
-  "max_position_embeddings": 512,
-  "model_type": "bert",
-  "num_attention_heads": 16,
-  "num_hidden_layers": 24,
+  "layer_norm_type": "layer_norm",
+  "logn_attention_clip1": false,
+  "logn_attention_scale": false,
+  "max_position_embeddings": 8192,
+  "model_type": "new",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pack_qkv": true,
   "pad_token_id": 0,
-  "position_embedding_type": "absolute",
+  "position_embedding_type": "rope",
   "problem_type": "single_label_classification",
+  "rope_scaling": {
+    "factor": 2.0,
+    "type": "ntk"
+  },
+  "rope_theta": 500000,
   "torch_dtype": "float32",
   "transformers_version": "4.39.3",
-  "type_vocab_size": 2,
-  "use_cache": true,
-  "vocab_size": 30522
+  "type_vocab_size": 0,
+  "unpad_inputs": false,
+  "use_memory_efficient_attention": false,
+  "vocab_size": 30528
 }
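Because `model_type` is now `"new"` and every `auto_map` entry points at code hosted in the `Alibaba-NLP/new-impl` repository, this checkpoint can no longer be loaded as a stock BERT: `from_pretrained` needs `trust_remote_code=True`. A minimal loading sketch; the repo id here is a guess from the commit author and model name, not confirmed by this commit:

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo = "GeneStory/Classify-model-v1.2"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(
    repo,
    trust_remote_code=True,  # resolves NewForSequenceClassification via auto_map
)
```

Note also the switch from learned absolute positions to RoPE, which is what lets `max_position_embeddings` grow from 512 to 8192 without a position-embedding table.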
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:906f698748c591ee0384de3a429b69e97858ed4fa9554310225bf6bf5b17e8a3
-size 1340639160
+oid sha256:c31e825e54b9bc4da41f2280d716d3286424790d8ccf226186bcdbc3e11665b1
+size 549500832
tokenizer.json CHANGED
@@ -1,6 +1,11 @@
 {
   "version": "1.0",
-  "truncation": null,
+  "truncation": {
+    "direction": "Right",
+    "max_length": 32768,
+    "strategy": "LongestFirst",
+    "stride": 0
+  },
   "padding": {
     "strategy": "BatchLongest",
     "direction": "Right",
tokenizer_config.json CHANGED
@@ -45,8 +45,8 @@
   "cls_token": "[CLS]",
   "do_lower_case": true,
   "mask_token": "[MASK]",
-  "max_length": 128,
-  "model_max_length": 1000000000000000019884624838656,
+  "max_length": 512,
+  "model_max_length": 32768,
   "pad_to_multiple_of": null,
   "pad_token": "[PAD]",
   "pad_token_type_id": 0,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e525a6f2cf5abc222a232545bacf1ed1e5885b59ccb79591274719284777ff7a
+oid sha256:2395a8c853e5d942df02a2e4b72617394e866cff32ac8e120dd6945f715fc162
 size 4984