tandevstag committed on
Commit
397cf1b
1 Parent(s): ee83423
Files changed (6) hide show
  1. README.md +55 -17
  2. config.json +5 -3
  3. pytorch_model.bin +2 -2
  4. tokenizer.json +1 -1
  5. tokenizer_config.json +1 -1
  6. training_args.bin +2 -2
README.md CHANGED
@@ -1,22 +1,60 @@
1
  ---
2
- license: apache-2.0
3
- language:
4
- - vi
5
  metrics:
6
  - accuracy
7
- library_name: transformers
8
- pipeline_tag: text-classification
9
- tags:
10
- - finance
11
  ---
12
- Model for financial news
13
 
14
- ---
15
- language:
16
- - vi
17
- metrics:
18
- - accuracy
19
- pipeline_tag: text-classification
20
- tags:
21
- - finance
22
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ base_model: FPTAI/vibert-base-cased
3
+ tags:
4
+ - generated_from_trainer
5
  metrics:
6
  - accuracy
7
+ model-index:
8
+ - name: vi-fin-news
9
+ results: []
 
10
  ---
 
11
 
12
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
13
+ should probably proofread and complete it, then remove this comment. -->
14
+
15
+ # vi-fin-news
16
+
17
+ This model is a fine-tuned version of [FPTAI/vibert-base-cased](https://huggingface.co/FPTAI/vibert-base-cased) on the None dataset.
18
+ It achieves the following results on the evaluation set:
19
+ - Loss: 0.4509
20
+ - Accuracy: 0.9136
21
+
22
+ ## Model description
23
+
24
+ More information needed
25
+
26
+ ## Intended uses & limitations
27
+
28
+ More information needed
29
+
30
+ ## Training and evaluation data
31
+
32
+ More information needed
33
+
34
+ ## Training procedure
35
+
36
+ ### Training hyperparameters
37
+
38
+ The following hyperparameters were used during training:
39
+ - learning_rate: 2e-05
40
+ - train_batch_size: 16
41
+ - eval_batch_size: 16
42
+ - seed: 42
43
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
44
+ - lr_scheduler_type: linear
45
+ - num_epochs: 2
46
+
47
+ ### Training results
48
+
49
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
50
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
51
+ | 0.1176 | 1.0 | 1150 | 0.3566 | 0.9181 |
52
+ | 0.0582 | 2.0 | 2300 | 0.4509 | 0.9136 |
53
+
54
+
55
+ ### Framework versions
56
+
57
+ - Transformers 4.32.1
58
+ - Pytorch 2.1.2
59
+ - Datasets 2.12.0
60
+ - Tokenizers 0.13.3
config.json CHANGED
@@ -11,12 +11,14 @@
11
  "hidden_dropout_prob": 0.1,
12
  "hidden_size": 768,
13
  "id2label": {
14
- "0": "LABEL_0"
 
15
  },
16
  "initializer_range": 0.02,
17
  "intermediate_size": 3072,
18
  "label2id": {
19
- "LABEL_0": 0
 
20
  },
21
  "layer_norm_eps": 1e-12,
22
  "max_position_embeddings": 512,
@@ -31,7 +33,7 @@
31
  "pooler_size_per_head": 128,
32
  "pooler_type": "first_token_transform",
33
  "position_embedding_type": "absolute",
34
- "problem_type": "regression",
35
  "torch_dtype": "float32",
36
  "transformers_version": "4.32.1",
37
  "type_vocab_size": 2,
 
11
  "hidden_dropout_prob": 0.1,
12
  "hidden_size": 768,
13
  "id2label": {
14
+ "0": "irrelevant",
15
+ "1": "relevant"
16
  },
17
  "initializer_range": 0.02,
18
  "intermediate_size": 3072,
19
  "label2id": {
20
+ "irrelevant": 0,
21
+ "relevant": 1
22
  },
23
  "layer_norm_eps": 1e-12,
24
  "max_position_embeddings": 512,
 
33
  "pooler_size_per_head": 128,
34
  "pooler_type": "first_token_transform",
35
  "position_embedding_type": "absolute",
36
+ "problem_type": "single_label_classification",
37
  "torch_dtype": "float32",
38
  "transformers_version": "4.32.1",
39
  "type_vocab_size": 2,
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4fa16f1a4b9148c93ac67a3c9fd9e85828b4e31083c18d6463e3d6e193ce8f49
3
- size 461489390
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ec9d1faf60f6576fcdb1491a99e05bf0939bc07b69cc6ac4805c6c4bfc7f15e
3
+ size 461492462
tokenizer.json CHANGED
@@ -54,7 +54,7 @@
54
  "clean_text": true,
55
  "handle_chinese_chars": true,
56
  "strip_accents": null,
57
- "lowercase": false
58
  },
59
  "pre_tokenizer": {
60
  "type": "BertPreTokenizer"
 
54
  "clean_text": true,
55
  "handle_chinese_chars": true,
56
  "strip_accents": null,
57
+ "lowercase": true
58
  },
59
  "pre_tokenizer": {
60
  "type": "BertPreTokenizer"
tokenizer_config.json CHANGED
@@ -2,7 +2,7 @@
2
  "clean_up_tokenization_spaces": true,
3
  "cls_token": "[CLS]",
4
  "do_basic_tokenize": true,
5
- "do_lower_case": false,
6
  "mask_token": "[MASK]",
7
  "model_max_length": 1000000000000000019884624838656,
8
  "never_split": null,
 
2
  "clean_up_tokenization_spaces": true,
3
  "cls_token": "[CLS]",
4
  "do_basic_tokenize": true,
5
+ "do_lower_case": true,
6
  "mask_token": "[MASK]",
7
  "model_max_length": 1000000000000000019884624838656,
8
  "never_split": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:78a062a52576017395f1fa007b88b6a50afbb37ae4f122636f2a56ab111b559e
3
- size 3640
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c73d7d65b16a3bdd605616b54598a62789f030b081da49b7be85ceb3906f010
3
+ size 4536