ajrayman committed
Commit e59fab3
1 Parent(s): 9365691

Training in progress, epoch 1

Files changed (4)
  1. README.md +13 -12
  2. config.json +5 -5
  3. model.safetensors +2 -2
  4. training_args.bin +1 -1
README.md CHANGED
@@ -1,7 +1,7 @@
 ---
 library_name: transformers
 license: mit
-base_model: roberta-base
+base_model: roberta-large
 tags:
 - generated_from_trainer
 metrics:
@@ -10,22 +10,22 @@ metrics:
 - recall
 - f1
 model-index:
-- name: psychopathy_binary
+- name: machiavellianism_binary
   results: []
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
-# psychopathy_binary
+# machiavellianism_binary
 
-This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset.
+This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.5869
-- Accuracy: 0.7047
-- Precision: 0.7698
-- Recall: 0.4609
-- F1: 0.5766
+- Loss: 0.6000
+- Accuracy: 0.7284
+- Precision: 0.7104
+- Recall: 0.6075
+- F1: 0.6549
 
 ## Model description
 
@@ -50,14 +50,15 @@ The following hyperparameters were used during training:
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs: 2
+- num_epochs: 3
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 |
 |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|
-| No log | 1.0 | 140 | 0.5715 | 0.6993 | 0.7254 | 0.5 | 0.5920 |
-| No log | 2.0 | 280 | 0.5869 | 0.7047 | 0.7698 | 0.4609 | 0.5766 |
+| No log | 1.0 | 127 | 0.5672 | 0.7284 | 0.7895 | 0.4907 | 0.6052 |
+| No log | 2.0 | 254 | 0.6207 | 0.7195 | 0.8372 | 0.4206 | 0.5599 |
+| No log | 3.0 | 381 | 0.6000 | 0.7284 | 0.7104 | 0.6075 | 0.6549 |
 
 
 ### Framework versions
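As context for the hyperparameter and metric columns above: a `generated_from_trainer` card gets its accuracy/precision/recall/F1 numbers from a metrics callback passed to the `Trainer`. The sketch below is an assumption about what such a callback looks like for this binary, single-label setup, not the author's actual training script; the function name and the `average="binary"` choice are illustrative.

```python
# Minimal sketch (assumed, not taken from this repo) of a compute_metrics
# callback yielding the four metric columns for a 2-class,
# single_label_classification head.
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)  # argmax over the two class logits
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, preds, average="binary"
    )
    return {
        "accuracy": accuracy_score(labels, preds),
        "precision": precision,
        "recall": recall,
        "f1": f1,
    }
```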
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "roberta-base",
+  "_name_or_path": "roberta-large",
   "architectures": [
     "RobertaForSequenceClassification"
   ],
@@ -9,14 +9,14 @@
   "eos_token_id": 2,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
-  "hidden_size": 768,
+  "hidden_size": 1024,
   "initializer_range": 0.02,
-  "intermediate_size": 3072,
+  "intermediate_size": 4096,
   "layer_norm_eps": 1e-05,
   "max_position_embeddings": 514,
   "model_type": "roberta",
-  "num_attention_heads": 12,
-  "num_hidden_layers": 12,
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
   "pad_token_id": 1,
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c446090fbfc40c39d974726a0571ddf3b51313039f2fb81f44ec235215d55fe1
-size 498612824
+oid sha256:9e30296c69ff71f2367ff5185c14195d981163908b0168c977f5d8515724df74
+size 1421495416
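The weights are stored through Git LFS, so the commit only rewrites the pointer; the size change (≈499 MB → ≈1.42 GB) is consistent with moving from roughly 125M to 355M float32 parameters. To check a downloaded copy against the pointer's `oid` and `size` fields, something like the following works; the local path is a placeholder, not taken from the repo:

```python
# Sketch: verify a local download against the LFS pointer fields.
import hashlib
import os

path = "model.safetensors"  # placeholder local path
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)
print("oid sha256:" + digest.hexdigest())
print("size", os.path.getsize(path))
```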
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:875820a868754a6be17cbb559a498934adfffe01c798fc71f097d50cb12547bb
+oid sha256:ded6bede6eb00bc1d58a8e1dc7e452b94a0db065fb66753494fb8d02ac6d1206
 size 4719
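`training_args.bin` is a pickled `TrainingArguments` object, which is why the size stays at 4719 bytes while only the hash changes. A minimal sketch for inspecting it locally; since unpickling executes arbitrary code, only do this for files you trust:

```python
# Sketch: inspect the pickled TrainingArguments. weights_only=False is
# required on recent PyTorch because this is a full pickled Python object,
# not a tensor checkpoint.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.seed)
```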