icpro committed
Commit b77e525
1 Parent(s): c4d9a4a

Training in progress, epoch 1

config.json CHANGED
@@ -1,37 +1,36 @@
 {
-  "_name_or_path": "almanach/camembert-large",
+  "_name_or_path": "mistralai/Mistral-7B-v0.3",
   "architectures": [
-    "CamembertForSequenceClassification"
+    "MistralForSequenceClassification"
   ],
-  "attention_probs_dropout_prob": 0.1,
-  "bos_token_id": 0,
-  "classifier_dropout": null,
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
   "eos_token_id": 2,
-  "hidden_act": "gelu",
-  "hidden_dropout_prob": 0.1,
-  "hidden_size": 1024,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
   "id2label": {
     "0": "Incorrect",
     "1": "Correct"
   },
   "initializer_range": 0.02,
-  "intermediate_size": 4096,
+  "intermediate_size": 14336,
   "label2id": {
     "Correct": 1,
     "Incorrect": 0
   },
-  "layer_norm_eps": 1e-05,
-  "max_position_embeddings": 514,
-  "model_type": "camembert",
-  "num_attention_heads": 16,
-  "num_hidden_layers": 24,
-  "output_past": true,
-  "pad_token_id": 1,
-  "position_embedding_type": "absolute",
+  "max_position_embeddings": 32768,
+  "model_type": "mistral",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "pad_token_id": 2,
   "problem_type": "single_label_classification",
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 1000000.0,
+  "sliding_window": null,
+  "tie_word_embeddings": false,
   "torch_dtype": "float32",
   "transformers_version": "4.41.0",
-  "type_vocab_size": 1,
   "use_cache": true,
-  "vocab_size": 32005
+  "vocab_size": 32768
 }
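
This commit swaps the base model from almanach/camembert-large to mistralai/Mistral-7B-v0.3 while keeping the same two-label Correct/Incorrect classification head. A minimal loading sketch; the repo id below is a placeholder, since the commit page does not show the full repository path:

    from transformers import AutoConfig, AutoModelForSequenceClassification

    repo_id = "icpro/REPO_NAME"  # placeholder, not shown on this page

    config = AutoConfig.from_pretrained(repo_id)
    print(config.model_type)  # "mistral" after this commit
    print(config.id2label)    # {0: "Incorrect", 1: "Correct"}

    # Instantiates MistralForSequenceClassification with the 2-label head above.
    model = AutoModelForSequenceClassification.from_pretrained(repo_id)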
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f34a018458223da9b2b9acffaf6ee5d64b46df76bf7a560dbc602331ceffcfe9
+oid sha256:5b1a12a721001bd50eb6c536b85a2d30a6d81f0e4340cbcca70d56740cd38cee
 size 1346702400
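
Only the Git LFS pointer changes here; the actual weights live in LFS storage. A sketch of fetching the blob at exactly this revision with huggingface_hub, again with a placeholder repo id:

    from huggingface_hub import hf_hub_download

    # repo_id is a placeholder; revision pins this commit (b77e525).
    path = hf_hub_download(
        repo_id="icpro/REPO_NAME",
        filename="model.safetensors",
        revision="b77e525",
    )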
special_tokens_map.json CHANGED
@@ -1,20 +1,24 @@
 {
-  "additional_special_tokens": [
-    "<s>NOTUSED",
-    "</s>NOTUSED",
-    "<unk>NOTUSED"
-  ],
-  "bos_token": "<s>",
-  "cls_token": "<s>",
-  "eos_token": "</s>",
-  "mask_token": {
-    "content": "<mask>",
-    "lstrip": true,
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "<pad>",
-  "sep_token": "</s>",
-  "unk_token": "<unk>"
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "</s>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
 }
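
The rewritten map drops CamemBERT's NOTUSED/cls/sep/mask entries and reuses "</s>" as the padding token, matching "pad_token_id": 2 in config.json, since Mistral's tokenizer ships without a dedicated pad token. The equivalent setup in code, as a sketch:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.3")
    # Mistral defines no pad token; reuse EOS, as this commit's map does.
    tok.pad_token = tok.eos_token   # "</s>"
    print(tok.pad_token_id)         # 2, matching config.json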
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37f00374dea48658ee8f5d0f21895b9bc55cb0103939607c8185bfd1c6ca1f89
+size 587404
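
tokenizer.model is the SentencePiece model backing the Mistral tokenizer; it is newly added here because the previous revision of this repo carried only tokenizer.json. A sketch for inspecting it locally, assuming the sentencepiece package is installed:

    import sentencepiece as spm

    sp = spm.SentencePieceProcessor(model_file="tokenizer.model")
    print(sp.vocab_size())                    # expected to match "vocab_size": 32768
    print(sp.encode("Bonjour", out_type=str))  # subword pieces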
tokenizer_config.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:138411f6ed12ea6e68e0dcc5afc33cfa128666d40885d1266116ae4e0abca22b
+oid sha256:1ad47d0c5638d898b3cfab06f0ca33bb319addfae7f028a120bc771c06af6545
 size 5112
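
training_args.bin is the pickled TrainingArguments object that the transformers Trainer writes alongside checkpoints; the new hash reflects this run's settings. A sketch for inspecting it (weights_only=False is required on recent torch versions, and transformers must be importable to unpickle the object):

    import torch

    args = torch.load("training_args.bin", weights_only=False)
    print(args.num_train_epochs, args.learning_rate, args.per_device_train_batch_size)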