n0tv1cky committed · Commit 1365261 · verified · 1 Parent(s): 8bf6362

Training in progress, step 500

README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ license: apache-2.0
+ base_model: google-bert/bert-base-uncased
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: output
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # output
+
+ This model is a fine-tuned version of [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.1962
+ - Memory Allocated (gb): 54.03
+ - Max Memory Allocated (gb): 54.47
+ - Total Memory Available (gb): 94.62
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 128
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-06
+ - lr_scheduler_type: reduce_lr_on_plateau
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Memory Allocated (gb) | Max Memory Allocated (gb) | Total Memory Available (gb) |
+ |:-------------:|:-----:|:----:|:---------------:|:---------------------:|:-------------------------:|:---------------------------:|
+ | 0.2846        | 1.0   | 196  | 0.1962          | 54.03                 | 54.47                     | 94.62                       |
+
+
+ ### Framework versions
+
+ - Transformers 4.40.2
+ - Pytorch 2.3.1a0+git4989238
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
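
Since the card's usage sections are still placeholders, a minimal inference sketch may help. The repo id `n0tv1cky/output` is an assumption pieced together from the commit author and model name; it is not confirmed by the card.

```python
# Minimal inference sketch -- "n0tv1cky/output" is an assumed repo id;
# substitute the actual published path of this repository.
from transformers import pipeline

classifier = pipeline("text-classification", model="n0tv1cky/output")
print(classifier("A genuinely moving film with terrific performances."))
# Expected output shape: [{'label': 'pos', 'score': ...}]
```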
config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "google-bert/bert-base-uncased",
+   "_num_labels": 2,
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "neg",
+     "1": "pos"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "neg": 0,
+     "pos": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
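
The config pins `problem_type` to `single_label_classification` and maps class indices to `neg`/`pos` via `id2label`. A short sketch of how that mapping resolves logits, assuming a local checkout of this repository in the working directory:

```python
# Sketch: resolve raw logits to labels via the id2label mapping above.
import torch
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer

config = AutoConfig.from_pretrained(".")
tokenizer = AutoTokenizer.from_pretrained(".")
model = AutoModelForSequenceClassification.from_pretrained(".")

inputs = tokenizer("This was a waste of two hours.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits            # shape (1, 2): one score per label
print(config.id2label[logits.argmax(dim=-1).item()])  # "neg" or "pos"
```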
emissions.csv ADDED
@@ -0,0 +1,2 @@
+ timestamp,project_name,run_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
+ 2024-08-23T07:50:32,codecarbon,b5998c77-16a1-4ef1-b7cc-1c2fbe8be3a1,140.0069224834442,3.8855012085281973e-05,2.7752207816636043e-07,42.5,0.0,377.78892517089844,0.0016528503820300105,0,0.01469188600983762,0.01634473639186763,Canada,CAN,quebec,,,Linux-5.15.0-118-generic-x86_64-with-glibc2.35,3.10.12,2.3.5,160,Intel(R) Xeon(R) Platinum 8380 CPU @ 2.30GHz,,,-71.2,46.8,1007.4371337890625,machine,N,1.0
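
This is a standard CodeCarbon log (one header row, one measurement row); duration is in seconds, energy in kWh, and emissions in kg CO2-eq. A stdlib-only sketch for pulling out the headline numbers:

```python
# Reads the CodeCarbon emissions log above using only the standard library.
import csv

with open("emissions.csv", newline="") as f:
    row = next(csv.DictReader(f))

print(f"duration:        {float(row['duration']):.0f} s")
print(f"energy consumed: {float(row['energy_consumed']):.4f} kWh")
print(f"emissions:       {float(row['emissions']):.2e} kg CO2-eq")
```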
gaudi_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "autocast_bf16_ops": null,
+   "autocast_fp32_ops": null,
+   "optimum_version": "1.21.2",
+   "transformers_version": "4.40.2",
+   "use_dynamic_shapes": false,
+   "use_fused_adam": true,
+   "use_fused_clip_norm": true,
+   "use_torch_autocast": true
+ }
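
This file is the knob set consumed by `optimum-habana` on Gaudi hardware (fused Adam and fused clip-norm kernels on, bf16 autocast on, dynamic shapes off). A hedged sketch of reading it back with `GaudiConfig`; treat the exact loading call as an assumption against the pinned optimum version:

```python
# Hedged sketch: load the Gaudi config above via optimum-habana's GaudiConfig.
from optimum.habana import GaudiConfig

gaudi_config = GaudiConfig.from_pretrained(".")  # dir containing gaudi_config.json
print(gaudi_config.use_fused_adam)        # True -> Habana fused Adam kernel
print(gaudi_config.use_torch_autocast)    # True -> bf16 mixed precision via autocast
```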
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7c4b213356ecceae95f712246d570258c97c3720fb747aa9fefca07329d2cdb
+ size 437958648
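
Only the git-lfs pointer is committed here; the 438 MB weight file lives in LFS storage. A sketch that checks a checkout actually resolved the pointer (using the size and sha256 from the stub above) before loading the tensors:

```python
# Verify the git-lfs pointer above was resolved to real weights, then load them.
import hashlib, os
from safetensors.torch import load_file

path = "model.safetensors"
assert os.path.getsize(path) == 437958648, "pointer stub not resolved by git-lfs?"
digest = hashlib.sha256(open(path, "rb").read()).hexdigest()
assert digest == "a7c4b213356ecceae95f712246d570258c97c3720fb747aa9fefca07329d2cdb"

state_dict = load_file(path)   # dict: tensor name -> torch.Tensor
print(len(state_dict), "tensors loaded")
```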
runs/Aug23_07-26-52_gaurav-gaudi-pod/events.out.tfevents.1724398116.gaurav-gaudi-pod.79697.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd1b7019a155cc0dda9fb8c5eb236ae555d785a478cc60ddfca698cdda337496
+ size 5381
runs/Aug23_07-38-06_gaurav-gaudi-pod/events.out.tfevents.1724398734.gaurav-gaudi-pod.84053.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01f13844939719fa3d9c1673a71e351b400737f09748c36fb103901f2df6c3e9
+ size 6341
runs/Aug23_07-47-26_gaurav-gaudi-pod/events.out.tfevents.1724399292.gaurav-gaudi-pod.87550.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47bcde9001397a55f06ae87bf1d3bcc96656d475bd92b4407dc2add305b145bf
+ size 6843
runs/Aug26_05-49-11_gaurav-gaudi-pod/events.out.tfevents.1724651400.gaurav-gaudi-pod.103622.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75702fd7ba8cba7f4abf39eb54411e3fd9ca1eb460c79d9945c490480c438974
+ size 5381
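
The four `runs/` files are TensorBoard event logs from successive training attempts. Besides `tensorboard --logdir runs`, they can be read programmatically; the scalar tag name below is an assumption, so list the real tags first:

```python
# Sketch: inspect one of the tfevents logs above without the TensorBoard UI.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Aug26_05-49-11_gaurav-gaudi-pod")
ea.Reload()                                   # parse the tfevents file
print(ea.Tags()["scalars"])                   # actual scalar tag names logged
for event in ea.Scalars("train/loss"):        # tag name is an assumption
    print(event.step, event.value)
```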
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
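
Together with `special_tokens_map.json` above, this reproduces the stock uncased BERT tokenizer (lowercasing on, 512-token limit, `[PAD]`=0, `[CLS]`=101, `[SEP]`=102). A quick sketch to confirm, assuming a local checkout in the working directory:

```python
# Sketch: round-trip a sentence through the tokenizer defined above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")
enc = tokenizer("Hello World")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
# ['[CLS]', 'hello', 'world', '[SEP]'] -- do_lower_case applies
print(tokenizer.cls_token_id, tokenizer.sep_token_id, tokenizer.pad_token_id)
# 101 102 0, matching added_tokens_decoder above
```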
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8528ec44d41d506dc82f0637ca048b8fa5c39e9fc05601d7bf1ee86eea04511
+ size 4856
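
`training_args.bin` is a pickle of the `TrainingArguments` object rather than human-readable text. A sketch for inspecting it, with the caveat that unpickling is only safe for files you trust:

```python
# Sketch: inspect the pickled TrainingArguments committed above.
import torch

# weights_only=False is needed on newer PyTorch (>= 2.6), where torch.load
# refuses full pickles by default; only do this for trusted files.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.seed)
# Expected: 2e-05 128 42, consistent with the model card above
```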
vocab.txt ADDED
The diff for this file is too large to render. See raw diff