lapp0 committed
Commit 3eeac9a
1 Parent(s): 6b588b3

Training in progress, step 198000

README.md CHANGED
@@ -44,7 +44,7 @@ More information needed
 
 # Resource Usage Comparison
 
-- VRAM Use: 7.4164 GB
+- VRAM Use: 7.4173 GB
 
 # Distillation (Teacher -> Student) Architecture Difference:
 
@@ -85,7 +85,7 @@ Trained on 226,096,614 tokens from the [wikimedia/wikipedia](https://huggingface
 # Training Objective
 
 ```
-DistillationObjective(logits_loss_component=LossComponent(label=logits, weight=1, loss_fn=kl), attn_loss_component=LossComponent(label=attn, weight=25, loss_fn=raw_mse, layer_mapper=layer-2, projector=orthogonal))
+DistillationObjective(logits_loss_component=LossComponent(label=logits, weight=1, loss_fn=kl), attn_loss_component=LossComponent(label=attn, weight=5, loss_fn=raw_mse, layer_mapper=layer-2, norm=layernorm, projector=orthogonal))
 ```
 
 # Hyperparameters
@@ -94,17 +94,16 @@ The following hyperparameters were used during training:
 <details>
 <summary>Expand</summary>
 
-- learning_rate: `0.0001`
-- train_batch_size: `4`
+- learning_rate: `0.0002`
+- train_batch_size: `2`
 - eval_batch_size: `8`
 - seed: `42`
 - optimizer: `Adam with betas=(0.9,0.999) and epsilon=1e-08`
 - lr_scheduler_type: `polynomial`
-- lr_scheduler_warmup_ratio: `0.2`
 - num_epochs: `1.0`
-- distillation_objective: `DistillationObjective(logits_loss_component=LossComponent(label=logits, weight=1, loss_fn=kl), attn_loss_component=LossComponent(label=attn, weight=25, loss_fn=raw_mse, layer_mapper=layer-2, projector=orthogonal))`
+- distillation_objective: `DistillationObjective(logits_loss_component=LossComponent(label=logits, weight=1, loss_fn=kl), attn_loss_component=LossComponent(label=attn, weight=5, loss_fn=raw_mse, layer_mapper=layer-2, norm=layernorm, projector=orthogonal))`
 - train_embeddings: `True`
-- lr_scheduler: `<torch.optim.lr_scheduler.LambdaLR object at 0x7fe60c136110>`
+- lr_scheduler: `<torch.optim.lr_scheduler.LambdaLR object at 0x7eff83023ac0>`
 - student_model_name_or_path: `None`
 - student_config_name_or_path: `distilbert/distilgpt2`
 - student_model_config: `None`
@@ -124,7 +123,7 @@ The following hyperparameters were used during training:
 - gradient_accumulation_steps: `1`
 - weight_decay: `0.0`
 - max_grad_norm: `1.0`
-- warmup_ratio: `0.2`
+- warmup_ratio: `0`
 - warmup_steps: `0`
 - gradient_checkpointing: `True`
 
@@ -134,6 +133,6 @@ The following hyperparameters were used during training:
 
 # Framework Versions
 - Distily 0.4.1
-- Transformers 4.44.2
+- Transformers 4.44.1
 - Pytorch 2.4.0+cu121
 - Datasets 2.21.0
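Note: the objective string in this diff combines a KL-divergence loss on the logits with a raw-MSE loss on attention maps, with the attention weight lowered from 25 to 5 and a layernorm normalization added. Below is a minimal PyTorch sketch of that combination, assuming standard `output_attentions=True` tensors; it is not Distily's actual implementation, the learned orthogonal projector is omitted, and `layer_offset` reflects one plausible reading of `layer_mapper=layer-2`.

```python
import torch
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits,
                      student_attns, teacher_attns,
                      logits_weight=1.0, attn_weight=5.0, layer_offset=2):
    # logits component (loss_fn=kl): KL(teacher || student) over the vocabulary
    logits_loss = F.kl_div(
        F.log_softmax(student_logits, dim=-1),
        F.softmax(teacher_logits, dim=-1),
        reduction="batchmean",
    )
    # attn component (loss_fn=raw_mse, norm=layernorm): MSE between
    # layer-normalized attention maps, pairing student layer i with teacher
    # layer i + layer_offset (an assumption about the layer mapper)
    attn_loss = torch.zeros((), device=student_logits.device)
    for i, s_attn in enumerate(student_attns):
        t_attn = teacher_attns[i + layer_offset]
        s = F.layer_norm(s_attn, s_attn.shape[-1:])
        t = F.layer_norm(t_attn, t_attn.shape[-1:])
        attn_loss = attn_loss + F.mse_loss(s, t)
    return logits_weight * logits_loss + attn_weight * attn_loss
```

The `polynomial` scheduler with `warmup_ratio: 0` likely corresponds to `transformers.get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=...)`, which returns the `LambdaLR` object shown in the hyperparameter list.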
config.json CHANGED
@@ -40,7 +40,7 @@
     }
   },
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.44.2",
+  "transformers_version": "4.44.1",
   "use_cache": true,
   "vocab_size": 50257
 }
generation_config.json CHANGED
@@ -2,5 +2,5 @@
   "_from_model_config": true,
   "bos_token_id": 50256,
   "eos_token_id": 50256,
-  "transformers_version": "4.44.2"
+  "transformers_version": "4.44.1"
 }
logs/attn_norm=layernorm, attn_projector=orthogonal, attn_weight=25, learning_rate=0.0001, per_device_train_batch_size=2, warmup_ratio=0/events.out.tfevents.1725100177.e3f806ea38c9 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a1d7c9e9f617d4018feb7bcb814eab119e525f12b39ca7c23334ab47a214413
+size 95114836
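Note: the three added lines are a Git LFS pointer, not the TensorBoard event file itself; the real payload is about 95 MB. A sketch of fetching the resolved file with `huggingface_hub`, assuming a placeholder repository id since the commit page does not name the repository:

```python
from huggingface_hub import hf_hub_download

# repo_id is hypothetical; replace with the actual model repository
local_path = hf_hub_download(
    repo_id="lapp0/distily-example-repo",
    filename=(
        "logs/attn_norm=layernorm, attn_projector=orthogonal, attn_weight=25, "
        "learning_rate=0.0001, per_device_train_batch_size=2, warmup_ratio=0/"
        "events.out.tfevents.1725100177.e3f806ea38c9"
    ),
)
print(local_path)  # local cache path to the ~95 MB TensorBoard log
```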
logs/attn_norm=layernorm, attn_projector=orthogonal, attn_weight=5, learning_rate=0.0002, per_device_train_batch_size=2, warmup_ratio=0/completed.flag ADDED
File without changes
logs/attn_norm=layernorm, attn_projector=orthogonal, attn_weight=5, learning_rate=0.0002, per_device_train_batch_size=2, warmup_ratio=0/events.out.tfevents.1725100036.e3f806ea38c9 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:71abd75d8f98a0ab39b9941e36ec5c670680fbf3ccd8357cca08c6744bf4229b
-size 253
+oid sha256:ea2c83974f57aefb09aabd69672ffaa74139651faded557ea9dd7cda621e3d79
+size 529
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9ef7a40476007d3cc61e01ed4e06df0c4b2e8fe72ed3081f3e5cb306cab5cd71
+oid sha256:ee0bc028d14e92188f1bf00434de1c5ab583cda873f0baf9fd25fad9ab94c767
 size 163832792
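Note: the updated `model.safetensors` is the student checkpoint itself (DistilGPT2 architecture, stored as `bfloat16` per `config.json`). A sketch of loading it from a local clone of the repository; the local path and the choice of the `distilbert/distilgpt2` tokenizer are assumptions, since the diff includes no tokenizer files:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# "." is a local clone containing model.safetensors and config.json
model = AutoModelForCausalLM.from_pretrained(".", torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")

inputs = tokenizer("Knowledge distillation is", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```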
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:07e795451ea41a525ff199bf7c3b971cb21ed8d822124b63f1fc03c6806654e4
+oid sha256:30de67f670cfce3510b462760d4f129c859e8616c551fb78a4519ff8e158e460
 size 5560