AlekseyKorshuk committed
Commit 2b2cb11
1 Parent(s): e300df7

huggingartists
README.md CHANGED
@@ -45,15 +45,15 @@ from datasets import load_dataset
 dataset = load_dataset("huggingartists/kurt-cobain")
 ```
 
- [Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/1whd1v3b/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
+ [Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/tjfuj6tr/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
 
 ## Training procedure
 
 The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on Kurt Cobain's lyrics.
 
- Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/3nzvvuyl) for full transparency and reproducibility.
+ Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/3enopofm) for full transparency and reproducibility.
 
- At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/3nzvvuyl/artifacts) is logged and versioned.
+ At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/3enopofm/artifacts) is logged and versioned.
 
 ## How to use
 
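The README diff cuts off at the "How to use" heading, but loading the fine-tuned checkpoint follows the standard `transformers` pattern. A minimal sketch (the prompt string and sampling settings are illustrative, not taken from the card):

```python
from transformers import pipeline

# Load the fine-tuned GPT-2 checkpoint from the Hub for text generation.
generator = pipeline("text-generation", model="huggingartists/kurt-cobain")

# "I am" is an arbitrary example prompt; sampling returns several candidates.
print(generator("I am", do_sample=True, num_return_sequences=3))
```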
config.json CHANGED
@@ -1,5 +1,5 @@
 {
- "_name_or_path": "gpt2",
+ "_name_or_path": "huggingartists/kurt-cobain",
 "activation_function": "gelu_new",
 "architectures": [
 "GPT2LMHeadModel"
evaluation.txt CHANGED
@@ -1 +1 @@
- {"eval_loss": 2.059675931930542, "eval_runtime": 3.7661, "eval_samples_per_second": 21.773, "eval_steps_per_second": 2.921, "epoch": 1.0}
+ {"eval_loss": 1.8867709636688232, "eval_runtime": 3.4897, "eval_samples_per_second": 21.779, "eval_steps_per_second": 2.866, "epoch": 2.0}
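The evaluation loss drops from ~2.0597 after one epoch to ~1.8868 after two. Since eval_loss is mean cross-entropy in nats, converting to perplexity makes the improvement easier to read; a quick check using the values from this file:

```python
import math

loss_epoch1 = 2.059675931930542   # eval_loss before this commit (epoch 1.0)
loss_epoch2 = 1.8867709636688232  # eval_loss after this commit (epoch 2.0)

# Perplexity is the exponential of the mean cross-entropy loss.
print(math.exp(loss_epoch1))  # ~7.84
print(math.exp(loss_epoch2))  # ~6.60
```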
flax_model.msgpack CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:7cfb2fb84e95c1f4166fbc68132aab67b3e2d43bb335046b2db6d09528a0fff9
+ oid sha256:92cd1a1dbcc866561e4e2c65bc53fed5096e0ce5f836a41b712f588b6374cc74
 size 497764120
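The binary weight files are stored with Git LFS, so the repo diff only touches the three-line pointer file: the sha256 `oid` changes while `size` stays the same, since the retrained Flax weights happen to occupy the same number of bytes. The same pattern applies to the optimizer.pt, pytorch_model.bin, rng_state.pth, scheduler.pt, and training_args.bin diffs below. A sketch of verifying a downloaded copy against the pointer (hypothetical local path):

```python
import hashlib

# Hash the downloaded binary in chunks; the digest should match the pointer's oid.
sha = hashlib.sha256()
with open("flax_model.msgpack", "rb") as f:  # hypothetical local copy
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print(sha.hexdigest())
# expected after this commit:
# 92cd1a1dbcc866561e4e2c65bc53fed5096e0ce5f836a41b712f588b6374cc74
```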
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:fbc63b097ef5c6b54b802015cc1fd80b5d7fb752db3f09423f2231b2fd9bf470
+ oid sha256:a222bb05ea08ee5f83a19f1124fc880a18d9afb82a601e26ddfd42882683baaa
 size 995603825
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:fbd5a79c6d6f4397e979deb5ed837e7a31a9444a389f1e4b7d9b629e59679130
+ oid sha256:dd4e8ce7554bde500a78926cb81c5e1d2ae6a48cd1688e91bc158367b40e150c
 size 510403817
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:e3669fa98bb5c4efd6c0df7cdf49d6ef6463037ace488e6ccbace1eb61273d86
+ oid sha256:997987240923c558e1c237d64d5efba40e39d7bad476231adb9f6a613bbb1a21
 size 14503
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:ea254c15c4fd5cb1f8de5c78442f725e8358618f09aaa6308f4d0adddbd45447
+ oid sha256:6ba8206d5153f46186cf77d6a4dc5a54b5948a8e7cf23f123d556cbf66dca801
 size 623
tokenizer_config.json CHANGED
@@ -1 +1 @@
- {"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "gpt2", "tokenizer_class": "GPT2Tokenizer"}
+ {"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "huggingartists/kurt-cobain", "tokenizer_class": "GPT2Tokenizer"}
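Both config.json and tokenizer_config.json now record `huggingartists/kurt-cobain` rather than `gpt2` in their name/path fields, so the published artifacts identify themselves as the fine-tuned repo rather than the base checkpoint. One way to confirm the raw file on the Hub (a sketch assuming `huggingface_hub` is installed; note that `from_pretrained` would overwrite `_name_or_path` with whatever id you pass it):

```python
import json
from huggingface_hub import hf_hub_download

# Fetch the raw config.json as published, without transformers rewriting it.
path = hf_hub_download("huggingartists/kurt-cobain", "config.json")
with open(path) as f:
    config = json.load(f)

print(config["_name_or_path"])  # "huggingartists/kurt-cobain" after this commit
```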
trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
- "best_metric": 2.059675931930542,
- "best_model_checkpoint": "output/kurt-cobain/checkpoint-63",
- "epoch": 1.0,
- "global_step": 63,
+ "best_metric": 1.8867709636688232,
+ "best_model_checkpoint": "output/kurt-cobain/checkpoint-126",
+ "epoch": 2.0,
+ "global_step": 126,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,
@@ -86,11 +86,97 @@
 "eval_samples_per_second": 22.522,
 "eval_steps_per_second": 3.021,
 "step": 63
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 3.408888099334633e-07,
+ "loss": 2.2316,
+ "step": 65
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 4.137086214086682e-06,
+ "loss": 2.3402,
+ "step": 70
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 1.1920020081922749e-05,
+ "loss": 2.2531,
+ "step": 75
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 2.320835154085542e-05,
+ "loss": 2.3415,
+ "step": 80
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 3.7303948905573005e-05,
+ "loss": 2.1674,
+ "step": 85
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 5.333506393059682e-05,
+ "loss": 2.2066,
+ "step": 90
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 7.031024545323179e-05,
+ "loss": 1.9122,
+ "step": 95
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 8.71796561146101e-05,
+ "loss": 2.1474,
+ "step": 100
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 0.00010290000000000001,
+ "loss": 2.0603,
+ "step": 105
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 0.0001164990457207046,
+ "loss": 1.8847,
+ "step": 110
+ },
+ {
+ "epoch": 1.83,
+ "learning_rate": 0.00012713575447996587,
+ "loss": 1.881,
+ "step": 115
+ },
+ {
+ "epoch": 1.9,
+ "learning_rate": 0.00013415229447692924,
+ "loss": 2.2478,
+ "step": 120
+ },
+ {
+ "epoch": 1.98,
+ "learning_rate": 0.00013711472479561806,
+ "loss": 2.2805,
+ "step": 125
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 1.8867709636688232,
+ "eval_runtime": 3.3707,
+ "eval_samples_per_second": 22.547,
+ "eval_steps_per_second": 2.967,
+ "step": 126
 }
 ],
- "max_steps": 63,
- "num_train_epochs": 1,
- "total_flos": 64931069952000.0,
+ "max_steps": 126,
+ "num_train_epochs": 2,
+ "total_flos": 130646016000000.0,
 "trial_name": null,
 "trial_params": null
 }
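The new `log_history` entries show the learning rate rising from ~3.4e-7 at step 65 to ~1.37e-4 at step 125, i.e. the schedule ramps up again over the second epoch rather than decaying monotonically, and the best checkpoint moves to step 126. A sketch for tracing the schedule from a local copy of the file (hypothetical path):

```python
import json

with open("trainer_state.json") as f:  # hypothetical local copy
    state = json.load(f)

# Print step, learning rate, and training loss for every logged optimizer step.
for entry in state["log_history"]:
    if "learning_rate" in entry:
        print(entry["step"], entry["learning_rate"], entry["loss"])
```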
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:827966f81c0e881fb54a56100eb0000ea908c70ca55363e9cba79025ffe95eea
+ oid sha256:07a6cfdfce155fa31773129f68bc985f3200c031e3f132dded8a6f847b060755
 size 2863