AlekseyKorshuk committed on
Commit
ef0b90d
1 Parent(s): 6d4515e

huggingartists

Browse files
README.md CHANGED
@@ -14,7 +14,7 @@ widget:
14
  <div class="inline-flex flex-col" style="line-height: 1.5;">
15
  <div class="flex">
16
  <div
17
- style="display:DISPLAY_1; margin-left: auto; margin-right: auto; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://images.genius.com/cde6b08fe936280fe2b7d639703ffe36.459x459x1.png&#39;)">
18
  </div>
19
  </div>
20
  <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 HuggingArtists Model 🤖</div>
@@ -45,15 +45,15 @@ from datasets import load_dataset
45
  dataset = load_dataset("huggingartists/kanye-west")
46
  ```
47
 
48
- [Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/1nriijf4/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
49
 
50
  ## Training procedure
51
 
52
  The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on Kanye West's lyrics.
53
 
54
- Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/2qxzz0h9) for full transparency and reproducibility.
55
 
56
- At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/2qxzz0h9/artifacts) is logged and versioned.
57
 
58
  ## How to use
59
 
 
14
  <div class="inline-flex flex-col" style="line-height: 1.5;">
15
  <div class="flex">
16
  <div
17
+ style="display:DISPLAY_1; margin-left: auto; margin-right: auto; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://images.genius.com/54520386ec39aca6408c7e2c156ae10a.399x399x1.png&#39;)">
18
  </div>
19
  </div>
20
  <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 HuggingArtists Model 🤖</div>
 
45
  dataset = load_dataset("huggingartists/kanye-west")
46
  ```
47
 
48
+ [Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/hl7afoso/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
49
 
50
  ## Training procedure
51
 
52
  The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on Kanye West's lyrics.
53
 
54
+ Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/28dw8m5v) for full transparency and reproducibility.
55
 
56
+ At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/28dw8m5v/artifacts) is logged and versioned.
57
 
58
  ## How to use
59
 
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "gpt2",
3
  "activation_function": "gelu_new",
4
  "architectures": [
5
  "GPT2LMHeadModel"
@@ -18,7 +18,9 @@
18
  "n_inner": null,
19
  "n_layer": 12,
20
  "n_positions": 1024,
 
21
  "resid_pdrop": 0.1,
 
22
  "scale_attn_weights": true,
23
  "summary_activation": null,
24
  "summary_first_dropout": 0.1,
@@ -35,7 +37,7 @@
35
  }
36
  },
37
  "torch_dtype": "float32",
38
- "transformers_version": "4.10.2",
39
  "use_cache": true,
40
  "vocab_size": 50257
41
  }
 
1
  {
2
+ "_name_or_path": "kanye-west",
3
  "activation_function": "gelu_new",
4
  "architectures": [
5
  "GPT2LMHeadModel"
 
18
  "n_inner": null,
19
  "n_layer": 12,
20
  "n_positions": 1024,
21
+ "reorder_and_upcast_attn": false,
22
  "resid_pdrop": 0.1,
23
+ "scale_attn_by_inverse_layer_idx": false,
24
  "scale_attn_weights": true,
25
  "summary_activation": null,
26
  "summary_first_dropout": 0.1,
 
37
  }
38
  },
39
  "torch_dtype": "float32",
40
+ "transformers_version": "4.18.0",
41
  "use_cache": true,
42
  "vocab_size": 50257
43
  }
evaluation.txt CHANGED
@@ -1 +1 @@
1
- {"eval_loss": 3.426886558532715, "eval_runtime": 40.4126, "eval_samples_per_second": 22.072, "eval_steps_per_second": 2.771, "epoch": 1.0}
 
1
+ {"eval_loss": 3.2069807052612305, "eval_runtime": 34.4687, "eval_samples_per_second": 21.962, "eval_steps_per_second": 2.756, "epoch": 2.0}
flax_model.msgpack CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5c397b32109b8c9119efbf9198333a04c7d3dc1d3635998f782cd23b936c4abe
3
  size 497764120
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3bf8c7007d52f47daeafd98497e3a4c6832019a218b639c8f0e7609e3d90b353
3
  size 497764120
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5bc79c6c9f8fedb48a4ff5a95bad64b932f7dd5ff39c66b2aa3cfabfab436064
3
  size 995604017
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9eceea64cd79fc507b601b5ccae1d6b7309b165ac6f5f2a9d5572ca0877744e4
3
  size 995604017
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3301b1feedf6fe9d822b04f8e9f8ed43f82e257ba739185c7687f7fe4b89008c
3
- size 510403817
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78144d9127a16c2e4fa3aadd6ebdbb582a261d7d8a5b5c8f9be1f4321d1f3e2e
3
+ size 510396521
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4bab3cfadfa486b6524b9320b4f4edb68d19a2ba31da2016e6b8447dd915fd53
3
  size 14567
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7663ef0ec7d9ce8e838550f0140c91174e7ec49d2a654580029e52bb9e272ceb
3
  size 14567
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:832488a71ca6e750947bd414fedb36c21eabb4cec011e1c50883c6f2174b85ee
3
  size 623
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e2ed597f7451cce52cd85d18bcb6f126f3d81b67d00e50637f296c1ac4267d42
3
  size 623
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1 +1 @@
1
- {"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "gpt2", "tokenizer_class": "GPT2Tokenizer"}
 
1
+ {"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "huggingartists/kanye-west", "tokenizer_class": "GPT2Tokenizer"}
trainer_state.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
- "best_metric": 3.426886558532715,
3
- "best_model_checkpoint": "output/kanye-west/checkpoint-532",
4
- "epoch": 1.0,
5
- "global_step": 532,
6
  "is_hyper_param_search": false,
7
  "is_local_process_zero": true,
8
  "is_world_process_zero": true,
@@ -650,11 +650,705 @@
650
  "eval_samples_per_second": 22.04,
651
  "eval_steps_per_second": 2.767,
652
  "step": 532
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
653
  }
654
  ],
655
- "max_steps": 532,
656
- "num_train_epochs": 1,
657
- "total_flos": 555637506048000.0,
658
  "trial_name": null,
659
  "trial_params": null
660
  }
 
1
  {
2
+ "best_metric": 3.2069807052612305,
3
+ "best_model_checkpoint": "output/kanye-west/checkpoint-1098",
4
+ "epoch": 2.0,
5
+ "global_step": 1098,
6
  "is_hyper_param_search": false,
7
  "is_local_process_zero": true,
8
  "is_world_process_zero": true,
 
650
  "eval_samples_per_second": 22.04,
651
  "eval_steps_per_second": 2.767,
652
  "step": 532
653
+ },
654
+ {
655
+ "epoch": 0.97,
656
+ "learning_rate": 2.200254683906266e-07,
657
+ "loss": 3.4087,
658
+ "step": 535
659
+ },
660
+ {
661
+ "epoch": 0.98,
662
+ "learning_rate": 9.09574339006257e-08,
663
+ "loss": 3.3394,
664
+ "step": 540
665
+ },
666
+ {
667
+ "epoch": 0.99,
668
+ "learning_rate": 1.797008773112274e-08,
669
+ "loss": 3.413,
670
+ "step": 545
671
+ },
672
+ {
673
+ "epoch": 1.0,
674
+ "eval_loss": 3.219294786453247,
675
+ "eval_runtime": 34.625,
676
+ "eval_samples_per_second": 21.863,
677
+ "eval_steps_per_second": 2.744,
678
+ "step": 549
679
+ },
680
+ {
681
+ "epoch": 1.0,
682
+ "learning_rate": 1.123176456552244e-09,
683
+ "loss": 3.4155,
684
+ "step": 550
685
+ },
686
+ {
687
+ "epoch": 1.01,
688
+ "learning_rate": 4.0430490758340044e-08,
689
+ "loss": 3.3359,
690
+ "step": 555
691
+ },
692
+ {
693
+ "epoch": 1.02,
694
+ "learning_rate": 1.3585985413598608e-07,
695
+ "loss": 3.4372,
696
+ "step": 560
697
+ },
698
+ {
699
+ "epoch": 1.03,
700
+ "learning_rate": 2.8733314924638363e-07,
701
+ "loss": 3.5177,
702
+ "step": 565
703
+ },
704
+ {
705
+ "epoch": 1.04,
706
+ "learning_rate": 4.947263818497525e-07,
707
+ "loss": 3.3014,
708
+ "step": 570
709
+ },
710
+ {
711
+ "epoch": 1.05,
712
+ "learning_rate": 7.578697823098032e-07,
713
+ "loss": 3.4521,
714
+ "step": 575
715
+ },
716
+ {
717
+ "epoch": 1.06,
718
+ "learning_rate": 1.0765479445652215e-06,
719
+ "loss": 3.263,
720
+ "step": 580
721
+ },
722
+ {
723
+ "epoch": 1.07,
724
+ "learning_rate": 1.4505000024584544e-06,
725
+ "loss": 3.3575,
726
+ "step": 585
727
+ },
728
+ {
729
+ "epoch": 1.07,
730
+ "learning_rate": 1.8794198432776367e-06,
731
+ "loss": 3.5385,
732
+ "step": 590
733
+ },
734
+ {
735
+ "epoch": 1.08,
736
+ "learning_rate": 2.362956358336902e-06,
737
+ "loss": 3.2621,
738
+ "step": 595
739
+ },
740
+ {
741
+ "epoch": 1.09,
742
+ "learning_rate": 2.900713730389673e-06,
743
+ "loss": 3.3471,
744
+ "step": 600
745
+ },
746
+ {
747
+ "epoch": 1.1,
748
+ "learning_rate": 3.4922517576400357e-06,
749
+ "loss": 3.3317,
750
+ "step": 605
751
+ },
752
+ {
753
+ "epoch": 1.11,
754
+ "learning_rate": 4.137086214086682e-06,
755
+ "loss": 3.3101,
756
+ "step": 610
757
+ },
758
+ {
759
+ "epoch": 1.12,
760
+ "learning_rate": 4.834689245904529e-06,
761
+ "loss": 3.5044,
762
+ "step": 615
763
+ },
764
+ {
765
+ "epoch": 1.13,
766
+ "learning_rate": 5.584489803539656e-06,
767
+ "loss": 3.2145,
768
+ "step": 620
769
+ },
770
+ {
771
+ "epoch": 1.14,
772
+ "learning_rate": 6.385874109163592e-06,
773
+ "loss": 3.088,
774
+ "step": 625
775
+ },
776
+ {
777
+ "epoch": 1.15,
778
+ "learning_rate": 7.238186159104615e-06,
779
+ "loss": 3.2756,
780
+ "step": 630
781
+ },
782
+ {
783
+ "epoch": 1.16,
784
+ "learning_rate": 8.140728260844604e-06,
785
+ "loss": 3.3539,
786
+ "step": 635
787
+ },
788
+ {
789
+ "epoch": 1.17,
790
+ "learning_rate": 9.09276160414173e-06,
791
+ "loss": 3.3302,
792
+ "step": 640
793
+ },
794
+ {
795
+ "epoch": 1.17,
796
+ "learning_rate": 1.0093506865811846e-05,
797
+ "loss": 3.3183,
798
+ "step": 645
799
+ },
800
+ {
801
+ "epoch": 1.18,
802
+ "learning_rate": 1.1142144847673131e-05,
803
+ "loss": 3.3733,
804
+ "step": 650
805
+ },
806
+ {
807
+ "epoch": 1.19,
808
+ "learning_rate": 1.2237817147132003e-05,
809
+ "loss": 3.3992,
810
+ "step": 655
811
+ },
812
+ {
813
+ "epoch": 1.2,
814
+ "learning_rate": 1.3379626859861349e-05,
815
+ "loss": 3.4621,
816
+ "step": 660
817
+ },
818
+ {
819
+ "epoch": 1.21,
820
+ "learning_rate": 1.4566639313995615e-05,
821
+ "loss": 3.4193,
822
+ "step": 665
823
+ },
824
+ {
825
+ "epoch": 1.22,
826
+ "learning_rate": 1.5797882835242138e-05,
827
+ "loss": 3.4483,
828
+ "step": 670
829
+ },
830
+ {
831
+ "epoch": 1.23,
832
+ "learning_rate": 1.7072349542282338e-05,
833
+ "loss": 3.1321,
834
+ "step": 675
835
+ },
836
+ {
837
+ "epoch": 1.24,
838
+ "learning_rate": 1.8388996171811105e-05,
839
+ "loss": 3.5093,
840
+ "step": 680
841
+ },
842
+ {
843
+ "epoch": 1.25,
844
+ "learning_rate": 1.9746744932540055e-05,
845
+ "loss": 3.3516,
846
+ "step": 685
847
+ },
848
+ {
849
+ "epoch": 1.26,
850
+ "learning_rate": 2.1144484387464373e-05,
851
+ "loss": 3.3896,
852
+ "step": 690
853
+ },
854
+ {
855
+ "epoch": 1.27,
856
+ "learning_rate": 2.258107036367215e-05,
857
+ "loss": 3.4337,
858
+ "step": 695
859
+ },
860
+ {
861
+ "epoch": 1.28,
862
+ "learning_rate": 2.4055326888950756e-05,
863
+ "loss": 3.3167,
864
+ "step": 700
865
+ },
866
+ {
867
+ "epoch": 1.28,
868
+ "learning_rate": 2.5566047154423428e-05,
869
+ "loss": 3.3134,
870
+ "step": 705
871
+ },
872
+ {
873
+ "epoch": 1.29,
874
+ "learning_rate": 2.711199450242914e-05,
875
+ "loss": 3.2474,
876
+ "step": 710
877
+ },
878
+ {
879
+ "epoch": 1.3,
880
+ "learning_rate": 2.8691903438835377e-05,
881
+ "loss": 3.234,
882
+ "step": 715
883
+ },
884
+ {
885
+ "epoch": 1.31,
886
+ "learning_rate": 3.030448066895703e-05,
887
+ "loss": 3.3668,
888
+ "step": 720
889
+ },
890
+ {
891
+ "epoch": 1.32,
892
+ "learning_rate": 3.194840615623184e-05,
893
+ "loss": 3.4867,
894
+ "step": 725
895
+ },
896
+ {
897
+ "epoch": 1.33,
898
+ "learning_rate": 3.36223342027866e-05,
899
+ "loss": 3.3089,
900
+ "step": 730
901
+ },
902
+ {
903
+ "epoch": 1.34,
904
+ "learning_rate": 3.53248945510095e-05,
905
+ "loss": 3.273,
906
+ "step": 735
907
+ },
908
+ {
909
+ "epoch": 1.35,
910
+ "learning_rate": 3.7054693505226934e-05,
911
+ "loss": 3.3763,
912
+ "step": 740
913
+ },
914
+ {
915
+ "epoch": 1.36,
916
+ "learning_rate": 3.8810315072565846e-05,
917
+ "loss": 3.4708,
918
+ "step": 745
919
+ },
920
+ {
921
+ "epoch": 1.37,
922
+ "learning_rate": 4.0590322122068896e-05,
923
+ "loss": 3.3732,
924
+ "step": 750
925
+ },
926
+ {
927
+ "epoch": 1.38,
928
+ "learning_rate": 4.239325756111289e-05,
929
+ "loss": 3.3236,
930
+ "step": 755
931
+ },
932
+ {
933
+ "epoch": 1.38,
934
+ "learning_rate": 4.421764552816718e-05,
935
+ "loss": 3.3189,
936
+ "step": 760
937
+ },
938
+ {
939
+ "epoch": 1.39,
940
+ "learning_rate": 4.6061992600916735e-05,
941
+ "loss": 3.3587,
942
+ "step": 765
943
+ },
944
+ {
945
+ "epoch": 1.4,
946
+ "learning_rate": 4.792478901875958e-05,
947
+ "loss": 3.3553,
948
+ "step": 770
949
+ },
950
+ {
951
+ "epoch": 1.41,
952
+ "learning_rate": 4.980450991867903e-05,
953
+ "loss": 3.3437,
954
+ "step": 775
955
+ },
956
+ {
957
+ "epoch": 1.42,
958
+ "learning_rate": 5.1699616583478625e-05,
959
+ "loss": 3.113,
960
+ "step": 780
961
+ },
962
+ {
963
+ "epoch": 1.43,
964
+ "learning_rate": 5.360855770135732e-05,
965
+ "loss": 3.5307,
966
+ "step": 785
967
+ },
968
+ {
969
+ "epoch": 1.44,
970
+ "learning_rate": 5.5529770635795374e-05,
971
+ "loss": 3.277,
972
+ "step": 790
973
+ },
974
+ {
975
+ "epoch": 1.45,
976
+ "learning_rate": 5.746168270470958e-05,
977
+ "loss": 3.4986,
978
+ "step": 795
979
+ },
980
+ {
981
+ "epoch": 1.46,
982
+ "learning_rate": 5.940271246783254e-05,
983
+ "loss": 3.0947,
984
+ "step": 800
985
+ },
986
+ {
987
+ "epoch": 1.47,
988
+ "learning_rate": 6.13512710212607e-05,
989
+ "loss": 3.453,
990
+ "step": 805
991
+ },
992
+ {
993
+ "epoch": 1.48,
994
+ "learning_rate": 6.330576329811206e-05,
995
+ "loss": 3.2989,
996
+ "step": 810
997
+ },
998
+ {
999
+ "epoch": 1.48,
1000
+ "learning_rate": 6.526458937422915e-05,
1001
+ "loss": 3.2466,
1002
+ "step": 815
1003
+ },
1004
+ {
1005
+ "epoch": 1.49,
1006
+ "learning_rate": 6.722614577785811e-05,
1007
+ "loss": 3.3078,
1008
+ "step": 820
1009
+ },
1010
+ {
1011
+ "epoch": 1.5,
1012
+ "learning_rate": 6.918882680223113e-05,
1013
+ "loss": 3.3115,
1014
+ "step": 825
1015
+ },
1016
+ {
1017
+ "epoch": 1.51,
1018
+ "learning_rate": 7.115102581997948e-05,
1019
+ "loss": 3.401,
1020
+ "step": 830
1021
+ },
1022
+ {
1023
+ "epoch": 1.52,
1024
+ "learning_rate": 7.311113659829913e-05,
1025
+ "loss": 3.388,
1026
+ "step": 835
1027
+ },
1028
+ {
1029
+ "epoch": 1.53,
1030
+ "learning_rate": 7.506755461379409e-05,
1031
+ "loss": 3.1893,
1032
+ "step": 840
1033
+ },
1034
+ {
1035
+ "epoch": 1.54,
1036
+ "learning_rate": 7.701867836592025e-05,
1037
+ "loss": 3.1964,
1038
+ "step": 845
1039
+ },
1040
+ {
1041
+ "epoch": 1.55,
1042
+ "learning_rate": 7.896291068795451e-05,
1043
+ "loss": 3.1817,
1044
+ "step": 850
1045
+ },
1046
+ {
1047
+ "epoch": 1.56,
1048
+ "learning_rate": 8.089866005441645e-05,
1049
+ "loss": 3.4222,
1050
+ "step": 855
1051
+ },
1052
+ {
1053
+ "epoch": 1.57,
1054
+ "learning_rate": 8.28243418838726e-05,
1055
+ "loss": 3.3935,
1056
+ "step": 860
1057
+ },
1058
+ {
1059
+ "epoch": 1.58,
1060
+ "learning_rate": 8.473837983605534e-05,
1061
+ "loss": 3.317,
1062
+ "step": 865
1063
+ },
1064
+ {
1065
+ "epoch": 1.58,
1066
+ "learning_rate": 8.663920710223691e-05,
1067
+ "loss": 3.3184,
1068
+ "step": 870
1069
+ },
1070
+ {
1071
+ "epoch": 1.59,
1072
+ "learning_rate": 8.852526768780001e-05,
1073
+ "loss": 3.3909,
1074
+ "step": 875
1075
+ },
1076
+ {
1077
+ "epoch": 1.6,
1078
+ "learning_rate": 9.039501768595671e-05,
1079
+ "loss": 3.3281,
1080
+ "step": 880
1081
+ },
1082
+ {
1083
+ "epoch": 1.61,
1084
+ "learning_rate": 9.224692654157222e-05,
1085
+ "loss": 3.4081,
1086
+ "step": 885
1087
+ },
1088
+ {
1089
+ "epoch": 1.62,
1090
+ "learning_rate": 9.407947830405896e-05,
1091
+ "loss": 3.3443,
1092
+ "step": 890
1093
+ },
1094
+ {
1095
+ "epoch": 1.63,
1096
+ "learning_rate": 9.589117286831579e-05,
1097
+ "loss": 3.2831,
1098
+ "step": 895
1099
+ },
1100
+ {
1101
+ "epoch": 1.64,
1102
+ "learning_rate": 9.768052720269644e-05,
1103
+ "loss": 3.3635,
1104
+ "step": 900
1105
+ },
1106
+ {
1107
+ "epoch": 1.65,
1108
+ "learning_rate": 9.944607656300112e-05,
1109
+ "loss": 3.3023,
1110
+ "step": 905
1111
+ },
1112
+ {
1113
+ "epoch": 1.66,
1114
+ "learning_rate": 0.0001011863756914993,
1115
+ "loss": 3.3958,
1116
+ "step": 910
1117
+ },
1118
+ {
1119
+ "epoch": 1.67,
1120
+ "learning_rate": 0.00010290000000000001,
1121
+ "loss": 3.439,
1122
+ "step": 915
1123
+ },
1124
+ {
1125
+ "epoch": 1.68,
1126
+ "learning_rate": 0.00010458554673600303,
1127
+ "loss": 3.3205,
1128
+ "step": 920
1129
+ },
1130
+ {
1131
+ "epoch": 1.68,
1132
+ "learning_rate": 0.00010624163613097559,
1133
+ "loss": 3.3548,
1134
+ "step": 925
1135
+ },
1136
+ {
1137
+ "epoch": 1.69,
1138
+ "learning_rate": 0.00010786691252981412,
1139
+ "loss": 3.402,
1140
+ "step": 930
1141
+ },
1142
+ {
1143
+ "epoch": 1.7,
1144
+ "learning_rate": 0.00010946004550056765,
1145
+ "loss": 3.1367,
1146
+ "step": 935
1147
+ },
1148
+ {
1149
+ "epoch": 1.71,
1150
+ "learning_rate": 0.00011101973092351368,
1151
+ "loss": 3.3214,
1152
+ "step": 940
1153
+ },
1154
+ {
1155
+ "epoch": 1.72,
1156
+ "learning_rate": 0.0001125446920586948,
1157
+ "loss": 3.4555,
1158
+ "step": 945
1159
+ },
1160
+ {
1161
+ "epoch": 1.73,
1162
+ "learning_rate": 0.0001140336805910432,
1163
+ "loss": 3.1367,
1164
+ "step": 950
1165
+ },
1166
+ {
1167
+ "epoch": 1.74,
1168
+ "learning_rate": 0.00011548547765223617,
1169
+ "loss": 3.391,
1170
+ "step": 955
1171
+ },
1172
+ {
1173
+ "epoch": 1.75,
1174
+ "learning_rate": 0.00011689889481844766,
1175
+ "loss": 3.0626,
1176
+ "step": 960
1177
+ },
1178
+ {
1179
+ "epoch": 1.76,
1180
+ "learning_rate": 0.00011827277508317747,
1181
+ "loss": 3.4176,
1182
+ "step": 965
1183
+ },
1184
+ {
1185
+ "epoch": 1.77,
1186
+ "learning_rate": 0.00011960599380436336,
1187
+ "loss": 3.2121,
1188
+ "step": 970
1189
+ },
1190
+ {
1191
+ "epoch": 1.78,
1192
+ "learning_rate": 0.00012089745962499954,
1193
+ "loss": 3.231,
1194
+ "step": 975
1195
+ },
1196
+ {
1197
+ "epoch": 1.79,
1198
+ "learning_rate": 0.00012214611536650856,
1199
+ "loss": 3.6329,
1200
+ "step": 980
1201
+ },
1202
+ {
1203
+ "epoch": 1.79,
1204
+ "learning_rate": 0.000123350938894135,
1205
+ "loss": 3.1573,
1206
+ "step": 985
1207
+ },
1208
+ {
1209
+ "epoch": 1.8,
1210
+ "learning_rate": 0.00012451094395365327,
1211
+ "loss": 3.3721,
1212
+ "step": 990
1213
+ },
1214
+ {
1215
+ "epoch": 1.81,
1216
+ "learning_rate": 0.00012562518097870298,
1217
+ "loss": 3.3447,
1218
+ "step": 995
1219
+ },
1220
+ {
1221
+ "epoch": 1.82,
1222
+ "learning_rate": 0.0001266927378680932,
1223
+ "loss": 3.3603,
1224
+ "step": 1000
1225
+ },
1226
+ {
1227
+ "epoch": 1.83,
1228
+ "learning_rate": 0.00012771274073243748,
1229
+ "loss": 3.1145,
1230
+ "step": 1005
1231
+ },
1232
+ {
1233
+ "epoch": 1.84,
1234
+ "learning_rate": 0.00012868435460950916,
1235
+ "loss": 3.2607,
1236
+ "step": 1010
1237
+ },
1238
+ {
1239
+ "epoch": 1.85,
1240
+ "learning_rate": 0.00012960678414773185,
1241
+ "loss": 3.3824,
1242
+ "step": 1015
1243
+ },
1244
+ {
1245
+ "epoch": 1.86,
1246
+ "learning_rate": 0.00013047927425724446,
1247
+ "loss": 3.4136,
1248
+ "step": 1020
1249
+ },
1250
+ {
1251
+ "epoch": 1.87,
1252
+ "learning_rate": 0.000131301110728009,
1253
+ "loss": 3.386,
1254
+ "step": 1025
1255
+ },
1256
+ {
1257
+ "epoch": 1.88,
1258
+ "learning_rate": 0.00013207162081445436,
1259
+ "loss": 3.2408,
1260
+ "step": 1030
1261
+ },
1262
+ {
1263
+ "epoch": 1.89,
1264
+ "learning_rate": 0.00013279017378617784,
1265
+ "loss": 3.2928,
1266
+ "step": 1035
1267
+ },
1268
+ {
1269
+ "epoch": 1.89,
1270
+ "learning_rate": 0.00013345618144425364,
1271
+ "loss": 3.16,
1272
+ "step": 1040
1273
+ },
1274
+ {
1275
+ "epoch": 1.9,
1276
+ "learning_rate": 0.00013406909860272533,
1277
+ "loss": 3.444,
1278
+ "step": 1045
1279
+ },
1280
+ {
1281
+ "epoch": 1.91,
1282
+ "learning_rate": 0.00013462842353488877,
1283
+ "loss": 3.5022,
1284
+ "step": 1050
1285
+ },
1286
+ {
1287
+ "epoch": 1.92,
1288
+ "learning_rate": 0.0001351336983839996,
1289
+ "loss": 3.4006,
1290
+ "step": 1055
1291
+ },
1292
+ {
1293
+ "epoch": 1.93,
1294
+ "learning_rate": 0.00013558450953806924,
1295
+ "loss": 3.436,
1296
+ "step": 1060
1297
+ },
1298
+ {
1299
+ "epoch": 1.94,
1300
+ "learning_rate": 0.00013598048796844284,
1301
+ "loss": 3.3172,
1302
+ "step": 1065
1303
+ },
1304
+ {
1305
+ "epoch": 1.95,
1306
+ "learning_rate": 0.0001363213095318818,
1307
+ "loss": 3.4138,
1308
+ "step": 1070
1309
+ },
1310
+ {
1311
+ "epoch": 1.96,
1312
+ "learning_rate": 0.00013660669523590338,
1313
+ "loss": 3.3525,
1314
+ "step": 1075
1315
+ },
1316
+ {
1317
+ "epoch": 1.97,
1318
+ "learning_rate": 0.00013683641146716082,
1319
+ "loss": 3.5252,
1320
+ "step": 1080
1321
+ },
1322
+ {
1323
+ "epoch": 1.98,
1324
+ "learning_rate": 0.00013701027018267633,
1325
+ "loss": 3.264,
1326
+ "step": 1085
1327
+ },
1328
+ {
1329
+ "epoch": 1.99,
1330
+ "learning_rate": 0.00013712812906377096,
1331
+ "loss": 3.262,
1332
+ "step": 1090
1333
+ },
1334
+ {
1335
+ "epoch": 1.99,
1336
+ "learning_rate": 0.00013718989163256484,
1337
+ "loss": 3.1863,
1338
+ "step": 1095
1339
+ },
1340
+ {
1341
+ "epoch": 2.0,
1342
+ "eval_loss": 3.2069807052612305,
1343
+ "eval_runtime": 34.3846,
1344
+ "eval_samples_per_second": 22.016,
1345
+ "eval_steps_per_second": 2.763,
1346
+ "step": 1098
1347
  }
1348
  ],
1349
+ "max_steps": 1098,
1350
+ "num_train_epochs": 2,
1351
+ "total_flos": 1146157498368000.0,
1352
  "trial_name": null,
1353
  "trial_params": null
1354
  }
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:11e4834c8ab0301a41d1c08550c723db54a908586d28b697502acc43205b130e
3
- size 2671
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93bde65575ca16798c05d90b196d6e2572649372b8eaf2784f4f116cb2de0f1f
3
+ size 3055