ncbateman committed
Commit 12c3ac8
1 Parent(s): 0df65fa

Training in progress, step 280, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:05a0a61b6f10e0b663c7393b8364bbe7a737d8c565430ce2d68b95474c31c1a5
+oid sha256:766bc29da4dd8f6ea69d080821e925bf3230dbf524883a26bacdbe8e6bdd7a55
 size 97307544
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7137e1d123529aa3be36b999601ee44510cfd22a08bdd5f889d472bd3c96a8c5
+oid sha256:7074266859ed51b765dcb9e59295560b54f7517b3b32919d5126064139e6615c
 size 49846644
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9b7e0c0119a97b0d02303a46cafb008390c13bc174db8f9999aab74f3b0c0f3f
+oid sha256:7be84e9390ed163408a0abac028539ac68753c7bc3079d2a87d83274ace9ae69
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:321b80874f1460360522a6113a37b793358a0a363cc85543d6a6f1b21f904a79
+oid sha256:f975d6c1fc2270e579841b403d67bdb756eda7f69f91461b9a04eabdad070c25
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.35587188612099646,
+  "epoch": 0.3623422840504691,
   "eval_steps": 386,
-  "global_step": 275,
+  "global_step": 280,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1940,6 +1940,41 @@
       "learning_rate": 9.91419575609712e-05,
       "loss": 0.958,
       "step": 275
+    },
+    {
+      "epoch": 0.35716596570689096,
+      "grad_norm": 0.8483523726463318,
+      "learning_rate": 9.913433570160469e-05,
+      "loss": 0.905,
+      "step": 276
+    },
+    {
+      "epoch": 0.3584600452927855,
+      "grad_norm": 0.8082010746002197,
+      "learning_rate": 9.912668043543964e-05,
+      "loss": 0.8633,
+      "step": 277
+    },
+    {
+      "epoch": 0.35975412487868,
+      "grad_norm": 0.8654133677482605,
+      "learning_rate": 9.911899176768091e-05,
+      "loss": 0.8465,
+      "step": 278
+    },
+    {
+      "epoch": 0.3610482044645746,
+      "grad_norm": 0.8709694743156433,
+      "learning_rate": 9.911126970355609e-05,
+      "loss": 0.9286,
+      "step": 279
+    },
+    {
+      "epoch": 0.3623422840504691,
+      "grad_norm": 0.8783992528915405,
+      "learning_rate": 9.910351424831546e-05,
+      "loss": 0.9349,
+      "step": 280
     }
   ],
   "logging_steps": 1,
@@ -1959,7 +1994,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.074327222157312e+17,
+  "total_flos": 3.1302240807419904e+17,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null