Commit e47c6f3 (verified) by besimray
Parent: 2bab56d

Training in progress, step 230, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f1687d42b6c8fc20fba69955ada5c36a016df8b97c35e2197e9cefb81565a448
+oid sha256:28ca0f5984758b0a6bc98d2b9fa3a918322dfc9def650fc2cedeb7b80033ea95
 size 125048
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a11756c6f7f6e54186c62470b3c672e88950e155939dddf52de8c5061bb695f5
+oid sha256:206af652e7f3377e6c0aecfa1c36bcdf508e6b891b0a50e079a3425838b06728
 size 162868
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d87c59ed120a2ee66d7021d83c6d58b1559824c8ca94cb61ee05deb92e21229f
+oid sha256:c41bdc40b014e3f55ca8563ea9de673fa395c6cc93a6cb821cfa7f874d5565c3
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8ce5266a3e298d4cec8a8c6eda5afc2b0bfc8277ce078fdf7b96388fe070633d
+oid sha256:95105769f4fba521c26686a89d84be3ea680f62dc39a9c9254e214e0a5a6bc5e
 size 1064
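
The four entries above are Git LFS pointer files: the repository itself stores only the version/oid/size triple, and the oid is the SHA-256 digest of the actual binary blob. A minimal sketch of checking a downloaded checkpoint file against its pointer, using the new adapter weights from this commit; the helper name verify_lfs_pointer is illustrative, not part of this repo:

import hashlib

def verify_lfs_pointer(blob_path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded LFS blob against the oid/size from its pointer file."""
    digest = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

ok = verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "28ca0f5984758b0a6bc98d2b9fa3a918322dfc9def650fc2cedeb7b80033ea95",
    125048,
)
print("pointer matches blob:", ok)
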
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 11.021401405334473,
-  "best_model_checkpoint": "miner_id_24/checkpoint-220",
-  "epoch": 0.009943278118008632,
+  "best_metric": 11.021036148071289,
+  "best_model_checkpoint": "miner_id_24/checkpoint-230",
+  "epoch": 0.010395245305190844,
   "eval_steps": 5,
-  "global_step": 220,
+  "global_step": 230,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1907,6 +1907,92 @@
       "eval_samples_per_second": 52.85,
       "eval_steps_per_second": 26.428,
       "step": 220
+    },
+    {
+      "epoch": 0.009988474836726853,
+      "grad_norm": 0.5452781915664673,
+      "learning_rate": 0.00012162657372432836,
+      "loss": 44.0602,
+      "step": 221
+    },
+    {
+      "epoch": 0.010033671555445075,
+      "grad_norm": 0.5344114303588867,
+      "learning_rate": 0.00012100016506928493,
+      "loss": 44.045,
+      "step": 222
+    },
+    {
+      "epoch": 0.010078868274163296,
+      "grad_norm": 0.4083841145038605,
+      "learning_rate": 0.00012037289317970757,
+      "loss": 44.0642,
+      "step": 223
+    },
+    {
+      "epoch": 0.010124064992881517,
+      "grad_norm": 0.4382067918777466,
+      "learning_rate": 0.00011974478384028672,
+      "loss": 44.0648,
+      "step": 224
+    },
+    {
+      "epoch": 0.010169261711599738,
+      "grad_norm": 0.42340517044067383,
+      "learning_rate": 0.00011911586287013725,
+      "loss": 44.1315,
+      "step": 225
+    },
+    {
+      "epoch": 0.010169261711599738,
+      "eval_loss": 11.021224975585938,
+      "eval_runtime": 176.0622,
+      "eval_samples_per_second": 52.919,
+      "eval_steps_per_second": 26.462,
+      "step": 225
+    },
+    {
+      "epoch": 0.01021445843031796,
+      "grad_norm": 0.5047578811645508,
+      "learning_rate": 0.00011848615612173688,
+      "loss": 44.123,
+      "step": 226
+    },
+    {
+      "epoch": 0.01025965514903618,
+      "grad_norm": 0.5647579431533813,
+      "learning_rate": 0.00011785568947986367,
+      "loss": 44.0525,
+      "step": 227
+    },
+    {
+      "epoch": 0.010304851867754402,
+      "grad_norm": 0.48243632912635803,
+      "learning_rate": 0.0001172244888605319,
+      "loss": 44.1143,
+      "step": 228
+    },
+    {
+      "epoch": 0.010350048586472623,
+      "grad_norm": 0.5492759943008423,
+      "learning_rate": 0.0001165925802099268,
+      "loss": 44.0494,
+      "step": 229
+    },
+    {
+      "epoch": 0.010395245305190844,
+      "grad_norm": 0.5804261565208435,
+      "learning_rate": 0.00011595998950333793,
+      "loss": 44.0785,
+      "step": 230
+    },
+    {
+      "epoch": 0.010395245305190844,
+      "eval_loss": 11.021036148071289,
+      "eval_runtime": 176.2523,
+      "eval_samples_per_second": 52.862,
+      "eval_steps_per_second": 26.434,
+      "step": 230
     }
   ],
   "logging_steps": 1,
@@ -1935,7 +2021,7 @@
     "attributes": {}
     }
   },
-  "total_flos": 2311480934400.0,
+  "total_flos": 2416548249600.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null