Training in progress, step 400, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1dc391235528041d2fc64c517de0de5a35d6e73291b9575d80be3c327678412e
 size 201353800
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:b759274f0d85a5e93ff596ccde0cd1ab9ef3d3a503db9c3340bcdec7982c30b6
 size 102463162
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ea1cdf60dba2aa43fbb9730847255fb9348f31329cac03f340a7414262a148e2
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ce6af8c2b755ab309a73060b69c0ffea28d5cf03ddedb3dc24064e45dedc183e
 size 1064
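Each entry above swaps one Git LFS pointer for another: the repository tracks only the version, oid sha256, and size lines, while the binary blob itself lives in LFS storage. As a minimal sketch of how such a pointer can be checked, assuming the checkpoint files have been pulled locally (the verify_lfs_pointer helper below is illustrative, not part of any LFS tooling):

import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_text: str, blob_path: Path) -> bool:
    """Check a local blob against the oid/size recorded in an LFS pointer."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    sha = hashlib.sha256()
    actual_size = 0
    with blob_path.open("rb") as f:
        # Hash in 1 MiB chunks so a ~200 MB adapter never sits in memory at once.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            actual_size += len(chunk)
    return sha.hexdigest() == expected_oid and actual_size == expected_size

pointer = """\
version https://git-lfs.github.com/spec/v1
oid sha256:1dc391235528041d2fc64c517de0de5a35d6e73291b9575d80be3c327678412e
size 201353800
"""
print(verify_lfs_pointer(pointer, Path("last-checkpoint/adapter_model.safetensors")))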
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.008372417651116848,
   "best_model_checkpoint": "miner_id_24/checkpoint-200",
-  "epoch": 0.
+  "epoch": 0.3273657289002558,
   "eval_steps": 100,
-  "global_step":
+  "global_step": 400,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2139,6 +2139,714 @@
       "eval_samples_per_second": 6.896,
       "eval_steps_per_second": 1.726,
       "step": 300
+    },
+    {
+      "epoch": 0.24634271099744245,
+      "grad_norm": 1.1538922786712646,
+      "learning_rate": 0.000193028873651704,
+      "loss": 0.0128,
+      "step": 301
+    },
+    {
+      "epoch": 0.2471611253196931,
+      "grad_norm": 1.4959121942520142,
+      "learning_rate": 0.000192981449234947,
+      "loss": 0.0151,
+      "step": 302
+    },
+    {
+      "epoch": 0.24797953964194375,
+      "grad_norm": 0.7326957583427429,
+      "learning_rate": 0.00019293386991696648,
+      "loss": 0.0074,
+      "step": 303
+    },
+    {
+      "epoch": 0.24879795396419438,
+      "grad_norm": 5.472940921783447,
+      "learning_rate": 0.00019288613577702655,
+      "loss": 0.1211,
+      "step": 304
+    },
+    {
+      "epoch": 0.249616368286445,
+      "grad_norm": 2.403130531311035,
+      "learning_rate": 0.00019283824689464926,
+      "loss": 0.059,
+      "step": 305
+    },
+    {
+      "epoch": 0.25043478260869567,
+      "grad_norm": 4.317528247833252,
+      "learning_rate": 0.00019279020334961447,
+      "loss": 0.1937,
+      "step": 306
+    },
+    {
+      "epoch": 0.2512531969309463,
+      "grad_norm": 2.334576368331909,
+      "learning_rate": 0.0001927420052219597,
+      "loss": 0.1942,
+      "step": 307
+    },
+    {
+      "epoch": 0.25207161125319694,
+      "grad_norm": 2.2626686096191406,
+      "learning_rate": 0.00019269365259198,
+      "loss": 0.0893,
+      "step": 308
+    },
+    {
+      "epoch": 0.2528900255754476,
+      "grad_norm": 1.1748579740524292,
+      "learning_rate": 0.0001926451455402277,
+      "loss": 0.011,
+      "step": 309
+    },
+    {
+      "epoch": 0.2537084398976982,
+      "grad_norm": 0.6484130620956421,
+      "learning_rate": 0.00019259648414751265,
+      "loss": 0.0094,
+      "step": 310
+    },
+    {
+      "epoch": 0.25452685421994886,
+      "grad_norm": 0.47816258668899536,
+      "learning_rate": 0.00019254766849490152,
+      "loss": 0.0062,
+      "step": 311
+    },
+    {
+      "epoch": 0.25534526854219947,
+      "grad_norm": 3.412865161895752,
+      "learning_rate": 0.00019249869866371817,
+      "loss": 0.1235,
+      "step": 312
+    },
+    {
+      "epoch": 0.2561636828644501,
+      "grad_norm": 12.466361999511719,
+      "learning_rate": 0.00019244957473554323,
+      "loss": 0.0131,
+      "step": 313
+    },
+    {
+      "epoch": 0.2569820971867008,
+      "grad_norm": 1.225807547569275,
+      "learning_rate": 0.00019240029679221408,
+      "loss": 0.0215,
+      "step": 314
+    },
+    {
+      "epoch": 0.2578005115089514,
+      "grad_norm": 0.0476732961833477,
+      "learning_rate": 0.00019235086491582463,
+      "loss": 0.0017,
+      "step": 315
+    },
+    {
+      "epoch": 0.25861892583120205,
+      "grad_norm": 4.701657772064209,
+      "learning_rate": 0.0001923012791887253,
+      "loss": 0.0362,
+      "step": 316
+    },
+    {
+      "epoch": 0.2594373401534527,
+      "grad_norm": 18.314184188842773,
+      "learning_rate": 0.00019225153969352275,
+      "loss": 0.4503,
+      "step": 317
+    },
+    {
+      "epoch": 0.2602557544757033,
+      "grad_norm": 2.99334716796875,
+      "learning_rate": 0.00019220164651307986,
+      "loss": 0.0408,
+      "step": 318
+    },
+    {
+      "epoch": 0.261074168797954,
+      "grad_norm": 3.7102837562561035,
+      "learning_rate": 0.00019215159973051552,
+      "loss": 0.1288,
+      "step": 319
+    },
+    {
+      "epoch": 0.2618925831202046,
+      "grad_norm": 2.6803882122039795,
+      "learning_rate": 0.0001921013994292045,
+      "loss": 0.0465,
+      "step": 320
+    },
+    {
+      "epoch": 0.26271099744245524,
+      "grad_norm": 8.001237869262695,
+      "learning_rate": 0.00019205104569277733,
+      "loss": 0.0932,
+      "step": 321
+    },
+    {
+      "epoch": 0.2635294117647059,
+      "grad_norm": 0.7996736764907837,
+      "learning_rate": 0.00019200053860512014,
+      "loss": 0.0281,
+      "step": 322
+    },
+    {
+      "epoch": 0.2643478260869565,
+      "grad_norm": 0.10956323891878128,
+      "learning_rate": 0.0001919498782503746,
+      "loss": 0.0034,
+      "step": 323
+    },
+    {
+      "epoch": 0.26516624040920717,
+      "grad_norm": 10.366631507873535,
+      "learning_rate": 0.0001918990647129376,
+      "loss": 0.054,
+      "step": 324
+    },
+    {
+      "epoch": 0.2659846547314578,
+      "grad_norm": 16.79122543334961,
+      "learning_rate": 0.0001918480980774613,
+      "loss": 0.3747,
+      "step": 325
+    },
+    {
+      "epoch": 0.26680306905370843,
+      "grad_norm": 16.79612159729004,
+      "learning_rate": 0.00019179697842885293,
+      "loss": 0.1583,
+      "step": 326
+    },
+    {
+      "epoch": 0.2676214833759591,
+      "grad_norm": 0.3873645067214966,
+      "learning_rate": 0.0001917457058522746,
+      "loss": 0.0056,
+      "step": 327
+    },
+    {
+      "epoch": 0.2684398976982097,
+      "grad_norm": 1.963756799697876,
+      "learning_rate": 0.00019169428043314314,
+      "loss": 0.073,
+      "step": 328
+    },
+    {
+      "epoch": 0.26925831202046036,
+      "grad_norm": 9.859981536865234,
+      "learning_rate": 0.00019164270225713008,
+      "loss": 0.0644,
+      "step": 329
+    },
+    {
+      "epoch": 0.270076726342711,
+      "grad_norm": 3.775178909301758,
+      "learning_rate": 0.0001915909714101614,
+      "loss": 0.1595,
+      "step": 330
+    },
+    {
+      "epoch": 0.2708951406649616,
+      "grad_norm": 2.9009556770324707,
+      "learning_rate": 0.00019153908797841742,
+      "loss": 0.0377,
+      "step": 331
+    },
+    {
+      "epoch": 0.2717135549872123,
+      "grad_norm": 11.263628005981445,
+      "learning_rate": 0.0001914870520483327,
+      "loss": 0.061,
+      "step": 332
+    },
+    {
+      "epoch": 0.27253196930946294,
+      "grad_norm": 0.01919454149901867,
+      "learning_rate": 0.00019143486370659573,
+      "loss": 0.0008,
+      "step": 333
+    },
+    {
+      "epoch": 0.27335038363171354,
+      "grad_norm": 2.147348642349243,
+      "learning_rate": 0.00019138252304014907,
+      "loss": 0.2487,
+      "step": 334
+    },
+    {
+      "epoch": 0.2741687979539642,
+      "grad_norm": 0.05764702707529068,
+      "learning_rate": 0.0001913300301361889,
+      "loss": 0.0016,
+      "step": 335
+    },
+    {
+      "epoch": 0.2749872122762148,
+      "grad_norm": 0.025458738207817078,
+      "learning_rate": 0.00019127738508216516,
+      "loss": 0.0009,
+      "step": 336
+    },
+    {
+      "epoch": 0.27580562659846547,
+      "grad_norm": 2.147672653198242,
+      "learning_rate": 0.00019122458796578114,
+      "loss": 0.1389,
+      "step": 337
+    },
+    {
+      "epoch": 0.27662404092071613,
+      "grad_norm": 2.0472524166107178,
+      "learning_rate": 0.0001911716388749935,
+      "loss": 0.0667,
+      "step": 338
+    },
+    {
+      "epoch": 0.27744245524296673,
+      "grad_norm": 0.15356223285198212,
+      "learning_rate": 0.0001911185378980121,
+      "loss": 0.0025,
+      "step": 339
+    },
+    {
+      "epoch": 0.2782608695652174,
+      "grad_norm": 0.0540032684803009,
+      "learning_rate": 0.00019106528512329978,
+      "loss": 0.002,
+      "step": 340
+    },
+    {
+      "epoch": 0.27907928388746805,
+      "grad_norm": 3.762300968170166,
+      "learning_rate": 0.00019101188063957235,
+      "loss": 0.1637,
+      "step": 341
+    },
+    {
+      "epoch": 0.27989769820971866,
+      "grad_norm": 1.0720164775848389,
+      "learning_rate": 0.0001909583245357983,
+      "loss": 0.0124,
+      "step": 342
+    },
+    {
+      "epoch": 0.2807161125319693,
+      "grad_norm": 3.95150089263916,
+      "learning_rate": 0.00019090461690119868,
+      "loss": 0.021,
+      "step": 343
+    },
+    {
+      "epoch": 0.2815345268542199,
+      "grad_norm": 5.743108749389648,
+      "learning_rate": 0.00019085075782524703,
+      "loss": 0.1296,
+      "step": 344
+    },
+    {
+      "epoch": 0.2823529411764706,
+      "grad_norm": 1.5119516849517822,
+      "learning_rate": 0.00019079674739766917,
+      "loss": 0.0188,
+      "step": 345
+    },
+    {
+      "epoch": 0.28317135549872124,
+      "grad_norm": 0.20997177064418793,
+      "learning_rate": 0.0001907425857084431,
+      "loss": 0.0049,
+      "step": 346
+    },
+    {
+      "epoch": 0.28398976982097185,
+      "grad_norm": 3.7278308868408203,
+      "learning_rate": 0.00019068827284779874,
+      "loss": 0.1293,
+      "step": 347
+    },
+    {
+      "epoch": 0.2848081841432225,
+      "grad_norm": 3.974945545196533,
+      "learning_rate": 0.0001906338089062179,
+      "loss": 0.0574,
+      "step": 348
+    },
+    {
+      "epoch": 0.28562659846547317,
+      "grad_norm": 34.30356216430664,
+      "learning_rate": 0.00019057919397443403,
+      "loss": 0.8184,
+      "step": 349
+    },
+    {
+      "epoch": 0.2864450127877238,
+      "grad_norm": 0.13904468715190887,
+      "learning_rate": 0.0001905244281434322,
+      "loss": 0.0013,
+      "step": 350
+    },
+    {
+      "epoch": 0.28726342710997443,
+      "grad_norm": 1.397051215171814,
+      "learning_rate": 0.00019046951150444882,
+      "loss": 0.0348,
+      "step": 351
+    },
+    {
+      "epoch": 0.28808184143222504,
+      "grad_norm": 0.16247797012329102,
+      "learning_rate": 0.00019041444414897153,
+      "loss": 0.0031,
+      "step": 352
+    },
+    {
+      "epoch": 0.2889002557544757,
+      "grad_norm": 0.17214684188365936,
+      "learning_rate": 0.0001903592261687391,
+      "loss": 0.0038,
+      "step": 353
+    },
+    {
+      "epoch": 0.28971867007672636,
+      "grad_norm": 0.03287775442004204,
+      "learning_rate": 0.00019030385765574114,
+      "loss": 0.0008,
+      "step": 354
+    },
+    {
+      "epoch": 0.29053708439897696,
+      "grad_norm": 1.5617440938949585,
+      "learning_rate": 0.00019024833870221817,
+      "loss": 0.128,
+      "step": 355
+    },
+    {
+      "epoch": 0.2913554987212276,
+      "grad_norm": 0.4113420248031616,
+      "learning_rate": 0.00019019266940066117,
+      "loss": 0.0022,
+      "step": 356
+    },
+    {
+      "epoch": 0.2921739130434783,
+      "grad_norm": 9.879963874816895,
+      "learning_rate": 0.00019013684984381176,
+      "loss": 0.1339,
+      "step": 357
+    },
+    {
+      "epoch": 0.2929923273657289,
+      "grad_norm": 2.0313758850097656,
+      "learning_rate": 0.00019008088012466179,
+      "loss": 0.0137,
+      "step": 358
+    },
+    {
+      "epoch": 0.29381074168797955,
+      "grad_norm": 2.5029163360595703,
+      "learning_rate": 0.00019002476033645326,
+      "loss": 0.0366,
+      "step": 359
+    },
+    {
+      "epoch": 0.29462915601023015,
+      "grad_norm": 2.537306785583496,
+      "learning_rate": 0.00018996849057267815,
+      "loss": 0.0249,
+      "step": 360
+    },
+    {
+      "epoch": 0.2954475703324808,
+      "grad_norm": 0.12636743485927582,
+      "learning_rate": 0.0001899120709270784,
+      "loss": 0.0024,
+      "step": 361
+    },
+    {
+      "epoch": 0.2962659846547315,
+      "grad_norm": 2.858910083770752,
+      "learning_rate": 0.00018985550149364552,
+      "loss": 0.0341,
+      "step": 362
+    },
+    {
+      "epoch": 0.2970843989769821,
+      "grad_norm": 0.029364800080657005,
+      "learning_rate": 0.0001897987823666207,
+      "loss": 0.0006,
+      "step": 363
+    },
+    {
+      "epoch": 0.29790281329923274,
+      "grad_norm": 4.783391952514648,
+      "learning_rate": 0.00018974191364049434,
+      "loss": 0.242,
+      "step": 364
+    },
+    {
+      "epoch": 0.2987212276214834,
+      "grad_norm": 1.4791219234466553,
+      "learning_rate": 0.0001896848954100062,
+      "loss": 0.0586,
+      "step": 365
+    },
+    {
+      "epoch": 0.299539641943734,
+      "grad_norm": 7.496979713439941,
+      "learning_rate": 0.000189627727770145,
+      "loss": 0.0033,
+      "step": 366
+    },
+    {
+      "epoch": 0.30035805626598466,
+      "grad_norm": 2.8373289108276367,
+      "learning_rate": 0.00018957041081614845,
+      "loss": 0.0062,
+      "step": 367
+    },
+    {
+      "epoch": 0.30117647058823527,
+      "grad_norm": 0.04176490381360054,
+      "learning_rate": 0.000189512944643503,
+      "loss": 0.0006,
+      "step": 368
+    },
+    {
+      "epoch": 0.30199488491048593,
+      "grad_norm": 6.637153625488281,
+      "learning_rate": 0.00018945532934794363,
+      "loss": 0.0217,
+      "step": 369
+    },
+    {
+      "epoch": 0.3028132992327366,
+      "grad_norm": 5.947137355804443,
+      "learning_rate": 0.0001893975650254538,
+      "loss": 0.0624,
+      "step": 370
+    },
+    {
+      "epoch": 0.3036317135549872,
+      "grad_norm": 4.605132102966309,
+      "learning_rate": 0.0001893396517722652,
+      "loss": 0.0287,
+      "step": 371
+    },
+    {
+      "epoch": 0.30445012787723785,
+      "grad_norm": 0.5078949928283691,
+      "learning_rate": 0.00018928158968485769,
+      "loss": 0.005,
+      "step": 372
+    },
+    {
+      "epoch": 0.3052685421994885,
+      "grad_norm": 2.0189433097839355,
+      "learning_rate": 0.00018922337885995903,
+      "loss": 0.0471,
+      "step": 373
+    },
+    {
+      "epoch": 0.3060869565217391,
+      "grad_norm": 1.7046236991882324,
+      "learning_rate": 0.00018916501939454476,
+      "loss": 0.1305,
+      "step": 374
+    },
+    {
+      "epoch": 0.3069053708439898,
+      "grad_norm": 23.23792839050293,
+      "learning_rate": 0.00018910651138583808,
+      "loss": 0.1648,
+      "step": 375
+    },
+    {
+      "epoch": 0.3077237851662404,
+      "grad_norm": 10.033699989318848,
+      "learning_rate": 0.00018904785493130963,
+      "loss": 0.1637,
+      "step": 376
+    },
+    {
+      "epoch": 0.30854219948849104,
+      "grad_norm": 1.7353355884552002,
+      "learning_rate": 0.00018898905012867736,
+      "loss": 0.041,
+      "step": 377
+    },
+    {
+      "epoch": 0.3093606138107417,
+      "grad_norm": 3.0973310470581055,
+      "learning_rate": 0.00018893009707590636,
+      "loss": 0.1848,
+      "step": 378
+    },
+    {
+      "epoch": 0.3101790281329923,
+      "grad_norm": 6.835923194885254,
+      "learning_rate": 0.0001888709958712087,
+      "loss": 0.1367,
+      "step": 379
+    },
+    {
+      "epoch": 0.31099744245524297,
+      "grad_norm": 5.92802619934082,
+      "learning_rate": 0.00018881174661304327,
+      "loss": 0.0561,
+      "step": 380
+    },
+    {
+      "epoch": 0.3118158567774936,
+      "grad_norm": 2.9860589504241943,
+      "learning_rate": 0.00018875234940011557,
+      "loss": 0.106,
+      "step": 381
+    },
+    {
+      "epoch": 0.31263427109974423,
+      "grad_norm": 2.7648398876190186,
+      "learning_rate": 0.00018869280433137759,
+      "loss": 0.0348,
+      "step": 382
+    },
+    {
+      "epoch": 0.3134526854219949,
+      "grad_norm": 1.2207233905792236,
+      "learning_rate": 0.00018863311150602773,
+      "loss": 0.0328,
+      "step": 383
+    },
+    {
+      "epoch": 0.3142710997442455,
+      "grad_norm": 0.7907407879829407,
+      "learning_rate": 0.00018857327102351034,
+      "loss": 0.0191,
+      "step": 384
+    },
+    {
+      "epoch": 0.31508951406649616,
+      "grad_norm": 0.05348999425768852,
+      "learning_rate": 0.000188513282983516,
+      "loss": 0.0023,
+      "step": 385
+    },
+    {
+      "epoch": 0.3159079283887468,
+      "grad_norm": 1.7986161708831787,
+      "learning_rate": 0.00018845314748598094,
+      "loss": 0.0403,
+      "step": 386
+    },
+    {
+      "epoch": 0.3167263427109974,
+      "grad_norm": 0.08972010016441345,
+      "learning_rate": 0.00018839286463108717,
+      "loss": 0.0015,
+      "step": 387
+    },
+    {
+      "epoch": 0.3175447570332481,
+      "grad_norm": 2.7837443351745605,
+      "learning_rate": 0.000188332434519262,
+      "loss": 0.0452,
+      "step": 388
+    },
+    {
+      "epoch": 0.31836317135549874,
+      "grad_norm": 0.03810626640915871,
+      "learning_rate": 0.00018827185725117827,
+      "loss": 0.0005,
+      "step": 389
+    },
+    {
+      "epoch": 0.31918158567774935,
+      "grad_norm": 3.4926528930664062,
+      "learning_rate": 0.00018821113292775388,
+      "loss": 0.1284,
+      "step": 390
+    },
+    {
+      "epoch": 0.32,
+      "grad_norm": 4.0867791175842285,
+      "learning_rate": 0.0001881502616501517,
+      "loss": 0.0975,
+      "step": 391
+    },
+    {
+      "epoch": 0.32081841432225067,
+      "grad_norm": 0.2339545637369156,
+      "learning_rate": 0.00018808924351977944,
+      "loss": 0.0018,
+      "step": 392
+    },
+    {
+      "epoch": 0.32163682864450127,
+      "grad_norm": 9.076642990112305,
+      "learning_rate": 0.00018802807863828945,
+      "loss": 0.1044,
+      "step": 393
+    },
+    {
+      "epoch": 0.32245524296675193,
+      "grad_norm": 11.172904014587402,
+      "learning_rate": 0.00018796676710757854,
+      "loss": 0.0998,
+      "step": 394
+    },
+    {
+      "epoch": 0.32327365728900254,
+      "grad_norm": 5.419793128967285,
+      "learning_rate": 0.00018790530902978788,
+      "loss": 0.0529,
+      "step": 395
+    },
+    {
+      "epoch": 0.3240920716112532,
+      "grad_norm": 0.1783570945262909,
+      "learning_rate": 0.00018784370450730274,
+      "loss": 0.0013,
+      "step": 396
+    },
+    {
+      "epoch": 0.32491048593350386,
+      "grad_norm": 2.391124725341797,
+      "learning_rate": 0.00018778195364275234,
+      "loss": 0.0167,
+      "step": 397
+    },
+    {
+      "epoch": 0.32572890025575446,
+      "grad_norm": 11.476174354553223,
+      "learning_rate": 0.00018772005653900977,
+      "loss": 0.0821,
+      "step": 398
+    },
+    {
+      "epoch": 0.3265473145780051,
+      "grad_norm": 4.880053997039795,
+      "learning_rate": 0.00018765801329919166,
+      "loss": 0.0475,
+      "step": 399
+    },
+    {
+      "epoch": 0.3273657289002558,
+      "grad_norm": 4.687880039215088,
+      "learning_rate": 0.00018759582402665814,
+      "loss": 0.3302,
+      "step": 400
+    },
+    {
+      "epoch": 0.3273657289002558,
+      "eval_loss": 0.01040154229849577,
+      "eval_runtime": 236.3558,
+      "eval_samples_per_second": 6.896,
+      "eval_steps_per_second": 1.726,
+      "step": 400
     }
   ],
   "logging_steps": 1,
@@ -2153,7 +2861,7 @@
         "early_stopping_threshold": 0.0
       },
       "attributes": {
-        "early_stopping_patience_counter":
+        "early_stopping_patience_counter": 2
       }
     },
     "TrainerControl": {
@@ -2162,12 +2870,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop":
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1.
+  "total_flos": 1.980132394795008e+17,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null
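Taken together, the trainer-state changes explain why this is the final checkpoint: the best eval_loss (0.008372...) was logged at checkpoint-200, the evaluations at steps 300 and 400 (eval_loss 0.0104 at step 400) failed to improve on it, the early-stopping patience counter reached 2, and should_training_stop flipped to true. A minimal sketch of reading those fields back, assuming last-checkpoint/ is available locally and that the callback flags sit under the stateful_callbacks key as in recent transformers releases:

import json
from pathlib import Path

# Parse the checkpoint's trainer state.
state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

print(state["best_metric"], state["best_model_checkpoint"])
# -> 0.008372417651116848 miner_id_24/checkpoint-200

# Only eval entries in log_history carry "eval_loss"; neither step 300
# nor step 400 improved on the step-200 metric, which is what drove the
# patience counter to 2 and stopped training.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(entry["step"], entry["eval_loss"])

# Assumed layout: TrainerControl flags nested under stateful_callbacks -> args.
control = state["stateful_callbacks"]["TrainerControl"]["args"]
print(control["should_training_stop"])  # True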