|
{ |
|
"best_metric": 0.3925156891345978, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGB-b0_3/checkpoint-320", |
|
"epoch": 106.66666666666667, |
|
"global_step": 320, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 1.111111111111111e-06, |
|
"loss": 1.1153, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 2.222222222222222e-06, |
|
"loss": 1.1199, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 3.3333333333333333e-06, |
|
"loss": 1.1156, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 4.444444444444444e-06, |
|
"loss": 1.1182, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 5.555555555555557e-06, |
|
"loss": 1.1161, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 6.666666666666667e-06, |
|
"loss": 1.1106, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 7.77777777777778e-06, |
|
"loss": 1.1141, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 8.888888888888888e-06, |
|
"loss": 1.1085, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 1e-05, |
|
"loss": 1.0992, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 1.1111111111111113e-05, |
|
"loss": 1.1015, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"eval_accuracy_dropoff": 0.5975256737197602, |
|
"eval_accuracy_undropoff": 0.31695782815139334, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.04272275097162494, |
|
"eval_iou_undropoff": 0.3123742688218148, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.0990227460861206, |
|
"eval_mean_accuracy": 0.45724175093557673, |
|
"eval_mean_iou": 0.1183656732644799, |
|
"eval_overall_accuracy": 0.32938079833984374, |
|
"eval_runtime": 1.6795, |
|
"eval_samples_per_second": 8.931, |
|
"eval_steps_per_second": 0.595, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 1.2222222222222224e-05, |
|
"loss": 1.102, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 1.3333333333333333e-05, |
|
"loss": 1.0912, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 1.4444444444444446e-05, |
|
"loss": 1.0939, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 1.555555555555556e-05, |
|
"loss": 1.0846, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 1.0804, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"learning_rate": 1.7777777777777777e-05, |
|
"loss": 1.0734, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 5.67, |
|
"learning_rate": 1.888888888888889e-05, |
|
"loss": 1.0642, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 2e-05, |
|
"loss": 1.0945, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 6.33, |
|
"learning_rate": 1.994152046783626e-05, |
|
"loss": 1.0588, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 6.67, |
|
"learning_rate": 1.9883040935672515e-05, |
|
"loss": 1.0478, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 6.67, |
|
"eval_accuracy_dropoff": 0.8648482551060261, |
|
"eval_accuracy_undropoff": 0.5515245664509166, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.08793587759511783, |
|
"eval_iou_undropoff": 0.5482206998703943, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.0755951404571533, |
|
"eval_mean_accuracy": 0.7081864107784714, |
|
"eval_mean_iou": 0.21205219248850404, |
|
"eval_overall_accuracy": 0.5653978983561198, |
|
"eval_runtime": 1.8074, |
|
"eval_samples_per_second": 8.299, |
|
"eval_steps_per_second": 0.553, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 1.9824561403508773e-05, |
|
"loss": 1.0748, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 7.33, |
|
"learning_rate": 1.976608187134503e-05, |
|
"loss": 1.0343, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 7.67, |
|
"learning_rate": 1.970760233918129e-05, |
|
"loss": 1.0348, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 1.9649122807017544e-05, |
|
"loss": 1.0366, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 8.33, |
|
"learning_rate": 1.9590643274853802e-05, |
|
"loss": 1.0351, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 8.67, |
|
"learning_rate": 1.953216374269006e-05, |
|
"loss": 1.0085, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 1.9473684210526318e-05, |
|
"loss": 1.0048, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 9.33, |
|
"learning_rate": 1.9415204678362573e-05, |
|
"loss": 1.0001, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 9.67, |
|
"learning_rate": 1.935672514619883e-05, |
|
"loss": 0.9877, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 1.929824561403509e-05, |
|
"loss": 1.0451, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_dropoff": 0.8842327750591586, |
|
"eval_accuracy_undropoff": 0.7264455095352592, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.1312558881078339, |
|
"eval_iou_undropoff": 0.7226098464796189, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.026917576789856, |
|
"eval_mean_accuracy": 0.8053391422972089, |
|
"eval_mean_iou": 0.2846219115291509, |
|
"eval_overall_accuracy": 0.7334320068359375, |
|
"eval_runtime": 1.7831, |
|
"eval_samples_per_second": 8.412, |
|
"eval_steps_per_second": 0.561, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 10.33, |
|
"learning_rate": 1.9239766081871347e-05, |
|
"loss": 0.9774, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 10.67, |
|
"learning_rate": 1.9181286549707602e-05, |
|
"loss": 0.9797, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 1.912280701754386e-05, |
|
"loss": 0.9325, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 11.33, |
|
"learning_rate": 1.9064327485380118e-05, |
|
"loss": 0.9775, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 11.67, |
|
"learning_rate": 1.9005847953216376e-05, |
|
"loss": 0.9393, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 1.894736842105263e-05, |
|
"loss": 0.9431, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 12.33, |
|
"learning_rate": 1.888888888888889e-05, |
|
"loss": 0.9369, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 12.67, |
|
"learning_rate": 1.8830409356725147e-05, |
|
"loss": 0.9234, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 1.8771929824561405e-05, |
|
"loss": 1.0184, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 13.33, |
|
"learning_rate": 1.871345029239766e-05, |
|
"loss": 0.9095, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 13.33, |
|
"eval_accuracy_dropoff": 0.7349346382704988, |
|
"eval_accuracy_undropoff": 0.8459984055569215, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.17233702408257653, |
|
"eval_iou_undropoff": 0.835792668708584, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.947559118270874, |
|
"eval_mean_accuracy": 0.7904665219137101, |
|
"eval_mean_iou": 0.33604323093038685, |
|
"eval_overall_accuracy": 0.8410807291666667, |
|
"eval_runtime": 1.7664, |
|
"eval_samples_per_second": 8.492, |
|
"eval_steps_per_second": 0.566, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 13.67, |
|
"learning_rate": 1.8654970760233918e-05, |
|
"loss": 0.9068, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 1.8596491228070176e-05, |
|
"loss": 0.9478, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 14.33, |
|
"learning_rate": 1.8538011695906434e-05, |
|
"loss": 0.9028, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 14.67, |
|
"learning_rate": 1.847953216374269e-05, |
|
"loss": 0.8937, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 1.8421052631578947e-05, |
|
"loss": 0.7988, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 15.33, |
|
"learning_rate": 1.8362573099415205e-05, |
|
"loss": 0.8873, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 15.67, |
|
"learning_rate": 1.8304093567251464e-05, |
|
"loss": 0.8282, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 1.824561403508772e-05, |
|
"loss": 0.868, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 16.33, |
|
"learning_rate": 1.8187134502923976e-05, |
|
"loss": 0.8684, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 16.67, |
|
"learning_rate": 1.8128654970760235e-05, |
|
"loss": 0.8091, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 16.67, |
|
"eval_accuracy_dropoff": 0.5974624945436166, |
|
"eval_accuracy_undropoff": 0.9315254285997107, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2429297387681515, |
|
"eval_iou_undropoff": 0.914512280023459, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.8424645662307739, |
|
"eval_mean_accuracy": 0.7644939615716636, |
|
"eval_mean_iou": 0.3858140062638702, |
|
"eval_overall_accuracy": 0.9167338053385417, |
|
"eval_runtime": 1.7428, |
|
"eval_samples_per_second": 8.607, |
|
"eval_steps_per_second": 0.574, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 1.8070175438596493e-05, |
|
"loss": 0.7826, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 17.33, |
|
"learning_rate": 1.8011695906432747e-05, |
|
"loss": 0.8106, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 17.67, |
|
"learning_rate": 1.7953216374269006e-05, |
|
"loss": 0.7961, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 1.7894736842105264e-05, |
|
"loss": 0.7195, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 18.33, |
|
"learning_rate": 1.7836257309941522e-05, |
|
"loss": 0.7777, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 18.67, |
|
"learning_rate": 1.7777777777777777e-05, |
|
"loss": 0.7961, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 1.7719298245614035e-05, |
|
"loss": 0.9039, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 19.33, |
|
"learning_rate": 1.7660818713450293e-05, |
|
"loss": 0.7125, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 19.67, |
|
"learning_rate": 1.760233918128655e-05, |
|
"loss": 0.7704, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 1.754385964912281e-05, |
|
"loss": 0.8094, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_dropoff": 0.5280687848921359, |
|
"eval_accuracy_undropoff": 0.9608414146478016, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2866027007818053, |
|
"eval_iou_undropoff": 0.9402828328280236, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7488501071929932, |
|
"eval_mean_accuracy": 0.7444550997699688, |
|
"eval_mean_iou": 0.40896184453660966, |
|
"eval_overall_accuracy": 0.9416791280110677, |
|
"eval_runtime": 1.8293, |
|
"eval_samples_per_second": 8.2, |
|
"eval_steps_per_second": 0.547, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 20.33, |
|
"learning_rate": 1.7485380116959064e-05, |
|
"loss": 0.7233, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 20.67, |
|
"learning_rate": 1.7426900584795322e-05, |
|
"loss": 0.7181, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 1.736842105263158e-05, |
|
"loss": 0.9374, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 21.33, |
|
"learning_rate": 1.7309941520467838e-05, |
|
"loss": 0.7476, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 21.67, |
|
"learning_rate": 1.7251461988304093e-05, |
|
"loss": 0.7718, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 1.719298245614035e-05, |
|
"loss": 0.7803, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 22.33, |
|
"learning_rate": 1.713450292397661e-05, |
|
"loss": 0.761, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 22.67, |
|
"learning_rate": 1.7076023391812867e-05, |
|
"loss": 0.6901, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 1.7017543859649125e-05, |
|
"loss": 0.9088, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 23.33, |
|
"learning_rate": 1.695906432748538e-05, |
|
"loss": 0.6945, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 23.33, |
|
"eval_accuracy_dropoff": 0.5298435453856227, |
|
"eval_accuracy_undropoff": 0.9645688777057901, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.30036630036630035, |
|
"eval_iou_undropoff": 0.9440065001523473, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7005425095558167, |
|
"eval_mean_accuracy": 0.7472062115457064, |
|
"eval_mean_iou": 0.4147909335062159, |
|
"eval_overall_accuracy": 0.9453201293945312, |
|
"eval_runtime": 1.7223, |
|
"eval_samples_per_second": 8.71, |
|
"eval_steps_per_second": 0.581, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 23.67, |
|
"learning_rate": 1.690058479532164e-05, |
|
"loss": 0.6711, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 1.6842105263157896e-05, |
|
"loss": 0.7554, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 24.33, |
|
"learning_rate": 1.6783625730994155e-05, |
|
"loss": 0.6708, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 24.67, |
|
"learning_rate": 1.672514619883041e-05, |
|
"loss": 0.6926, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 0.9247, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 25.33, |
|
"learning_rate": 1.6608187134502926e-05, |
|
"loss": 0.6425, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 25.67, |
|
"learning_rate": 1.6549707602339184e-05, |
|
"loss": 0.6567, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 1.649122807017544e-05, |
|
"loss": 0.8971, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 26.33, |
|
"learning_rate": 1.6432748538011697e-05, |
|
"loss": 0.6993, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 26.67, |
|
"learning_rate": 1.6374269005847955e-05, |
|
"loss": 0.6337, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 26.67, |
|
"eval_accuracy_dropoff": 0.49581294369012335, |
|
"eval_accuracy_undropoff": 0.9709405298276873, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3046961862238145, |
|
"eval_iou_undropoff": 0.9487783537255239, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.6331071257591248, |
|
"eval_mean_accuracy": 0.7333767367589054, |
|
"eval_mean_iou": 0.6267372699746692, |
|
"eval_overall_accuracy": 0.9499028523763021, |
|
"eval_runtime": 1.7079, |
|
"eval_samples_per_second": 8.783, |
|
"eval_steps_per_second": 0.586, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 1.6315789473684213e-05, |
|
"loss": 0.7322, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 27.33, |
|
"learning_rate": 1.625730994152047e-05, |
|
"loss": 0.6814, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 27.67, |
|
"learning_rate": 1.6198830409356726e-05, |
|
"loss": 0.6695, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 1.6140350877192984e-05, |
|
"loss": 0.6004, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 28.33, |
|
"learning_rate": 1.6081871345029242e-05, |
|
"loss": 0.647, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 28.67, |
|
"learning_rate": 1.60233918128655e-05, |
|
"loss": 0.5803, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 1.5964912280701755e-05, |
|
"loss": 0.8716, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 29.33, |
|
"learning_rate": 1.5906432748538013e-05, |
|
"loss": 0.6239, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 29.67, |
|
"learning_rate": 1.584795321637427e-05, |
|
"loss": 0.597, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 1.578947368421053e-05, |
|
"loss": 0.603, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_dropoff": 0.40565625933328736, |
|
"eval_accuracy_undropoff": 0.9813906247172738, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.28940801416143874, |
|
"eval_iou_undropoff": 0.9550916579222961, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.5726452469825745, |
|
"eval_mean_accuracy": 0.6935234420252806, |
|
"eval_mean_iou": 0.6222498360418675, |
|
"eval_overall_accuracy": 0.9558982849121094, |
|
"eval_runtime": 1.7573, |
|
"eval_samples_per_second": 8.536, |
|
"eval_steps_per_second": 0.569, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 30.33, |
|
"learning_rate": 1.5730994152046787e-05, |
|
"loss": 0.5913, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 30.67, |
|
"learning_rate": 1.5672514619883042e-05, |
|
"loss": 0.6683, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 1.56140350877193e-05, |
|
"loss": 0.8263, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 31.33, |
|
"learning_rate": 1.555555555555556e-05, |
|
"loss": 0.5791, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 31.67, |
|
"learning_rate": 1.5497076023391816e-05, |
|
"loss": 0.6118, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 1.543859649122807e-05, |
|
"loss": 0.6546, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 32.33, |
|
"learning_rate": 1.538011695906433e-05, |
|
"loss": 0.6008, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 32.67, |
|
"learning_rate": 1.5321637426900587e-05, |
|
"loss": 0.5931, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 1.5263157894736846e-05, |
|
"loss": 0.8761, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 33.33, |
|
"learning_rate": 1.52046783625731e-05, |
|
"loss": 0.5903, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 33.33, |
|
"eval_accuracy_dropoff": 0.4546373515289361, |
|
"eval_accuracy_undropoff": 0.9756530777115378, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2980214980892679, |
|
"eval_iou_undropoff": 0.9516094455131633, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.5840517282485962, |
|
"eval_mean_accuracy": 0.715145214620237, |
|
"eval_mean_iou": 0.6248154718012155, |
|
"eval_overall_accuracy": 0.9525835673014323, |
|
"eval_runtime": 1.6835, |
|
"eval_samples_per_second": 8.91, |
|
"eval_steps_per_second": 0.594, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 33.67, |
|
"learning_rate": 1.5146198830409358e-05, |
|
"loss": 0.5691, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 1.5087719298245615e-05, |
|
"loss": 0.8078, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 34.33, |
|
"learning_rate": 1.5029239766081873e-05, |
|
"loss": 0.5583, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 34.67, |
|
"learning_rate": 1.497076023391813e-05, |
|
"loss": 0.5857, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 1.4912280701754388e-05, |
|
"loss": 0.5148, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 35.33, |
|
"learning_rate": 1.4853801169590644e-05, |
|
"loss": 0.5866, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 35.67, |
|
"learning_rate": 1.4795321637426902e-05, |
|
"loss": 0.5276, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 1.4736842105263159e-05, |
|
"loss": 0.8399, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 36.33, |
|
"learning_rate": 1.4678362573099417e-05, |
|
"loss": 0.5761, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 36.67, |
|
"learning_rate": 1.4619883040935675e-05, |
|
"loss": 0.5514, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 36.67, |
|
"eval_accuracy_dropoff": 0.3781273692191054, |
|
"eval_accuracy_undropoff": 0.9853985522286546, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.28751293775466086, |
|
"eval_iou_undropoff": 0.9578033403813699, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.5156698822975159, |
|
"eval_mean_accuracy": 0.68176296072388, |
|
"eval_mean_iou": 0.6226581390680154, |
|
"eval_overall_accuracy": 0.9585098266601563, |
|
"eval_runtime": 1.8267, |
|
"eval_samples_per_second": 8.212, |
|
"eval_steps_per_second": 0.547, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 1.4561403508771931e-05, |
|
"loss": 0.6281, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 37.33, |
|
"learning_rate": 1.4502923976608188e-05, |
|
"loss": 0.5795, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 37.67, |
|
"learning_rate": 1.4444444444444446e-05, |
|
"loss": 0.5427, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 1.4385964912280704e-05, |
|
"loss": 0.8123, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 38.33, |
|
"learning_rate": 1.432748538011696e-05, |
|
"loss": 0.5282, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 38.67, |
|
"learning_rate": 1.4269005847953217e-05, |
|
"loss": 0.6357, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 1.4210526315789475e-05, |
|
"loss": 0.5648, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 39.33, |
|
"learning_rate": 1.4152046783625733e-05, |
|
"loss": 0.5177, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 39.67, |
|
"learning_rate": 1.409356725146199e-05, |
|
"loss": 0.5163, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 1.4035087719298246e-05, |
|
"loss": 0.6464, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_dropoff": 0.3941002136604866, |
|
"eval_accuracy_undropoff": 0.983633542058492, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.29122208357737656, |
|
"eval_iou_undropoff": 0.9567759549263578, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.5141220092773438, |
|
"eval_mean_accuracy": 0.6888668778594893, |
|
"eval_mean_iou": 0.6239990192518672, |
|
"eval_overall_accuracy": 0.9575302124023437, |
|
"eval_runtime": 1.7894, |
|
"eval_samples_per_second": 8.383, |
|
"eval_steps_per_second": 0.559, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 40.33, |
|
"learning_rate": 1.3976608187134504e-05, |
|
"loss": 0.5223, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 40.67, |
|
"learning_rate": 1.3918128654970762e-05, |
|
"loss": 0.5158, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 1.385964912280702e-05, |
|
"loss": 0.7741, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 41.33, |
|
"learning_rate": 1.3801169590643275e-05, |
|
"loss": 0.5087, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 41.67, |
|
"learning_rate": 1.3742690058479533e-05, |
|
"loss": 0.5203, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 1.3684210526315791e-05, |
|
"loss": 0.5061, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 42.33, |
|
"learning_rate": 1.362573099415205e-05, |
|
"loss": 0.4992, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 42.67, |
|
"learning_rate": 1.3567251461988304e-05, |
|
"loss": 0.5293, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 1.3508771929824562e-05, |
|
"loss": 0.7778, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 43.33, |
|
"learning_rate": 1.345029239766082e-05, |
|
"loss": 0.5198, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 43.33, |
|
"eval_accuracy_dropoff": 0.3656810715188274, |
|
"eval_accuracy_undropoff": 0.9866212601635103, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.28375204453179664, |
|
"eval_iou_undropoff": 0.9584546122881992, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.48896968364715576, |
|
"eval_mean_accuracy": 0.6761511658411689, |
|
"eval_mean_iou": 0.41406888560666527, |
|
"eval_overall_accuracy": 0.9591272989908854, |
|
"eval_runtime": 1.7404, |
|
"eval_samples_per_second": 8.619, |
|
"eval_steps_per_second": 0.575, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 43.67, |
|
"learning_rate": 1.3391812865497079e-05, |
|
"loss": 0.4739, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 1.3333333333333333e-05, |
|
"loss": 0.7511, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 44.33, |
|
"learning_rate": 1.3274853801169591e-05, |
|
"loss": 0.4826, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 44.67, |
|
"learning_rate": 1.321637426900585e-05, |
|
"loss": 0.5534, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 1.3157894736842108e-05, |
|
"loss": 0.5636, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 45.33, |
|
"learning_rate": 1.3099415204678362e-05, |
|
"loss": 0.5102, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 45.67, |
|
"learning_rate": 1.304093567251462e-05, |
|
"loss": 0.485, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 1.2982456140350879e-05, |
|
"loss": 0.486, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 46.33, |
|
"learning_rate": 1.2923976608187137e-05, |
|
"loss": 0.4934, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 46.67, |
|
"learning_rate": 1.2865497076023392e-05, |
|
"loss": 0.5077, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 46.67, |
|
"eval_accuracy_dropoff": 0.3572322925999954, |
|
"eval_accuracy_undropoff": 0.986647337503579, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.27731853040841803, |
|
"eval_iou_undropoff": 0.95811562022686, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.48549026250839233, |
|
"eval_mean_accuracy": 0.6719398150517872, |
|
"eval_mean_iou": 0.4118113835450927, |
|
"eval_overall_accuracy": 0.9587781270345052, |
|
"eval_runtime": 1.7229, |
|
"eval_samples_per_second": 8.706, |
|
"eval_steps_per_second": 0.58, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 1.280701754385965e-05, |
|
"loss": 0.647, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 47.33, |
|
"learning_rate": 1.2748538011695908e-05, |
|
"loss": 0.4891, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 47.67, |
|
"learning_rate": 1.2690058479532166e-05, |
|
"loss": 0.4703, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 1.263157894736842e-05, |
|
"loss": 0.7596, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 48.33, |
|
"learning_rate": 1.2573099415204679e-05, |
|
"loss": 0.4825, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 48.67, |
|
"learning_rate": 1.2514619883040937e-05, |
|
"loss": 0.4702, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 1.2456140350877195e-05, |
|
"loss": 0.6684, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 49.33, |
|
"learning_rate": 1.239766081871345e-05, |
|
"loss": 0.4681, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 49.67, |
|
"learning_rate": 1.2339181286549708e-05, |
|
"loss": 0.497, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 1.2280701754385966e-05, |
|
"loss": 0.4817, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_dropoff": 0.36020745744020954, |
|
"eval_accuracy_undropoff": 0.9863945469620963, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.27843880695086975, |
|
"eval_iou_undropoff": 0.9579983692751803, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.47096526622772217, |
|
"eval_mean_accuracy": 0.673301002201153, |
|
"eval_mean_iou": 0.6182185881130251, |
|
"eval_overall_accuracy": 0.9586682637532552, |
|
"eval_runtime": 1.7512, |
|
"eval_samples_per_second": 8.565, |
|
"eval_steps_per_second": 0.571, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 50.33, |
|
"learning_rate": 1.2222222222222224e-05, |
|
"loss": 0.4865, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 50.67, |
|
"learning_rate": 1.216374269005848e-05, |
|
"loss": 0.4611, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 1.2105263157894737e-05, |
|
"loss": 0.5011, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 51.33, |
|
"learning_rate": 1.2046783625730995e-05, |
|
"loss": 0.4967, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 51.67, |
|
"learning_rate": 1.1988304093567253e-05, |
|
"loss": 0.4445, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 1.192982456140351e-05, |
|
"loss": 0.7343, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 52.33, |
|
"learning_rate": 1.1871345029239766e-05, |
|
"loss": 0.4597, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 52.67, |
|
"learning_rate": 1.1812865497076024e-05, |
|
"loss": 0.5019, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 1.1754385964912282e-05, |
|
"loss": 0.4556, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 53.33, |
|
"learning_rate": 1.1695906432748539e-05, |
|
"loss": 0.4713, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 53.33, |
|
"eval_accuracy_dropoff": 0.3479047487766214, |
|
"eval_accuracy_undropoff": 0.9886552926888718, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.27947052255677257, |
|
"eval_iou_undropoff": 0.9596627946110987, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.46686360239982605, |
|
"eval_mean_accuracy": 0.6682800207327466, |
|
"eval_mean_iou": 0.6195666585839357, |
|
"eval_overall_accuracy": 0.960284169514974, |
|
"eval_runtime": 1.8139, |
|
"eval_samples_per_second": 8.27, |
|
"eval_steps_per_second": 0.551, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 53.67, |
|
"learning_rate": 1.1637426900584797e-05, |
|
"loss": 0.4486, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"learning_rate": 1.1578947368421053e-05, |
|
"loss": 0.7127, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 54.33, |
|
"learning_rate": 1.1520467836257312e-05, |
|
"loss": 0.4508, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 54.67, |
|
"learning_rate": 1.1461988304093568e-05, |
|
"loss": 0.4438, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 1.1403508771929826e-05, |
|
"loss": 0.464, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 55.33, |
|
"learning_rate": 1.1345029239766083e-05, |
|
"loss": 0.4426, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 55.67, |
|
"learning_rate": 1.128654970760234e-05, |
|
"loss": 0.4411, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"learning_rate": 1.1228070175438597e-05, |
|
"loss": 0.71, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 56.33, |
|
"learning_rate": 1.1169590643274855e-05, |
|
"loss": 0.4387, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 56.67, |
|
"learning_rate": 1.1111111111111113e-05, |
|
"loss": 0.4516, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 56.67, |
|
"eval_accuracy_dropoff": 0.3265214694327659, |
|
"eval_accuracy_undropoff": 0.9906150846236295, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2715210912425493, |
|
"eval_iou_undropoff": 0.9606413422410078, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4485587179660797, |
|
"eval_mean_accuracy": 0.6585682770281976, |
|
"eval_mean_iou": 0.41072081116118575, |
|
"eval_overall_accuracy": 0.9612103780110677, |
|
"eval_runtime": 1.7967, |
|
"eval_samples_per_second": 8.349, |
|
"eval_steps_per_second": 0.557, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"learning_rate": 1.105263157894737e-05, |
|
"loss": 0.6996, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 57.33, |
|
"learning_rate": 1.0994152046783626e-05, |
|
"loss": 0.456, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 57.67, |
|
"learning_rate": 1.0935672514619884e-05, |
|
"loss": 0.4341, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"learning_rate": 1.0877192982456142e-05, |
|
"loss": 0.7256, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 58.33, |
|
"learning_rate": 1.0818713450292399e-05, |
|
"loss": 0.4225, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 58.67, |
|
"learning_rate": 1.0760233918128655e-05, |
|
"loss": 0.471, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"learning_rate": 1.0701754385964913e-05, |
|
"loss": 0.4369, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 59.33, |
|
"learning_rate": 1.0643274853801172e-05, |
|
"loss": 0.4481, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 59.67, |
|
"learning_rate": 1.0584795321637428e-05, |
|
"loss": 0.4331, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 1.0526315789473684e-05, |
|
"loss": 0.4059, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy_dropoff": 0.31866427734509617, |
|
"eval_accuracy_undropoff": 0.9909349311824317, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.26651615227572956, |
|
"eval_iou_undropoff": 0.9606124109353421, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.4360799789428711, |
|
"eval_mean_accuracy": 0.6547996042637639, |
|
"eval_mean_iou": 0.6135642816055358, |
|
"eval_overall_accuracy": 0.9611681620279948, |
|
"eval_runtime": 1.686, |
|
"eval_samples_per_second": 8.897, |
|
"eval_steps_per_second": 0.593, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 60.33, |
|
"learning_rate": 1.0467836257309943e-05, |
|
"loss": 0.4479, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 60.67, |
|
"learning_rate": 1.04093567251462e-05, |
|
"loss": 0.4304, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"learning_rate": 1.0350877192982459e-05, |
|
"loss": 0.4971, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 61.33, |
|
"learning_rate": 1.0292397660818714e-05, |
|
"loss": 0.4302, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 61.67, |
|
"learning_rate": 1.0233918128654972e-05, |
|
"loss": 0.4735, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"learning_rate": 1.017543859649123e-05, |
|
"loss": 0.3974, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 62.33, |
|
"learning_rate": 1.0116959064327488e-05, |
|
"loss": 0.4172, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 62.67, |
|
"learning_rate": 1.0058479532163743e-05, |
|
"loss": 0.4458, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"learning_rate": 1e-05, |
|
"loss": 0.6849, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 63.33, |
|
"learning_rate": 9.941520467836257e-06, |
|
"loss": 0.4142, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 63.33, |
|
"eval_accuracy_dropoff": 0.3088657614813794, |
|
"eval_accuracy_undropoff": 0.9917302900545283, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.2620841577885431, |
|
"eval_iou_undropoff": 0.9609605443916848, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.42670688033103943, |
|
"eval_mean_accuracy": 0.6502980257679538, |
|
"eval_mean_iou": 0.611522351090114, |
|
"eval_overall_accuracy": 0.9614944458007812, |
|
"eval_runtime": 1.7659, |
|
"eval_samples_per_second": 8.494, |
|
"eval_steps_per_second": 0.566, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 63.67, |
|
"learning_rate": 9.883040935672515e-06, |
|
"loss": 0.4708, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"learning_rate": 9.824561403508772e-06, |
|
"loss": 0.6601, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 64.33, |
|
"learning_rate": 9.76608187134503e-06, |
|
"loss": 0.4291, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 64.67, |
|
"learning_rate": 9.707602339181286e-06, |
|
"loss": 0.4169, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 9.649122807017545e-06, |
|
"loss": 0.4025, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 65.33, |
|
"learning_rate": 9.590643274853801e-06, |
|
"loss": 0.415, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 65.67, |
|
"learning_rate": 9.532163742690059e-06, |
|
"loss": 0.4125, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 9.473684210526315e-06, |
|
"loss": 0.4011, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 66.33, |
|
"learning_rate": 9.415204678362574e-06, |
|
"loss": 0.3921, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 66.67, |
|
"learning_rate": 9.35672514619883e-06, |
|
"loss": 0.4393, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 66.67, |
|
"eval_accuracy_dropoff": 0.2767822271233947, |
|
"eval_accuracy_undropoff": 0.9940290874101795, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.24518308597943494, |
|
"eval_iou_undropoff": 0.9618027430695911, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.4188476800918579, |
|
"eval_mean_accuracy": 0.6354056572667871, |
|
"eval_mean_iou": 0.603492914524513, |
|
"eval_overall_accuracy": 0.9622708638509114, |
|
"eval_runtime": 1.8501, |
|
"eval_samples_per_second": 8.108, |
|
"eval_steps_per_second": 0.541, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"learning_rate": 9.298245614035088e-06, |
|
"loss": 0.669, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 67.33, |
|
"learning_rate": 9.239766081871345e-06, |
|
"loss": 0.4048, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 67.67, |
|
"learning_rate": 9.181286549707603e-06, |
|
"loss": 0.4138, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"learning_rate": 9.12280701754386e-06, |
|
"loss": 0.3792, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 68.33, |
|
"learning_rate": 9.064327485380117e-06, |
|
"loss": 0.4212, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 68.67, |
|
"learning_rate": 9.005847953216374e-06, |
|
"loss": 0.4216, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"learning_rate": 8.947368421052632e-06, |
|
"loss": 0.6528, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 69.33, |
|
"learning_rate": 8.888888888888888e-06, |
|
"loss": 0.4081, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 69.67, |
|
"learning_rate": 8.830409356725146e-06, |
|
"loss": 0.4353, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"learning_rate": 8.771929824561405e-06, |
|
"loss": 0.4071, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy_dropoff": 0.31381096790497853, |
|
"eval_accuracy_undropoff": 0.9917361441512784, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.2663089040962352, |
|
"eval_iou_undropoff": 0.9611795984188876, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.422432005405426, |
|
"eval_mean_accuracy": 0.6527735560281285, |
|
"eval_mean_iou": 0.6137442512575614, |
|
"eval_overall_accuracy": 0.961719004313151, |
|
"eval_runtime": 1.7566, |
|
"eval_samples_per_second": 8.539, |
|
"eval_steps_per_second": 0.569, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 70.33, |
|
"learning_rate": 8.713450292397661e-06, |
|
"loss": 0.3994, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 70.67, |
|
"learning_rate": 8.654970760233919e-06, |
|
"loss": 0.4181, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"learning_rate": 8.596491228070176e-06, |
|
"loss": 0.6651, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 71.33, |
|
"learning_rate": 8.538011695906434e-06, |
|
"loss": 0.4053, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 71.67, |
|
"learning_rate": 8.47953216374269e-06, |
|
"loss": 0.4051, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"learning_rate": 8.421052631578948e-06, |
|
"loss": 0.6544, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 72.33, |
|
"learning_rate": 8.362573099415205e-06, |
|
"loss": 0.3965, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 72.67, |
|
"learning_rate": 8.304093567251463e-06, |
|
"loss": 0.4129, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"learning_rate": 8.24561403508772e-06, |
|
"loss": 0.629, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 73.33, |
|
"learning_rate": 8.187134502923977e-06, |
|
"loss": 0.4009, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 73.33, |
|
"eval_accuracy_dropoff": 0.3167344406919843, |
|
"eval_accuracy_undropoff": 0.991247327072643, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.2664045101231395, |
|
"eval_iou_undropoff": 0.9608319701708583, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.42051607370376587, |
|
"eval_mean_accuracy": 0.6539908838823136, |
|
"eval_mean_iou": 0.613618240146999, |
|
"eval_overall_accuracy": 0.9613812764485677, |
|
"eval_runtime": 1.867, |
|
"eval_samples_per_second": 8.034, |
|
"eval_steps_per_second": 0.536, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 73.67, |
|
"learning_rate": 8.128654970760235e-06, |
|
"loss": 0.3928, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"learning_rate": 8.070175438596492e-06, |
|
"loss": 0.4003, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 74.33, |
|
"learning_rate": 8.01169590643275e-06, |
|
"loss": 0.4068, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 74.67, |
|
"learning_rate": 7.953216374269006e-06, |
|
"loss": 0.3951, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"learning_rate": 7.894736842105265e-06, |
|
"loss": 0.3757, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 75.33, |
|
"learning_rate": 7.836257309941521e-06, |
|
"loss": 0.4151, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 75.67, |
|
"learning_rate": 7.77777777777778e-06, |
|
"loss": 0.3908, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"learning_rate": 7.719298245614036e-06, |
|
"loss": 0.3951, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 76.33, |
|
"learning_rate": 7.660818713450294e-06, |
|
"loss": 0.3945, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 76.67, |
|
"learning_rate": 7.60233918128655e-06, |
|
"loss": 0.4043, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 76.67, |
|
"eval_accuracy_dropoff": 0.3107553932042181, |
|
"eval_accuracy_undropoff": 0.9920240592732618, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.265114022794759, |
|
"eval_iou_undropoff": 0.961326747557732, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.4147787392139435, |
|
"eval_mean_accuracy": 0.6513897262387399, |
|
"eval_mean_iou": 0.6132203851762454, |
|
"eval_overall_accuracy": 0.9618588765462239, |
|
"eval_runtime": 1.7618, |
|
"eval_samples_per_second": 8.514, |
|
"eval_steps_per_second": 0.568, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"learning_rate": 7.5438596491228074e-06, |
|
"loss": 0.3996, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 77.33, |
|
"learning_rate": 7.485380116959065e-06, |
|
"loss": 0.3915, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 77.67, |
|
"learning_rate": 7.426900584795322e-06, |
|
"loss": 0.3897, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"learning_rate": 7.368421052631579e-06, |
|
"loss": 0.6263, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 78.33, |
|
"learning_rate": 7.309941520467837e-06, |
|
"loss": 0.3799, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 78.67, |
|
"learning_rate": 7.251461988304094e-06, |
|
"loss": 0.426, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"learning_rate": 7.192982456140352e-06, |
|
"loss": 0.3634, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 79.33, |
|
"learning_rate": 7.134502923976608e-06, |
|
"loss": 0.3855, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 79.67, |
|
"learning_rate": 7.0760233918128665e-06, |
|
"loss": 0.3971, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"learning_rate": 7.017543859649123e-06, |
|
"loss": 0.6302, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_accuracy_dropoff": 0.3104911893767087, |
|
"eval_accuracy_undropoff": 0.9920998964357066, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.26525905680652806, |
|
"eval_iou_undropoff": 0.9613888343805383, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.41156673431396484, |
|
"eval_mean_accuracy": 0.6512955429062077, |
|
"eval_mean_iou": 0.6133239455935332, |
|
"eval_overall_accuracy": 0.961919657389323, |
|
"eval_runtime": 1.7273, |
|
"eval_samples_per_second": 8.684, |
|
"eval_steps_per_second": 0.579, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 80.33, |
|
"learning_rate": 6.959064327485381e-06, |
|
"loss": 0.3876, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 80.67, |
|
"learning_rate": 6.9005847953216375e-06, |
|
"loss": 0.3886, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 81.0, |
|
"learning_rate": 6.842105263157896e-06, |
|
"loss": 0.3851, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 81.33, |
|
"learning_rate": 6.783625730994152e-06, |
|
"loss": 0.3875, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 81.67, |
|
"learning_rate": 6.72514619883041e-06, |
|
"loss": 0.3796, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"learning_rate": 6.666666666666667e-06, |
|
"loss": 0.6854, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 82.33, |
|
"learning_rate": 6.608187134502925e-06, |
|
"loss": 0.3814, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 82.67, |
|
"learning_rate": 6.549707602339181e-06, |
|
"loss": 0.3889, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 83.0, |
|
"learning_rate": 6.491228070175439e-06, |
|
"loss": 0.3922, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 83.33, |
|
"learning_rate": 6.432748538011696e-06, |
|
"loss": 0.3859, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 83.33, |
|
"eval_accuracy_dropoff": 0.3173777195763549, |
|
"eval_accuracy_undropoff": 0.9913138508993489, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.26726835662221704, |
|
"eval_iou_undropoff": 0.9609242121673439, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.4113296866416931, |
|
"eval_mean_accuracy": 0.6543457852378519, |
|
"eval_mean_iou": 0.6140962843947805, |
|
"eval_overall_accuracy": 0.9614733378092448, |
|
"eval_runtime": 1.7765, |
|
"eval_samples_per_second": 8.444, |
|
"eval_steps_per_second": 0.563, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 83.67, |
|
"learning_rate": 6.374269005847954e-06, |
|
"loss": 0.3859, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"learning_rate": 6.31578947368421e-06, |
|
"loss": 0.3474, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 84.33, |
|
"learning_rate": 6.2573099415204685e-06, |
|
"loss": 0.377, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 84.67, |
|
"learning_rate": 6.198830409356725e-06, |
|
"loss": 0.4041, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"learning_rate": 6.140350877192983e-06, |
|
"loss": 0.3824, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 85.33, |
|
"learning_rate": 6.08187134502924e-06, |
|
"loss": 0.4135, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 85.67, |
|
"learning_rate": 6.023391812865498e-06, |
|
"loss": 0.3715, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"learning_rate": 5.964912280701755e-06, |
|
"loss": 0.3602, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 86.33, |
|
"learning_rate": 5.906432748538012e-06, |
|
"loss": 0.3684, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 86.67, |
|
"learning_rate": 5.847953216374269e-06, |
|
"loss": 0.3791, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 86.67, |
|
"eval_accuracy_dropoff": 0.278223861051761, |
|
"eval_accuracy_undropoff": 0.9940320144585546, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.24647392844059104, |
|
"eval_iou_undropoff": 0.9618677355390541, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.4033331573009491, |
|
"eval_mean_accuracy": 0.6361279377551579, |
|
"eval_mean_iou": 0.6041708319898226, |
|
"eval_overall_accuracy": 0.9623374938964844, |
|
"eval_runtime": 1.68, |
|
"eval_samples_per_second": 8.928, |
|
"eval_steps_per_second": 0.595, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 87.0, |
|
"learning_rate": 5.789473684210527e-06, |
|
"loss": 0.6619, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 87.33, |
|
"learning_rate": 5.730994152046784e-06, |
|
"loss": 0.3884, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 87.67, |
|
"learning_rate": 5.672514619883041e-06, |
|
"loss": 0.3966, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"learning_rate": 5.6140350877192985e-06, |
|
"loss": 0.3558, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 88.33, |
|
"learning_rate": 5.555555555555557e-06, |
|
"loss": 0.3794, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 88.67, |
|
"learning_rate": 5.497076023391813e-06, |
|
"loss": 0.3904, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 89.0, |
|
"learning_rate": 5.438596491228071e-06, |
|
"loss": 0.3461, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 89.33, |
|
"learning_rate": 5.380116959064328e-06, |
|
"loss": 0.3826, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 89.67, |
|
"learning_rate": 5.321637426900586e-06, |
|
"loss": 0.3937, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"learning_rate": 5.263157894736842e-06, |
|
"loss": 0.5716, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"eval_accuracy_dropoff": 0.3237243549980472, |
|
"eval_accuracy_undropoff": 0.9912779280329277, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.27243506085477026, |
|
"eval_iou_undropoff": 0.9611633426621673, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.4087795615196228, |
|
"eval_mean_accuracy": 0.6575011415154874, |
|
"eval_mean_iou": 0.6167992017584688, |
|
"eval_overall_accuracy": 0.9617200215657552, |
|
"eval_runtime": 1.8296, |
|
"eval_samples_per_second": 8.199, |
|
"eval_steps_per_second": 0.547, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 90.33, |
|
"learning_rate": 5.2046783625731e-06, |
|
"loss": 0.3782, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 90.67, |
|
"learning_rate": 5.146198830409357e-06, |
|
"loss": 0.3905, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 91.0, |
|
"learning_rate": 5.087719298245615e-06, |
|
"loss": 0.6109, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 91.33, |
|
"learning_rate": 5.029239766081871e-06, |
|
"loss": 0.3883, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 91.67, |
|
"learning_rate": 4.970760233918129e-06, |
|
"loss": 0.3683, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"learning_rate": 4.912280701754386e-06, |
|
"loss": 0.3911, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 92.33, |
|
"learning_rate": 4.853801169590643e-06, |
|
"loss": 0.3763, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 92.67, |
|
"learning_rate": 4.7953216374269005e-06, |
|
"loss": 0.3672, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 93.0, |
|
"learning_rate": 4.736842105263158e-06, |
|
"loss": 0.4146, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 93.33, |
|
"learning_rate": 4.678362573099415e-06, |
|
"loss": 0.3803, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 93.33, |
|
"eval_accuracy_dropoff": 0.32113975233762954, |
|
"eval_accuracy_undropoff": 0.991769938255245, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.27269712295829535, |
|
"eval_iou_undropoff": 0.9615287673876304, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.4023641347885132, |
|
"eval_mean_accuracy": 0.6564548452964373, |
|
"eval_mean_iou": 0.6171129451729629, |
|
"eval_overall_accuracy": 0.9620758056640625, |
|
"eval_runtime": 1.8601, |
|
"eval_samples_per_second": 8.064, |
|
"eval_steps_per_second": 0.538, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 93.67, |
|
"learning_rate": 4.619883040935672e-06, |
|
"loss": 0.3645, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"learning_rate": 4.56140350877193e-06, |
|
"loss": 0.5711, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 94.33, |
|
"learning_rate": 4.502923976608187e-06, |
|
"loss": 0.3857, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 94.67, |
|
"learning_rate": 4.444444444444444e-06, |
|
"loss": 0.3704, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"learning_rate": 4.385964912280702e-06, |
|
"loss": 0.3785, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 95.33, |
|
"learning_rate": 4.3274853801169596e-06, |
|
"loss": 0.3766, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 95.67, |
|
"learning_rate": 4.269005847953217e-06, |
|
"loss": 0.3818, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 96.0, |
|
"learning_rate": 4.210526315789474e-06, |
|
"loss": 0.3488, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 96.33, |
|
"learning_rate": 4.152046783625731e-06, |
|
"loss": 0.367, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 96.67, |
|
"learning_rate": 4.093567251461989e-06, |
|
"loss": 0.371, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 96.67, |
|
"eval_accuracy_dropoff": 0.31540767799296987, |
|
"eval_accuracy_undropoff": 0.992470833293419, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.27131514849088206, |
|
"eval_iou_undropoff": 0.9619606189446372, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.3979186713695526, |
|
"eval_mean_accuracy": 0.6539392556431944, |
|
"eval_mean_iou": 0.6166378837177596, |
|
"eval_overall_accuracy": 0.9624918619791667, |
|
"eval_runtime": 1.7017, |
|
"eval_samples_per_second": 8.815, |
|
"eval_steps_per_second": 0.588, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 97.0, |
|
"learning_rate": 4.035087719298246e-06, |
|
"loss": 0.3744, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 97.33, |
|
"learning_rate": 3.976608187134503e-06, |
|
"loss": 0.3645, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 97.67, |
|
"learning_rate": 3.9181286549707605e-06, |
|
"loss": 0.3771, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 98.0, |
|
"learning_rate": 3.859649122807018e-06, |
|
"loss": 0.3987, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 98.33, |
|
"learning_rate": 3.801169590643275e-06, |
|
"loss": 0.3906, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 98.67, |
|
"learning_rate": 3.7426900584795324e-06, |
|
"loss": 0.3508, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 99.0, |
|
"learning_rate": 3.6842105263157896e-06, |
|
"loss": 0.5944, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 99.33, |
|
"learning_rate": 3.625730994152047e-06, |
|
"loss": 0.3811, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 99.67, |
|
"learning_rate": 3.567251461988304e-06, |
|
"loss": 0.3551, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"learning_rate": 3.5087719298245615e-06, |
|
"loss": 0.3656, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"eval_accuracy_dropoff": 0.3316332391389253, |
|
"eval_accuracy_undropoff": 0.9913191728054854, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.2793001533393637, |
|
"eval_iou_undropoff": 0.9615449538253468, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.399229496717453, |
|
"eval_mean_accuracy": 0.6614762059722054, |
|
"eval_mean_iou": 0.6204225535823552, |
|
"eval_overall_accuracy": 0.962109629313151, |
|
"eval_runtime": 2.007, |
|
"eval_samples_per_second": 7.474, |
|
"eval_steps_per_second": 0.498, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 100.33, |
|
"learning_rate": 3.4502923976608188e-06, |
|
"loss": 0.3867, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 100.67, |
|
"learning_rate": 3.391812865497076e-06, |
|
"loss": 0.3689, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 101.0, |
|
"learning_rate": 3.3333333333333333e-06, |
|
"loss": 0.3547, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 101.33, |
|
"learning_rate": 3.2748538011695906e-06, |
|
"loss": 0.3687, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 101.67, |
|
"learning_rate": 3.216374269005848e-06, |
|
"loss": 0.3784, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 102.0, |
|
"learning_rate": 3.157894736842105e-06, |
|
"loss": 0.3557, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 102.33, |
|
"learning_rate": 3.0994152046783624e-06, |
|
"loss": 0.3867, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 102.67, |
|
"learning_rate": 3.04093567251462e-06, |
|
"loss": 0.3777, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 103.0, |
|
"learning_rate": 2.9824561403508774e-06, |
|
"loss": 0.3422, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 103.33, |
|
"learning_rate": 2.9239766081871347e-06, |
|
"loss": 0.3674, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 103.33, |
|
"eval_accuracy_dropoff": 0.2925023548602017, |
|
"eval_accuracy_undropoff": 0.994085765710533, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.2593896115802663, |
|
"eval_iou_undropoff": 0.962535874553267, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.3929786682128906, |
|
"eval_mean_accuracy": 0.6432940602853674, |
|
"eval_mean_iou": 0.6109627430667666, |
|
"eval_overall_accuracy": 0.9630210876464844, |
|
"eval_runtime": 1.7736, |
|
"eval_samples_per_second": 8.458, |
|
"eval_steps_per_second": 0.564, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 103.67, |
|
"learning_rate": 2.865497076023392e-06, |
|
"loss": 0.3616, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 104.0, |
|
"learning_rate": 2.8070175438596493e-06, |
|
"loss": 0.6056, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 104.33, |
|
"learning_rate": 2.7485380116959066e-06, |
|
"loss": 0.3567, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 104.67, |
|
"learning_rate": 2.690058479532164e-06, |
|
"loss": 0.369, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 105.0, |
|
"learning_rate": 2.631578947368421e-06, |
|
"loss": 0.3308, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 105.33, |
|
"learning_rate": 2.5730994152046784e-06, |
|
"loss": 0.3748, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 105.67, |
|
"learning_rate": 2.5146198830409357e-06, |
|
"loss": 0.3664, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 106.0, |
|
"learning_rate": 2.456140350877193e-06, |
|
"loss": 0.5831, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 106.33, |
|
"learning_rate": 2.3976608187134502e-06, |
|
"loss": 0.3561, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 106.67, |
|
"learning_rate": 2.3391812865497075e-06, |
|
"loss": 0.378, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 106.67, |
|
"eval_accuracy_dropoff": 0.2980793530452363, |
|
"eval_accuracy_undropoff": 0.9936860905596836, |
|
"eval_accuracy_unlabeled": null, |
|
"eval_iou_dropoff": 0.2623283932145818, |
|
"eval_iou_undropoff": 0.9623896530698185, |
|
"eval_iou_unlabeled": null, |
|
"eval_loss": 0.3925156891345978, |
|
"eval_mean_accuracy": 0.64588272180246, |
|
"eval_mean_iou": 0.6123590231422001, |
|
"eval_overall_accuracy": 0.9628860473632812, |
|
"eval_runtime": 1.7445, |
|
"eval_samples_per_second": 8.598, |
|
"eval_steps_per_second": 0.573, |
|
"step": 320 |
|
} |
|
], |
|
"max_steps": 360, |
|
"num_train_epochs": 120, |
|
"total_flos": 6.373605557403648e+16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|