diff --git "a/pytorch-image-models/wandb/run-20250310_192650-s4ytw3km/files/output.log" "b/pytorch-image-models/wandb/run-20250310_192650-s4ytw3km/files/output.log"
new file mode 100644
--- /dev/null
+++ "b/pytorch-image-models/wandb/run-20250310_192650-s4ytw3km/files/output.log"
@@ -0,0 +1,2338 @@
+Scheduled epochs: 150 (epochs + cooldown_epochs). Warmup within epochs when warmup_prefix=False. LR stepped per epoch.
+Train: 0 [ 0/312 ( 0%)] Loss: 6.95 (6.95) Time: 4.281s, 239.20/s (4.281s, 239.20/s) LR: 1.000e-05 Data: 1.519 (1.519)
+Train: 0 [ 50/312 ( 16%)] Loss: 6.93 (6.94) Time: 0.390s, 2626.67/s (0.467s, 2193.55/s) LR: 1.000e-05 Data: 0.027 (0.056)
+Train: 0 [ 100/312 ( 32%)] Loss: 6.95 (6.94) Time: 0.395s, 2592.51/s (0.430s, 2379.79/s) LR: 1.000e-05 Data: 0.028 (0.042)
+Train: 0 [ 150/312 ( 48%)] Loss: 6.94 (6.94) Time: 0.394s, 2597.42/s (0.418s, 2447.62/s) LR: 1.000e-05 Data: 0.027 (0.037)
+Train: 0 [ 200/312 ( 64%)] Loss: 6.92 (6.94) Time: 0.395s, 2594.94/s (0.413s, 2480.67/s) LR: 1.000e-05 Data: 0.026 (0.035)
+Train: 0 [ 250/312 ( 80%)] Loss: 6.94 (6.94) Time: 0.399s, 2568.95/s (0.410s, 2498.85/s) LR: 1.000e-05 Data: 0.027 (0.033)
+Train: 0 [ 300/312 ( 96%)] Loss: 6.95 (6.94) Time: 0.400s, 2559.18/s (0.408s, 2510.12/s) LR: 1.000e-05 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.579 (1.579) Loss: 6.946 ( 6.946) Acc@1: 0.098 ( 0.098) Acc@5: 0.488 ( 0.488)
+Test: [ 48/48] Time: 0.710 (0.338) Loss: 6.940 ( 6.939) Acc@1: 0.118 ( 0.082) Acc@5: 0.354 ( 0.508)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-0.pth.tar', 0.0819999999666214)
+
+Train: 1 [ 0/312 ( 0%)] Loss: 6.95 (6.95) Time: 1.686s, 607.49/s (1.686s, 607.49/s) LR: 8.001e-02 Data: 1.228 (1.228)
+Train: 1 [ 50/312 ( 16%)] Loss: 6.91 (6.93) Time: 0.405s, 2529.60/s (0.430s, 2383.45/s) LR: 8.001e-02 Data: 0.027 (0.050)
+Train: 1 [ 100/312 ( 32%)] Loss: 6.91 (6.92) Time: 0.405s, 2528.20/s (0.418s, 2452.58/s) LR: 8.001e-02 Data: 0.028 (0.039)
+Train: 1 [ 150/312 ( 48%)] Loss: 6.92 (6.92) Time: 0.403s, 2540.93/s (0.414s, 2475.05/s) LR: 8.001e-02 Data: 0.027 (0.035)
+Train: 1 [ 200/312 ( 64%)] Loss: 6.91 (6.92) Time: 0.405s, 2528.99/s (0.412s, 2488.14/s) LR: 8.001e-02 Data: 0.027 (0.033)
+Train: 1 [ 250/312 ( 80%)] Loss: 6.92 (6.92) Time: 0.403s, 2537.83/s (0.410s, 2494.86/s) LR: 8.001e-02 Data: 0.027 (0.032)
+Train: 1 [ 300/312 ( 96%)] Loss: 6.91 (6.92) Time: 0.405s, 2526.53/s (0.410s, 2500.44/s) LR: 8.001e-02 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.434 (1.434) Loss: 6.871 ( 6.871) Acc@1: 0.098 ( 0.098) Acc@5: 1.074 ( 1.074)
+Test: [ 48/48] Time: 0.090 (0.320) Loss: 6.855 ( 6.868) Acc@1: 0.236 ( 0.292) Acc@5: 1.769 ( 1.444)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-1.pth.tar', 0.29199999993324277)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-0.pth.tar', 0.0819999999666214)
+
+Train: 2 [ 0/312 ( 0%)] Loss: 6.91 (6.91) Time: 2.181s, 469.47/s (2.181s, 469.47/s) LR: 1.600e-01 Data: 1.808 (1.808)
+Train: 2 [ 50/312 ( 16%)] Loss: 6.91 (6.92) Time: 0.405s, 2529.48/s (0.441s, 2323.33/s) LR: 1.600e-01 Data: 0.027 (0.062)
+Train: 2 [ 100/312 ( 32%)] Loss: 6.91 (6.92) Time: 0.406s, 2519.57/s (0.423s, 2419.84/s) LR: 1.600e-01 Data: 0.030 (0.045)
+Train: 2 [ 150/312 ( 48%)] Loss: 6.91 (6.91) Time: 0.410s, 2498.26/s (0.417s, 2454.55/s) LR: 1.600e-01 Data: 0.033 (0.039)
+Train: 2 [ 200/312 ( 64%)] Loss: 6.89 (6.91) Time: 0.403s, 2541.48/s (0.414s, 2471.86/s) LR: 1.600e-01 Data: 0.028 (0.036)
+Train: 2 [ 250/312 ( 80%)] Loss: 6.90 (6.91) Time: 0.408s, 2507.19/s (0.412s, 2483.00/s) LR: 1.600e-01 Data: 0.028 (0.035)
+Train: 2 [ 300/312 ( 96%)] Loss: 6.91 (6.91) Time: 0.407s, 2516.59/s (0.411s, 2489.96/s) LR: 1.600e-01 Data: 0.029 (0.033)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.406 (1.406) Loss: 6.833 ( 6.833) Acc@1: 0.586 ( 0.586) Acc@5: 2.344 ( 2.344)
+Test: [ 48/48] Time: 0.090 (0.319) Loss: 6.810 ( 6.827) Acc@1: 0.708 ( 0.538) Acc@5: 3.656 ( 2.382)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-2.pth.tar', 0.537999999294281)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-1.pth.tar', 0.29199999993324277)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-0.pth.tar', 0.0819999999666214)
+
+Train: 3 [ 0/312 ( 0%)] Loss: 6.89 (6.89) Time: 1.675s, 611.28/s (1.675s, 611.28/s) LR: 2.400e-01 Data: 1.185 (1.185)
+Train: 3 [ 50/312 ( 16%)] Loss: 6.90 (6.90) Time: 0.403s, 2540.30/s (0.428s, 2389.94/s) LR: 2.400e-01 Data: 0.028 (0.050)
+Train: 3 [ 100/312 ( 32%)] Loss: 6.92 (6.90) Time: 0.408s, 2508.34/s (0.417s, 2456.91/s) LR: 2.400e-01 Data: 0.027 (0.039)
+Train: 3 [ 150/312 ( 48%)] Loss: 6.90 (6.90) Time: 0.402s, 2548.09/s (0.413s, 2481.71/s) LR: 2.400e-01 Data: 0.028 (0.035)
+Train: 3 [ 200/312 ( 64%)] Loss: 6.89 (6.90) Time: 0.399s, 2567.91/s (0.410s, 2500.39/s) LR: 2.400e-01 Data: 0.027 (0.033)
+Train: 3 [ 250/312 ( 80%)] Loss: 6.89 (6.90) Time: 0.398s, 2575.90/s (0.408s, 2512.36/s) LR: 2.400e-01 Data: 0.027 (0.032)
+Train: 3 [ 300/312 ( 96%)] Loss: 6.90 (6.90) Time: 0.400s, 2561.50/s (0.406s, 2520.99/s) LR: 2.400e-01 Data: 0.028 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.418 (1.418) Loss: 6.797 ( 6.797) Acc@1: 0.781 ( 0.781) Acc@5: 2.930 ( 2.930)
+Test: [ 48/48] Time: 0.090 (0.319) Loss: 6.783 ( 6.797) Acc@1: 0.943 ( 0.780) Acc@5: 3.774 ( 3.072)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-3.pth.tar', 0.7799999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-2.pth.tar', 0.537999999294281)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-1.pth.tar', 0.29199999993324277)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-0.pth.tar', 0.0819999999666214)
+
+Train: 4 [ 0/312 ( 0%)] Loss: 6.90 (6.90) Time: 1.608s, 636.89/s (1.608s, 636.89/s) LR: 3.200e-01 Data: 1.004 (1.004)
+Train: 4 [ 50/312 ( 16%)] Loss: 6.89 (6.89) Time: 0.403s, 2539.90/s (0.426s, 2404.54/s) LR: 3.200e-01 Data: 0.027 (0.047)
+Train: 4 [ 100/312 ( 32%)] Loss: 6.89 (6.89) Time: 0.405s, 2527.47/s (0.415s, 2467.11/s) LR: 3.200e-01 Data: 0.027 (0.037)
+Train: 4 [ 150/312 ( 48%)] Loss: 6.90 (6.89) Time: 0.405s, 2526.11/s (0.412s, 2485.89/s) LR: 3.200e-01 Data: 0.028 (0.034)
+Train: 4 [ 200/312 ( 64%)] Loss: 6.89 (6.89) Time: 0.402s, 2545.22/s (0.410s, 2500.21/s) LR: 3.200e-01 Data: 0.025 (0.032)
+Train: 4 [ 250/312 ( 80%)] Loss: 6.91 (6.89) Time: 0.401s, 2550.48/s (0.408s, 2510.11/s) LR: 3.200e-01 Data: 0.029 (0.032)
+Train: 4 [ 300/312 ( 96%)] Loss: 6.90 (6.89) Time: 0.402s, 2548.85/s (0.407s, 2516.73/s) LR: 3.200e-01 Data: 0.027 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.449 (1.449) Loss: 6.781 ( 6.781) Acc@1: 0.488 ( 0.488) Acc@5: 3.223 ( 3.223)
+Test: [ 48/48] Time: 0.090 (0.318) Loss: 6.760 ( 6.778) Acc@1: 0.943 ( 0.874) Acc@5: 3.538 ( 3.456)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-4.pth.tar', 0.8739999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-3.pth.tar', 0.7799999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-2.pth.tar', 0.537999999294281)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-1.pth.tar', 0.29199999993324277)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-0.pth.tar', 0.0819999999666214)
+
+Train: 5 [ 0/312 ( 0%)] Loss: 6.89 (6.89) Time: 1.785s, 573.57/s (1.785s, 573.57/s) LR: 3.989e-01 Data: 1.414 (1.414)
+Train: 5 [ 50/312 ( 16%)] Loss: 6.89 (6.89) Time: 0.404s, 2535.64/s (0.432s, 2372.35/s) LR: 3.989e-01 Data: 0.027 (0.055)
+Train: 5 [ 100/312 ( 32%)] Loss: 6.90 (6.89) Time: 0.406s, 2523.52/s (0.419s, 2443.19/s) LR: 3.989e-01 Data: 0.028 (0.041)
+Train: 5 [ 150/312 ( 48%)] Loss: 6.89 (6.89) Time: 0.403s, 2539.50/s (0.414s, 2471.21/s) LR: 3.989e-01 Data: 0.027 (0.037)
+Train: 5 [ 200/312 ( 64%)] Loss: 6.89 (6.89) Time: 0.403s, 2538.54/s (0.412s, 2486.96/s) LR: 3.989e-01 Data: 0.028 (0.035)
+Train: 5 [ 250/312 ( 80%)] Loss: 6.89 (6.89) Time: 0.408s, 2512.01/s (0.411s, 2494.19/s) LR: 3.989e-01 Data: 0.027 (0.033)
+Train: 5 [ 300/312 ( 96%)] Loss: 6.89 (6.89) Time: 0.407s, 2518.68/s (0.410s, 2497.12/s) LR: 3.989e-01 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.402 (1.402) Loss: 6.763 ( 6.763) Acc@1: 1.172 ( 1.172) Acc@5: 3.809 ( 3.809)
+Test: [ 48/48] Time: 0.090 (0.320) Loss: 6.739 ( 6.757) Acc@1: 1.297 ( 0.998) Acc@5: 5.896 ( 3.854)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-5.pth.tar', 0.9979999998855591)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-4.pth.tar', 0.8739999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-3.pth.tar', 0.7799999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-2.pth.tar', 0.537999999294281)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-1.pth.tar', 0.29199999993324277)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-0.pth.tar', 0.0819999999666214)
+
+Train: 6 [ 0/312 ( 0%)] Loss: 6.89 (6.89) Time: 1.642s, 623.78/s (1.642s, 623.78/s) LR: 3.984e-01 Data: 1.271 (1.271)
+Train: 6 [ 50/312 ( 16%)] Loss: 6.89 (6.88) Time: 0.403s, 2541.41/s (0.426s, 2400.95/s) LR: 3.984e-01 Data: 0.028 (0.052)
+Train: 6 [ 100/312 ( 32%)] Loss: 6.89 (6.88) Time: 0.406s, 2521.13/s (0.415s, 2468.39/s) LR: 3.984e-01 Data: 0.027 (0.040)
+Train: 6 [ 150/312 ( 48%)] Loss: 6.89 (6.88) Time: 0.406s, 2521.41/s (0.412s, 2486.31/s) LR: 3.984e-01 Data: 0.028 (0.036)
+Train: 6 [ 200/312 ( 64%)] Loss: 6.88 (6.89) Time: 0.405s, 2530.82/s (0.410s, 2496.40/s) LR: 3.984e-01 Data: 0.029 (0.034)
+Train: 6 [ 250/312 ( 80%)] Loss: 6.88 (6.89) Time: 0.402s, 2546.70/s (0.409s, 2504.03/s) LR: 3.984e-01 Data: 0.026 (0.033)
+Train: 6 [ 300/312 ( 96%)] Loss: 6.89 (6.89) Time: 0.407s, 2517.85/s (0.408s, 2508.28/s) LR: 3.984e-01 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.410 (1.410) Loss: 6.740 ( 6.740) Acc@1: 1.074 ( 1.074) Acc@5: 3.906 ( 3.906)
+Test: [ 48/48] Time: 0.090 (0.320) Loss: 6.712 ( 6.733) Acc@1: 1.651 ( 1.278) Acc@5: 5.542 ( 4.416)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-6.pth.tar', 1.2780000020599365)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-5.pth.tar', 0.9979999998855591)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-4.pth.tar', 0.8739999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-3.pth.tar', 0.7799999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-2.pth.tar', 0.537999999294281)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-1.pth.tar', 0.29199999993324277)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-0.pth.tar', 0.0819999999666214)
+
+Train: 7 [ 0/312 ( 0%)] Loss: 6.88 (6.88) Time: 2.105s, 486.43/s (2.105s, 486.43/s) LR: 3.979e-01 Data: 1.298 (1.298)
+Train: 7 [ 50/312 ( 16%)] Loss: 6.88 (6.88) Time: 0.406s, 2524.31/s (0.439s, 2333.75/s) LR: 3.979e-01 Data: 0.027 (0.052)
+Train: 7 [ 100/312 ( 32%)] Loss: 6.87 (6.88) Time: 0.409s, 2506.11/s (0.423s, 2421.45/s) LR: 3.979e-01 Data: 0.027 (0.040)
+Train: 7 [ 150/312 ( 48%)] Loss: 6.88 (6.88) Time: 0.407s, 2515.24/s (0.417s, 2454.40/s) LR: 3.979e-01 Data: 0.027 (0.036)
+Train: 7 [ 200/312 ( 64%)] Loss: 6.89 (6.88) Time: 0.404s, 2532.59/s (0.414s, 2473.62/s) LR: 3.979e-01 Data: 0.027 (0.034)
+Train: 7 [ 250/312 ( 80%)] Loss: 6.88 (6.88) Time: 0.404s, 2533.07/s (0.412s, 2483.29/s) LR: 3.979e-01 Data: 0.027 (0.032)
+Train: 7 [ 300/312 ( 96%)] Loss: 6.89 (6.88) Time: 0.408s, 2511.98/s (0.412s, 2487.89/s) LR: 3.979e-01 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.412 (1.412) Loss: 6.710 ( 6.710) Acc@1: 1.074 ( 1.074) Acc@5: 4.102 ( 4.102)
+Test: [ 48/48] Time: 0.090 (0.320) Loss: 6.674 ( 6.699) Acc@1: 1.179 ( 1.268) Acc@5: 5.896 ( 4.984)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-6.pth.tar', 1.2780000020599365)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-7.pth.tar', 1.268000001182556)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-5.pth.tar', 0.9979999998855591)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-4.pth.tar', 0.8739999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-3.pth.tar', 0.7799999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-2.pth.tar', 0.537999999294281)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-1.pth.tar', 0.29199999993324277)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-0.pth.tar', 0.0819999999666214)
+
+Train: 8 [ 0/312 ( 0%)] Loss: 6.87 (6.87) Time: 1.759s, 582.02/s (1.759s, 582.02/s) LR: 3.972e-01 Data: 1.071 (1.071)
+Train: 8 [ 50/312 ( 16%)] Loss: 6.87 (6.87) Time: 0.404s, 2533.24/s (0.432s, 2370.48/s) LR: 3.972e-01 Data: 0.027 (0.048)
+Train: 8 [ 100/312 ( 32%)] Loss: 6.87 (6.88) Time: 0.404s, 2531.74/s (0.419s, 2444.24/s) LR: 3.972e-01 Data: 0.027 (0.038)
+Train: 8 [ 150/312 ( 48%)] Loss: 6.89 (6.88) Time: 0.407s, 2516.09/s (0.415s, 2466.19/s) LR: 3.972e-01 Data: 0.027 (0.034)
+Train: 8 [ 200/312 ( 64%)] Loss: 6.89 (6.88) Time: 0.404s, 2532.63/s (0.413s, 2479.76/s) LR: 3.972e-01 Data: 0.028 (0.033)
+Train: 8 [ 250/312 ( 80%)] Loss: 6.87 (6.88) Time: 0.407s, 2518.95/s (0.412s, 2488.34/s) LR: 3.972e-01 Data: 0.029 (0.032)
+Train: 8 [ 300/312 ( 96%)] Loss: 6.87 (6.88) Time: 0.411s, 2489.77/s (0.411s, 2492.80/s) LR: 3.972e-01 Data: 0.032 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.411 (1.411) Loss: 6.677 ( 6.677) Acc@1: 1.172 ( 1.172) Acc@5: 5.371 ( 5.371)
+Test: [ 48/48] Time: 0.090 (0.321) Loss: 6.651 ( 6.668) Acc@1: 1.651 ( 1.432) Acc@5: 5.542 ( 5.752)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-8.pth.tar', 1.4319999980163574)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-6.pth.tar', 1.2780000020599365)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-7.pth.tar', 1.268000001182556)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-5.pth.tar', 0.9979999998855591)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-4.pth.tar', 0.8739999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-3.pth.tar', 0.7799999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-2.pth.tar', 0.537999999294281)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-1.pth.tar', 0.29199999993324277)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-0.pth.tar', 0.0819999999666214)
+
+Train: 9 [ 0/312 ( 0%)] Loss: 6.88 (6.88) Time: 1.513s, 676.70/s (1.513s, 676.70/s) LR: 3.965e-01 Data: 1.131 (1.131)
+Train: 9 [ 50/312 ( 16%)] Loss: 6.87 (6.87) Time: 0.406s, 2525.04/s (0.431s, 2374.33/s) LR: 3.965e-01 Data: 0.027 (0.049)
+Train: 9 [ 100/312 ( 32%)] Loss: 6.87 (6.87) Time: 0.403s, 2539.76/s (0.418s, 2448.55/s) LR: 3.965e-01 Data: 0.027 (0.039)
+Train: 9 [ 150/312 ( 48%)] Loss: 6.89 (6.87) Time: 0.406s, 2520.61/s (0.414s, 2474.72/s) LR: 3.965e-01 Data: 0.028 (0.035)
+Train: 9 [ 200/312 ( 64%)] Loss: 6.88 (6.87) Time: 0.408s, 2508.89/s (0.412s, 2484.55/s) LR: 3.965e-01 Data: 0.027 (0.033)
+Train: 9 [ 250/312 ( 80%)] Loss: 6.87 (6.87) Time: 0.409s, 2502.06/s (0.411s, 2488.90/s) LR: 3.965e-01 Data: 0.027 (0.032)
+Train: 9 [ 300/312 ( 96%)] Loss: 6.87 (6.87) Time: 0.406s, 2519.21/s (0.411s, 2494.18/s) LR: 3.965e-01 Data: 0.029 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.401 (1.401) Loss: 6.647 ( 6.647) Acc@1: 1.074 ( 1.074) Acc@5: 5.859 ( 5.859)
+Test: [ 48/48] Time: 0.090 (0.320) Loss: 6.619 ( 6.638) Acc@1: 1.415 ( 1.740) Acc@5: 6.368 ( 6.352)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-9.pth.tar', 1.7400000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-8.pth.tar', 1.4319999980163574)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-6.pth.tar', 1.2780000020599365)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-7.pth.tar', 1.268000001182556)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-5.pth.tar', 0.9979999998855591)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-4.pth.tar', 0.8739999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-3.pth.tar', 0.7799999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-2.pth.tar', 0.537999999294281)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-1.pth.tar', 0.29199999993324277)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-0.pth.tar', 0.0819999999666214)
+
+Train: 10 [ 0/312 ( 0%)] Loss: 6.86 (6.86) Time: 1.829s, 559.88/s (1.829s, 559.88/s) LR: 3.956e-01 Data: 1.457 (1.457)
+Train: 10 [ 50/312 ( 16%)] Loss: 6.86 (6.86) Time: 0.411s, 2492.27/s (0.432s, 2369.62/s) LR: 3.956e-01 Data: 0.030 (0.056)
+Train: 10 [ 100/312 ( 32%)] Loss: 6.87 (6.87) Time: 0.407s, 2513.44/s (0.420s, 2440.68/s) LR: 3.956e-01 Data: 0.027 (0.042)
+Train: 10 [ 150/312 ( 48%)] Loss: 6.87 (6.87) Time: 0.405s, 2529.28/s (0.416s, 2464.09/s) LR: 3.956e-01 Data: 0.026 (0.037)
+Train: 10 [ 200/312 ( 64%)] Loss: 6.88 (6.87) Time: 0.405s, 2527.10/s (0.413s, 2480.13/s) LR: 3.956e-01 Data: 0.028 (0.035)
+Train: 10 [ 250/312 ( 80%)] Loss: 6.86 (6.87) Time: 0.405s, 2531.06/s (0.411s, 2489.13/s) LR: 3.956e-01 Data: 0.027 (0.033)
+Train: 10 [ 300/312 ( 96%)] Loss: 6.87 (6.87) Time: 0.408s, 2509.10/s (0.411s, 2492.42/s) LR: 3.956e-01 Data: 0.026 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.403 (1.403) Loss: 6.623 ( 6.623) Acc@1: 1.270 ( 1.270) Acc@5: 6.836 ( 6.836)
+Test: [ 48/48] Time: 0.090 (0.321) Loss: 6.595 ( 6.611) Acc@1: 2.241 ( 1.984) Acc@5: 6.604 ( 6.708)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-10.pth.tar', 1.9839999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-9.pth.tar', 1.7400000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-8.pth.tar', 1.4319999980163574)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-6.pth.tar', 1.2780000020599365)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-7.pth.tar', 1.268000001182556)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-5.pth.tar', 0.9979999998855591)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-4.pth.tar', 0.8739999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-3.pth.tar', 0.7799999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-2.pth.tar', 0.537999999294281)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-1.pth.tar', 0.29199999993324277)
+
+Train: 11 [ 0/312 ( 0%)] Loss: 6.87 (6.87) Time: 1.642s, 623.73/s (1.642s, 623.73/s) LR: 3.947e-01 Data: 1.269 (1.269)
+Train: 11 [ 50/312 ( 16%)] Loss: 6.87 (6.86) Time: 0.405s, 2528.23/s (0.428s, 2390.11/s) LR: 3.947e-01 Data: 0.027 (0.052)
+Train: 11 [ 100/312 ( 32%)] Loss: 6.88 (6.86) Time: 0.406s, 2525.16/s (0.417s, 2457.24/s) LR: 3.947e-01 Data: 0.027 (0.040)
+Train: 11 [ 150/312 ( 48%)] Loss: 6.86 (6.86) Time: 0.409s, 2506.50/s (0.414s, 2475.39/s) LR: 3.947e-01 Data: 0.028 (0.036)
+Train: 11 [ 200/312 ( 64%)] Loss: 6.88 (6.86) Time: 0.404s, 2537.27/s (0.412s, 2487.00/s) LR: 3.947e-01 Data: 0.027 (0.034)
+Train: 11 [ 250/312 ( 80%)] Loss: 6.85 (6.86) Time: 0.404s, 2537.65/s (0.411s, 2494.37/s) LR: 3.947e-01 Data: 0.026 (0.033)
+Train: 11 [ 300/312 ( 96%)] Loss: 6.87 (6.87) Time: 0.408s, 2508.03/s (0.410s, 2498.61/s) LR: 3.947e-01 Data: 0.029 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.415 (1.415) Loss: 6.585 ( 6.585) Acc@1: 2.246 ( 2.246) Acc@5: 8.301 ( 8.301)
+Test: [ 48/48] Time: 0.090 (0.319) Loss: 6.566 ( 6.578) Acc@1: 2.358 ( 2.278) Acc@5: 7.429 ( 7.500)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-11.pth.tar', 2.278000002365112)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-10.pth.tar', 1.9839999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-9.pth.tar', 1.7400000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-8.pth.tar', 1.4319999980163574)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-6.pth.tar', 1.2780000020599365)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-7.pth.tar', 1.268000001182556)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-5.pth.tar', 0.9979999998855591)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-4.pth.tar', 0.8739999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-3.pth.tar', 0.7799999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-2.pth.tar', 0.537999999294281)
+
+Train: 12 [ 0/312 ( 0%)] Loss: 6.85 (6.85) Time: 1.813s, 564.75/s (1.813s, 564.75/s) LR: 3.937e-01 Data: 1.440 (1.440)
+Train: 12 [ 50/312 ( 16%)] Loss: 6.85 (6.85) Time: 0.406s, 2519.65/s (0.434s, 2362.07/s) LR: 3.937e-01 Data: 0.028 (0.055)
+Train: 12 [ 100/312 ( 32%)] Loss: 6.86 (6.86) Time: 0.409s, 2503.22/s (0.421s, 2434.11/s) LR: 3.937e-01 Data: 0.028 (0.041)
+Train: 12 [ 150/312 ( 48%)] Loss: 6.87 (6.86) Time: 0.406s, 2520.82/s (0.416s, 2460.44/s) LR: 3.937e-01 Data: 0.028 (0.037)
+Train: 12 [ 200/312 ( 64%)] Loss: 6.87 (6.86) Time: 0.413s, 2481.67/s (0.414s, 2475.24/s) LR: 3.937e-01 Data: 0.031 (0.034)
+Train: 12 [ 250/312 ( 80%)] Loss: 6.85 (6.86) Time: 0.408s, 2510.57/s (0.412s, 2482.90/s) LR: 3.937e-01 Data: 0.027 (0.033)
+Train: 12 [ 300/312 ( 96%)] Loss: 6.85 (6.86) Time: 0.408s, 2512.42/s (0.412s, 2486.34/s) LR: 3.937e-01 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.413 (1.413) Loss: 6.598 ( 6.598) Acc@1: 1.758 ( 1.758) Acc@5: 6.445 ( 6.445)
+Test: [ 48/48] Time: 0.090 (0.319) Loss: 6.574 ( 6.583) Acc@1: 1.887 ( 2.000) Acc@5: 6.958 ( 6.872)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-11.pth.tar', 2.278000002365112)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-12.pth.tar', 2.0000000014877317)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-10.pth.tar', 1.9839999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-9.pth.tar', 1.7400000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-8.pth.tar', 1.4319999980163574)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-6.pth.tar', 1.2780000020599365)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-7.pth.tar', 1.268000001182556)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-5.pth.tar', 0.9979999998855591)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-4.pth.tar', 0.8739999997329712)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-3.pth.tar', 0.7799999997329712)
+
+Train: 13 [ 0/312 ( 0%)] Loss: 6.85 (6.85) Time: 1.636s, 625.99/s (1.636s, 625.99/s) LR: 3.926e-01 Data: 1.265 (1.265)
+Train: 13 [ 50/312 ( 16%)] Loss: 6.85 (6.85) Time: 0.404s, 2536.56/s (0.430s, 2378.96/s) LR: 3.926e-01 Data: 0.028 (0.052)
+Train: 13 [ 100/312 ( 32%)] Loss: 6.85 (6.85) Time: 0.405s, 2525.35/s (0.417s, 2454.78/s) LR: 3.926e-01 Data: 0.027 (0.040)
+Train: 13 [ 150/312 ( 48%)] Loss: 6.86 (6.85) Time: 0.409s, 2502.58/s (0.414s, 2475.73/s) LR: 3.926e-01 Data: 0.028 (0.036)
+Train: 13 [ 200/312 ( 64%)] Loss: 6.86 (6.85) Time: 0.414s, 2471.56/s (0.412s, 2483.02/s) LR: 3.926e-01 Data: 0.028 (0.034)
+Train: 13 [ 250/312 ( 80%)] Loss: 6.85 (6.85) Time: 0.407s, 2516.30/s (0.412s, 2487.41/s) LR: 3.926e-01 Data: 0.027 (0.033)
+Train: 13 [ 300/312 ( 96%)] Loss: 6.85 (6.86) Time: 0.407s, 2515.20/s (0.411s, 2491.12/s) LR: 3.926e-01 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.415 (1.415) Loss: 6.532 ( 6.532) Acc@1: 2.539 ( 2.539) Acc@5: 8.691 ( 8.691)
+Test: [ 48/48] Time: 0.090 (0.319) Loss: 6.523 ( 6.523) Acc@1: 2.594 ( 2.530) Acc@5: 8.137 ( 8.772)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-13.pth.tar', 2.529999999771118)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-11.pth.tar', 2.278000002365112)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-12.pth.tar', 2.0000000014877317)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-10.pth.tar', 1.9839999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-9.pth.tar', 1.7400000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-8.pth.tar', 1.4319999980163574)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-6.pth.tar', 1.2780000020599365)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-7.pth.tar', 1.268000001182556)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-5.pth.tar', 0.9979999998855591)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-4.pth.tar', 0.8739999997329712)
+
+Train: 14 [ 0/312 ( 0%)] Loss: 6.83 (6.83) Time: 2.030s, 504.51/s (2.030s, 504.51/s) LR: 3.915e-01 Data: 1.656 (1.656)
+Train: 14 [ 50/312 ( 16%)] Loss: 6.84 (6.84) Time: 0.408s, 2511.53/s (0.439s, 2330.28/s) LR: 3.915e-01 Data: 0.024 (0.060)
+Train: 14 [ 100/312 ( 32%)] Loss: 6.85 (6.85) Time: 0.405s, 2530.40/s (0.423s, 2418.56/s) LR: 3.915e-01 Data: 0.028 (0.044)
+Train: 14 [ 150/312 ( 48%)] Loss: 6.85 (6.85) Time: 0.405s, 2531.19/s (0.417s, 2454.53/s) LR: 3.915e-01 Data: 0.029 (0.039)
+Train: 14 [ 200/312 ( 64%)] Loss: 6.85 (6.85) Time: 0.403s, 2538.91/s (0.414s, 2473.78/s) LR: 3.915e-01 Data: 0.027 (0.036)
+Train: 14 [ 250/312 ( 80%)] Loss: 6.88 (6.85) Time: 0.406s, 2519.36/s (0.412s, 2484.14/s) LR: 3.915e-01 Data: 0.029 (0.034)
+Train: 14 [ 300/312 ( 96%)] Loss: 6.87 (6.85) Time: 0.408s, 2509.89/s (0.412s, 2488.44/s) LR: 3.915e-01 Data: 0.027 (0.033)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.418 (1.418) Loss: 6.511 ( 6.511) Acc@1: 2.441 ( 2.441) Acc@5: 8.789 ( 8.789)
+Test: [ 48/48] Time: 0.090 (0.319) Loss: 6.493 ( 6.500) Acc@1: 2.241 ( 2.732) Acc@5: 9.670 ( 9.316)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-14.pth.tar', 2.7319999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-13.pth.tar', 2.529999999771118)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-11.pth.tar', 2.278000002365112)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-12.pth.tar', 2.0000000014877317)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-10.pth.tar', 1.9839999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-9.pth.tar', 1.7400000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-8.pth.tar', 1.4319999980163574)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-6.pth.tar', 1.2780000020599365)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-7.pth.tar', 1.268000001182556)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-5.pth.tar', 0.9979999998855591)
+
+Train: 15 [ 0/312 ( 0%)] Loss: 6.85 (6.85) Time: 1.692s, 605.04/s (1.692s, 605.04/s) LR: 3.902e-01 Data: 1.322 (1.322)
+Train: 15 [ 50/312 ( 16%)] Loss: 6.85 (6.84) Time: 0.400s, 2561.73/s (0.428s, 2394.10/s) LR: 3.902e-01 Data: 0.027 (0.053)
+Train: 15 [ 100/312 ( 32%)] Loss: 6.82 (6.84) Time: 0.405s, 2525.71/s (0.415s, 2465.75/s) LR: 3.902e-01 Data: 0.029 (0.040)
+Train: 15 [ 150/312 ( 48%)] Loss: 6.84 (6.84) Time: 0.407s, 2518.66/s (0.412s, 2486.21/s) LR: 3.902e-01 Data: 0.028 (0.036)
+Train: 15 [ 200/312 ( 64%)] Loss: 6.86 (6.84) Time: 0.408s, 2509.99/s (0.411s, 2492.28/s) LR: 3.902e-01 Data: 0.029 (0.034)
+Train: 15 [ 250/312 ( 80%)] Loss: 6.84 (6.84) Time: 0.408s, 2510.05/s (0.410s, 2494.68/s) LR: 3.902e-01 Data: 0.028 (0.033)
+Train: 15 [ 300/312 ( 96%)] Loss: 6.83 (6.84) Time: 0.406s, 2524.05/s (0.410s, 2498.32/s) LR: 3.902e-01 Data: 0.029 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.397 (1.397) Loss: 6.520 ( 6.520) Acc@1: 2.148 ( 2.148) Acc@5: 7.324 ( 7.324)
+Test: [ 48/48] Time: 0.090 (0.319) Loss: 6.510 ( 6.503) Acc@1: 2.123 ( 2.402) Acc@5: 7.665 ( 7.836)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-14.pth.tar', 2.7319999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-13.pth.tar', 2.529999999771118)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-15.pth.tar', 2.4020000009155273)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-11.pth.tar', 2.278000002365112)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-12.pth.tar', 2.0000000014877317)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-10.pth.tar', 1.9839999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-9.pth.tar', 1.7400000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-8.pth.tar', 1.4319999980163574)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-6.pth.tar', 1.2780000020599365)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-7.pth.tar', 1.268000001182556)
+
+Train: 16 [ 0/312 ( 0%)] Loss: 6.84 (6.84) Time: 1.508s, 679.15/s (1.508s, 679.15/s) LR: 3.889e-01 Data: 1.133 (1.133)
+Train: 16 [ 50/312 ( 16%)] Loss: 6.83 (6.83) Time: 0.406s, 2523.00/s (0.427s, 2398.21/s) LR: 3.889e-01 Data: 0.028 (0.049)
+Train: 16 [ 100/312 ( 32%)] Loss: 6.83 (6.83) Time: 0.412s, 2484.10/s (0.418s, 2451.66/s) LR: 3.889e-01 Data: 0.028 (0.038)
+Train: 16 [ 150/312 ( 48%)] Loss: 6.85 (6.83) Time: 0.407s, 2513.95/s (0.414s, 2470.53/s) LR: 3.889e-01 Data: 0.029 (0.035)
+Train: 16 [ 200/312 ( 64%)] Loss: 6.85 (6.84) Time: 0.408s, 2511.28/s (0.413s, 2480.33/s) LR: 3.889e-01 Data: 0.028 (0.033)
+Train: 16 [ 250/312 ( 80%)] Loss: 6.86 (6.84) Time: 0.409s, 2505.52/s (0.412s, 2484.92/s) LR: 3.889e-01 Data: 0.029 (0.032)
+Train: 16 [ 300/312 ( 96%)] Loss: 6.85 (6.84) Time: 0.407s, 2517.15/s (0.411s, 2488.92/s) LR: 3.889e-01 Data: 0.026 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.709 (1.709) Loss: 6.434 ( 6.434) Acc@1: 2.832 ( 2.832) Acc@5: 8.984 ( 8.984)
+Test: [ 48/48] Time: 0.091 (0.321) Loss: 6.410 ( 6.428) Acc@1: 3.066 ( 3.166) Acc@5: 11.321 ( 10.388)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-16.pth.tar', 3.165999998626709)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-14.pth.tar', 2.7319999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-13.pth.tar', 2.529999999771118)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-15.pth.tar', 2.4020000009155273)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-11.pth.tar', 2.278000002365112)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-12.pth.tar', 2.0000000014877317)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-10.pth.tar', 1.9839999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-9.pth.tar', 1.7400000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-8.pth.tar', 1.4319999980163574)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-6.pth.tar', 1.2780000020599365)
+
+Train: 17 [ 0/312 ( 0%)] Loss: 6.83 (6.83) Time: 1.597s, 641.08/s (1.597s, 641.08/s) LR: 3.875e-01 Data: 1.091 (1.091)
+Train: 17 [ 50/312 ( 16%)] Loss: 6.84 (6.82) Time: 0.408s, 2508.79/s (0.432s, 2369.93/s) LR: 3.875e-01 Data: 0.029 (0.049)
+Train: 17 [ 100/312 ( 32%)] Loss: 6.83 (6.83) Time: 0.407s, 2517.83/s (0.420s, 2440.24/s) LR: 3.875e-01 Data: 0.027 (0.038)
+Train: 17 [ 150/312 ( 48%)] Loss: 6.83 (6.83) Time: 0.408s, 2507.14/s (0.416s, 2463.05/s) LR: 3.875e-01 Data: 0.026 (0.035)
+Train: 17 [ 200/312 ( 64%)] Loss: 6.83 (6.83) Time: 0.406s, 2519.12/s (0.414s, 2474.07/s) LR: 3.875e-01 Data: 0.027 (0.033)
+Train: 17 [ 250/312 ( 80%)] Loss: 6.83 (6.83) Time: 0.410s, 2499.26/s (0.413s, 2481.05/s) LR: 3.875e-01 Data: 0.028 (0.032)
+Train: 17 [ 300/312 ( 96%)] Loss: 6.82 (6.83) Time: 0.405s, 2531.31/s (0.412s, 2485.46/s) LR: 3.875e-01 Data: 0.025 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.402 (1.402) Loss: 6.442 ( 6.442) Acc@1: 3.223 ( 3.223) Acc@5: 10.938 ( 10.938)
+Test: [ 48/48] Time: 0.090 (0.320) Loss: 6.417 ( 6.432) Acc@1: 3.538 ( 3.414) Acc@5: 9.906 ( 10.560)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-17.pth.tar', 3.414000001525879)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-16.pth.tar', 3.165999998626709)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-14.pth.tar', 2.7319999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-13.pth.tar', 2.529999999771118)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-15.pth.tar', 2.4020000009155273)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-11.pth.tar', 2.278000002365112)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-12.pth.tar', 2.0000000014877317)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-10.pth.tar', 1.9839999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-9.pth.tar', 1.7400000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-8.pth.tar', 1.4319999980163574)
+
+Train: 18 [ 0/312 ( 0%)] Loss: 6.83 (6.83) Time: 1.600s, 639.86/s (1.600s, 639.86/s) LR: 3.860e-01 Data: 1.227 (1.227)
+Train: 18 [ 50/312 ( 16%)] Loss: 6.84 (6.82) Time: 0.406s, 2525.05/s (0.430s, 2379.89/s) LR: 3.860e-01 Data: 0.027 (0.051)
+Train: 18 [ 100/312 ( 32%)] Loss: 6.82 (6.82) Time: 0.410s, 2498.36/s (0.420s, 2439.42/s) LR: 3.860e-01 Data: 0.027 (0.039)
+Train: 18 [ 150/312 ( 48%)] Loss: 6.80 (6.82) Time: 0.411s, 2494.01/s (0.416s, 2461.40/s) LR: 3.860e-01 Data: 0.031 (0.036)
+Train: 18 [ 200/312 ( 64%)] Loss: 6.83 (6.82) Time: 0.407s, 2515.99/s (0.414s, 2472.25/s) LR: 3.860e-01 Data: 0.028 (0.034)
+Train: 18 [ 250/312 ( 80%)] Loss: 6.84 (6.83) Time: 0.404s, 2533.87/s (0.413s, 2481.35/s) LR: 3.860e-01 Data: 0.024 (0.032)
+Train: 18 [ 300/312 ( 96%)] Loss: 6.83 (6.83) Time: 0.409s, 2506.69/s (0.411s, 2488.92/s) LR: 3.860e-01 Data: 0.029 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.414 (1.414) Loss: 6.301 ( 6.301) Acc@1: 3.711 ( 3.711) Acc@5: 10.254 ( 10.254)
+Test: [ 48/48] Time: 0.090 (0.321) Loss: 6.283 ( 6.300) Acc@1: 3.892 ( 3.728) Acc@5: 12.146 ( 11.908)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-18.pth.tar', 3.728000001678467)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-17.pth.tar', 3.414000001525879)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-16.pth.tar', 3.165999998626709)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-14.pth.tar', 2.7319999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-13.pth.tar', 2.529999999771118)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-15.pth.tar', 2.4020000009155273)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-11.pth.tar', 2.278000002365112)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-12.pth.tar', 2.0000000014877317)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-10.pth.tar', 1.9839999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-9.pth.tar', 1.7400000006103515)
+
+Train: 19 [ 0/312 ( 0%)] Loss: 6.81 (6.81) Time: 1.631s, 627.79/s (1.631s, 627.79/s) LR: 3.844e-01 Data: 1.258 (1.258)
+Train: 19 [ 50/312 ( 16%)] Loss: 6.84 (6.81) Time: 0.409s, 2504.37/s (0.432s, 2372.43/s) LR: 3.844e-01 Data: 0.027 (0.051)
+Train: 19 [ 100/312 ( 32%)] Loss: 6.82 (6.81) Time: 0.409s, 2505.32/s (0.421s, 2435.08/s) LR: 3.844e-01 Data: 0.027 (0.039)
+Train: 19 [ 150/312 ( 48%)] Loss: 6.85 (6.81) Time: 0.407s, 2516.07/s (0.416s, 2462.10/s) LR: 3.844e-01 Data: 0.028 (0.035)
+Train: 19 [ 200/312 ( 64%)] Loss: 6.81 (6.82) Time: 0.406s, 2522.70/s (0.413s, 2477.20/s) LR: 3.844e-01 Data: 0.027 (0.033)
+Train: 19 [ 250/312 ( 80%)] Loss: 6.81 (6.82) Time: 0.408s, 2508.13/s (0.412s, 2483.72/s) LR: 3.844e-01 Data: 0.027 (0.032)
+Train: 19 [ 300/312 ( 96%)] Loss: 6.83 (6.82) Time: 0.405s, 2530.88/s (0.412s, 2487.47/s) LR: 3.844e-01 Data: 0.027 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.406 (1.406) Loss: 6.338 ( 6.338) Acc@1: 4.297 ( 4.297) Acc@5: 12.109 ( 12.109)
+Test: [ 48/48] Time: 0.090 (0.321) Loss: 6.315 ( 6.333) Acc@1: 5.425 ( 4.554) Acc@5: 14.505 ( 13.418)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-19.pth.tar', 4.553999996948242)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-18.pth.tar', 3.728000001678467)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-17.pth.tar', 3.414000001525879)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-16.pth.tar', 3.165999998626709)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-14.pth.tar', 2.7319999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-13.pth.tar', 2.529999999771118)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-15.pth.tar', 2.4020000009155273)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-11.pth.tar', 2.278000002365112)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-12.pth.tar', 2.0000000014877317)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-10.pth.tar', 1.9839999996185302)
+
+Train: 20 [ 0/312 ( 0%)] Loss: 6.78 (6.78) Time: 1.498s, 683.58/s (1.498s, 683.58/s) LR: 3.827e-01 Data: 1.125 (1.125)
+Train: 20 [ 50/312 ( 16%)] Loss: 6.80 (6.80) Time: 0.406s, 2523.72/s (0.426s, 2402.50/s) LR: 3.827e-01 Data: 0.028 (0.049)
+Train: 20 [ 100/312 ( 32%)] Loss: 6.78 (6.80) Time: 0.409s, 2504.20/s (0.417s, 2457.28/s) LR: 3.827e-01 Data: 0.027 (0.038)
+Train: 20 [ 150/312 ( 48%)] Loss: 6.81 (6.81) Time: 0.408s, 2509.46/s (0.414s, 2472.50/s) LR: 3.827e-01 Data: 0.028 (0.035)
+Train: 20 [ 200/312 ( 64%)] Loss: 6.80 (6.81) Time: 0.406s, 2521.21/s (0.412s, 2483.71/s) LR: 3.827e-01 Data: 0.028 (0.033)
+Train: 20 [ 250/312 ( 80%)] Loss: 6.82 (6.81) Time: 0.408s, 2512.51/s (0.411s, 2491.33/s) LR: 3.827e-01 Data: 0.030 (0.032)
+Train: 20 [ 300/312 ( 96%)] Loss: 6.83 (6.81) Time: 0.410s, 2498.42/s (0.410s, 2494.86/s) LR: 3.827e-01 Data: 0.028 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.400 (1.400) Loss: 6.320 ( 6.320) Acc@1: 3.516 ( 3.516) Acc@5: 12.207 ( 12.207)
+Test: [ 48/48] Time: 0.090 (0.320) Loss: 6.286 ( 6.309) Acc@1: 4.599 ( 4.440) Acc@5: 14.387 ( 12.992)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-19.pth.tar', 4.553999996948242)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-20.pth.tar', 4.440000001983643)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-18.pth.tar', 3.728000001678467)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-17.pth.tar', 3.414000001525879)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-16.pth.tar', 3.165999998626709)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-14.pth.tar', 2.7319999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-13.pth.tar', 2.529999999771118)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-15.pth.tar', 2.4020000009155273)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-11.pth.tar', 2.278000002365112)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-12.pth.tar', 2.0000000014877317)
+
+Train: 21 [ 0/312 ( 0%)] Loss: 6.78 (6.78) Time: 1.630s, 628.15/s (1.630s, 628.15/s) LR: 3.810e-01 Data: 1.256 (1.256)
+Train: 21 [ 50/312 ( 16%)] Loss: 6.79 (6.80) Time: 0.406s, 2524.81/s (0.432s, 2372.20/s) LR: 3.810e-01 Data: 0.027 (0.052)
+Train: 21 [ 100/312 ( 32%)] Loss: 6.80 (6.80) Time: 0.415s, 2468.99/s (0.420s, 2435.57/s) LR: 3.810e-01 Data: 0.031 (0.040)
+Train: 21 [ 150/312 ( 48%)] Loss: 6.82 (6.80) Time: 0.405s, 2531.25/s (0.416s, 2460.47/s) LR: 3.810e-01 Data: 0.029 (0.036)
+Train: 21 [ 200/312 ( 64%)] Loss: 6.81 (6.80) Time: 0.405s, 2531.45/s (0.413s, 2477.59/s) LR: 3.810e-01 Data: 0.028 (0.034)
+Train: 21 [ 250/312 ( 80%)] Loss: 6.83 (6.80) Time: 0.404s, 2532.95/s (0.411s, 2489.13/s) LR: 3.810e-01 Data: 0.027 (0.033)
+Train: 21 [ 300/312 ( 96%)] Loss: 6.83 (6.80) Time: 0.406s, 2521.88/s (0.410s, 2495.57/s) LR: 3.810e-01 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.402 (1.402) Loss: 6.253 ( 6.253) Acc@1: 4.199 ( 4.199) Acc@5: 12.012 ( 12.012)
+Test: [ 48/48] Time: 0.090 (0.318) Loss: 6.199 ( 6.228) Acc@1: 4.009 ( 4.344) Acc@5: 13.325 ( 12.824)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-19.pth.tar', 4.553999996948242)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-20.pth.tar', 4.440000001983643)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-21.pth.tar', 4.344000004425049)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-18.pth.tar', 3.728000001678467)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-17.pth.tar', 3.414000001525879)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-16.pth.tar', 3.165999998626709)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-14.pth.tar', 2.7319999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-13.pth.tar', 2.529999999771118)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-15.pth.tar', 2.4020000009155273)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-11.pth.tar', 2.278000002365112)
+
+Train: 22 [ 0/312 ( 0%)] Loss: 6.79 (6.79) Time: 1.517s, 674.94/s (1.517s, 674.94/s) LR: 3.791e-01 Data: 1.142 (1.142)
+Train: 22 [ 50/312 ( 16%)] Loss: 6.80 (6.78) Time: 0.410s, 2494.52/s (0.432s, 2371.07/s) LR: 3.791e-01 Data: 0.030 (0.051)
+Train: 22 [ 100/312 ( 32%)] Loss: 6.80 (6.79) Time: 0.405s, 2525.71/s (0.420s, 2440.16/s) LR: 3.791e-01 Data: 0.028 (0.040)
+Train: 22 [ 150/312 ( 48%)] Loss: 6.80 (6.79) Time: 0.406s, 2524.92/s (0.415s, 2466.93/s) LR: 3.791e-01 Data: 0.027 (0.036)
+Train: 22 [ 200/312 ( 64%)] Loss: 6.80 (6.79) Time: 0.408s, 2507.65/s (0.413s, 2478.65/s) LR: 3.791e-01 Data: 0.027 (0.034)
+Train: 22 [ 250/312 ( 80%)] Loss: 6.81 (6.79) Time: 0.406s, 2521.92/s (0.412s, 2483.54/s) LR: 3.791e-01 Data: 0.027 (0.032)
+Train: 22 [ 300/312 ( 96%)] Loss: 6.79 (6.80) Time: 0.406s, 2520.20/s (0.411s, 2489.37/s) LR: 3.791e-01 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.392 (1.392) Loss: 6.269 ( 6.269) Acc@1: 5.078 ( 5.078) Acc@5: 13.477 ( 13.477)
+Test: [ 48/48] Time: 0.090 (0.318) Loss: 6.222 ( 6.255) Acc@1: 4.835 ( 4.732) Acc@5: 15.684 ( 14.232)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-22.pth.tar', 4.731999999389648)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-19.pth.tar', 4.553999996948242)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-20.pth.tar', 4.440000001983643)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-21.pth.tar', 4.344000004425049)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-18.pth.tar', 3.728000001678467)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-17.pth.tar', 3.414000001525879)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-16.pth.tar', 3.165999998626709)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-14.pth.tar', 2.7319999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-13.pth.tar', 2.529999999771118)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-15.pth.tar', 2.4020000009155273)
+
+Train: 23 [ 0/312 ( 0%)] Loss: 6.77 (6.77) Time: 1.661s, 616.61/s (1.661s, 616.61/s) LR: 3.772e-01 Data: 1.286 (1.286)
+Train: 23 [ 50/312 ( 16%)] Loss: 6.77 (6.78) Time: 0.407s, 2517.78/s (0.432s, 2373.04/s) LR: 3.772e-01 Data: 0.027 (0.053)
+Train: 23 [ 100/312 ( 32%)] Loss: 6.80 (6.78) Time: 0.406s, 2521.13/s (0.419s, 2442.90/s) LR: 3.772e-01 Data: 0.028 (0.041)
+Train: 23 [ 150/312 ( 48%)] Loss: 6.80 (6.78) Time: 0.408s, 2512.68/s (0.416s, 2463.10/s) LR: 3.772e-01 Data: 0.028 (0.036)
+Train: 23 [ 200/312 ( 64%)] Loss: 6.79 (6.79) Time: 0.409s, 2506.04/s (0.414s, 2475.35/s) LR: 3.772e-01 Data: 0.028 (0.034)
+Train: 23 [ 250/312 ( 80%)] Loss: 6.78 (6.79) Time: 0.408s, 2509.64/s (0.413s, 2481.61/s) LR: 3.772e-01 Data: 0.028 (0.033)
+Train: 23 [ 300/312 ( 96%)] Loss: 6.78 (6.79) Time: 0.407s, 2515.45/s (0.412s, 2485.87/s) LR: 3.772e-01 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.406 (1.406) Loss: 6.226 ( 6.226) Acc@1: 4.199 ( 4.199) Acc@5: 13.281 ( 13.281)
+Test: [ 48/48] Time: 0.091 (0.320) Loss: 6.196 ( 6.233) Acc@1: 5.189 ( 4.934) Acc@5: 15.212 ( 14.368)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-23.pth.tar', 4.933999999542237)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-22.pth.tar', 4.731999999389648)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-19.pth.tar', 4.553999996948242)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-20.pth.tar', 4.440000001983643)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-21.pth.tar', 4.344000004425049)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-18.pth.tar', 3.728000001678467)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-17.pth.tar', 3.414000001525879)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-16.pth.tar', 3.165999998626709)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-14.pth.tar', 2.7319999996185302)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-13.pth.tar', 2.529999999771118)
+
+Train: 24 [ 0/312 ( 0%)] Loss: 6.78 (6.78) Time: 1.782s, 574.75/s (1.782s, 574.75/s) LR: 3.753e-01 Data: 1.405 (1.405)
+Train: 24 [ 50/312 ( 16%)] Loss: 6.76 (6.77) Time: 0.406s, 2520.54/s (0.434s, 2359.20/s) LR: 3.753e-01 Data: 0.026 (0.055)
+Train: 24 [ 100/312 ( 32%)] Loss: 6.77 (6.77) Time: 0.406s, 2523.21/s (0.420s, 2436.48/s) LR: 3.753e-01 Data: 0.027 (0.041)
+Train: 24 [ 150/312 ( 48%)] Loss: 6.76 (6.77) Time: 0.412s, 2487.56/s (0.416s, 2460.35/s) LR: 3.753e-01 Data: 0.028 (0.037)
+Train: 24 [ 200/312 ( 64%)] Loss: 6.77 (6.78) Time: 0.407s, 2514.62/s (0.414s, 2471.65/s) LR: 3.753e-01 Data: 0.028 (0.035)
+Train: 24 [ 250/312 ( 80%)] Loss: 6.77 (6.78) Time: 0.408s, 2512.50/s (0.413s, 2479.32/s) LR: 3.753e-01 Data: 0.028 (0.033)
+Train: 24 [ 300/312 ( 96%)] Loss: 6.82 (6.78) Time: 0.407s, 2515.51/s (0.412s, 2485.44/s) LR: 3.753e-01 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.402 (1.402) Loss: 6.217 ( 6.217) Acc@1: 4.980 ( 4.980) Acc@5: 14.258 ( 14.258)
+Test: [ 48/48] Time: 0.091 (0.318) Loss: 6.156 ( 6.205) Acc@1: 7.075 ( 5.460) Acc@5: 20.047 ( 15.936)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-24.pth.tar', 5.459999994964599)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-23.pth.tar', 4.933999999542237)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-22.pth.tar', 4.731999999389648)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-19.pth.tar', 4.553999996948242)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-20.pth.tar', 4.440000001983643)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-21.pth.tar', 4.344000004425049)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-18.pth.tar', 3.728000001678467)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-17.pth.tar', 3.414000001525879)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-16.pth.tar', 3.165999998626709)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-14.pth.tar', 2.7319999996185302)
+
+Train: 25 [ 0/312 ( 0%)] Loss: 6.74 (6.74) Time: 1.685s, 607.70/s (1.685s, 607.70/s) LR: 3.732e-01 Data: 1.309 (1.309)
+Train: 25 [ 50/312 ( 16%)] Loss: 6.75 (6.76) Time: 0.407s, 2515.16/s (0.434s, 2360.09/s) LR: 3.732e-01 Data: 0.027 (0.053)
+Train: 25 [ 100/312 ( 32%)] Loss: 6.77 (6.76) Time: 0.408s, 2507.35/s (0.421s, 2431.39/s) LR: 3.732e-01 Data: 0.027 (0.041)
+Train: 25 [ 150/312 ( 48%)] Loss: 6.75 (6.77) Time: 0.408s, 2510.36/s (0.417s, 2455.63/s) LR: 3.732e-01 Data: 0.027 (0.036)
+Train: 25 [ 200/312 ( 64%)] Loss: 6.77 (6.77) Time: 0.407s, 2514.77/s (0.415s, 2467.88/s) LR: 3.732e-01 Data: 0.027 (0.034)
+Train: 25 [ 250/312 ( 80%)] Loss: 6.77 (6.77) Time: 0.405s, 2525.47/s (0.413s, 2477.09/s) LR: 3.732e-01 Data: 0.027 (0.033)
+Train: 25 [ 300/312 ( 96%)] Loss: 6.81 (6.77) Time: 0.407s, 2516.00/s (0.412s, 2483.19/s) LR: 3.732e-01 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.406 (1.406) Loss: 6.131 ( 6.131) Acc@1: 4.980 ( 4.980) Acc@5: 15.039 ( 15.039)
+Test: [ 48/48] Time: 0.091 (0.319) Loss: 6.080 ( 6.123) Acc@1: 6.958 ( 5.836) Acc@5: 17.689 ( 16.462)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-25.pth.tar', 5.836000000305176)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-24.pth.tar', 5.459999994964599)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-23.pth.tar', 4.933999999542237)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-22.pth.tar', 4.731999999389648)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-19.pth.tar', 4.553999996948242)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-20.pth.tar', 4.440000001983643)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-21.pth.tar', 4.344000004425049)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-18.pth.tar', 3.728000001678467)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-17.pth.tar', 3.414000001525879)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-16.pth.tar', 3.165999998626709)
+
+Train: 26 [ 0/312 ( 0%)] Loss: 6.72 (6.72) Time: 1.839s, 556.82/s (1.839s, 556.82/s) LR: 3.711e-01 Data: 1.464 (1.464)
+Train: 26 [ 50/312 ( 16%)] Loss: 6.75 (6.74) Time: 0.404s, 2536.22/s (0.438s, 2340.16/s) LR: 3.711e-01 Data: 0.026 (0.057)
+Train: 26 [ 100/312 ( 32%)] Loss: 6.77 (6.75) Time: 0.400s, 2557.00/s (0.420s, 2435.24/s) LR: 3.711e-01 Data: 0.027 (0.042)
+Train: 26 [ 150/312 ( 48%)] Loss: 6.77 (6.75) Time: 0.401s, 2554.33/s (0.414s, 2471.83/s) LR: 3.711e-01 Data: 0.027 (0.037)
+Train: 26 [ 200/312 ( 64%)] Loss: 6.77 (6.76) Time: 0.403s, 2541.91/s (0.411s, 2489.98/s) LR: 3.711e-01 Data: 0.028 (0.035)
+Train: 26 [ 250/312 ( 80%)] Loss: 6.79 (6.76) Time: 0.403s, 2542.71/s (0.410s, 2500.09/s) LR: 3.711e-01 Data: 0.028 (0.034)
+Train: 26 [ 300/312 ( 96%)] Loss: 6.78 (6.76) Time: 0.404s, 2531.61/s (0.409s, 2505.39/s) LR: 3.711e-01 Data: 0.027 (0.033)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.432 (1.432) Loss: 6.151 ( 6.151) Acc@1: 5.371 ( 5.371) Acc@5: 15.527 ( 15.527)
+Test: [ 48/48] Time: 0.091 (0.320) Loss: 6.117 ( 6.149) Acc@1: 7.901 ( 6.208) Acc@5: 19.458 ( 17.032)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-26.pth.tar', 6.208000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-25.pth.tar', 5.836000000305176)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-24.pth.tar', 5.459999994964599)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-23.pth.tar', 4.933999999542237)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-22.pth.tar', 4.731999999389648)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-19.pth.tar', 4.553999996948242)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-20.pth.tar', 4.440000001983643)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-21.pth.tar', 4.344000004425049)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-18.pth.tar', 3.728000001678467)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-17.pth.tar', 3.414000001525879)
+
+Train: 27 [ 0/312 ( 0%)] Loss: 6.74 (6.74) Time: 1.753s, 584.23/s (1.753s, 584.23/s) LR: 3.689e-01 Data: 1.290 (1.290)
+Train: 27 [ 50/312 ( 16%)] Loss: 6.77 (6.74) Time: 0.408s, 2512.43/s (0.434s, 2357.11/s) LR: 3.689e-01 Data: 0.025 (0.052)
+Train: 27 [ 100/312 ( 32%)] Loss: 6.73 (6.75) Time: 0.406s, 2520.96/s (0.422s, 2426.95/s) LR: 3.689e-01 Data: 0.028 (0.040)
+Train: 27 [ 150/312 ( 48%)] Loss: 6.79 (6.75) Time: 0.405s, 2530.26/s (0.417s, 2457.36/s) LR: 3.689e-01 Data: 0.026 (0.036)
+Train: 27 [ 200/312 ( 64%)] Loss: 6.80 (6.75) Time: 0.409s, 2506.31/s (0.414s, 2472.61/s) LR: 3.689e-01 Data: 0.028 (0.034)
+Train: 27 [ 250/312 ( 80%)] Loss: 6.80 (6.75) Time: 0.407s, 2512.94/s (0.413s, 2479.57/s) LR: 3.689e-01 Data: 0.028 (0.033)
+Train: 27 [ 300/312 ( 96%)] Loss: 6.79 (6.75) Time: 0.408s, 2509.71/s (0.412s, 2483.64/s) LR: 3.689e-01 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.416 (1.416) Loss: 6.221 ( 6.221) Acc@1: 4.297 ( 4.297) Acc@5: 14.941 ( 14.941)
+Test: [ 48/48] Time: 0.090 (0.318) Loss: 6.155 ( 6.196) Acc@1: 6.014 ( 5.806) Acc@5: 16.981 ( 16.050)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-26.pth.tar', 6.208000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-25.pth.tar', 5.836000000305176)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-27.pth.tar', 5.806000002593994)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-24.pth.tar', 5.459999994964599)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-23.pth.tar', 4.933999999542237)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-22.pth.tar', 4.731999999389648)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-19.pth.tar', 4.553999996948242)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-20.pth.tar', 4.440000001983643)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-21.pth.tar', 4.344000004425049)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-18.pth.tar', 3.728000001678467)
+
+Train: 28 [ 0/312 ( 0%)] Loss: 6.75 (6.75) Time: 1.647s, 621.68/s (1.647s, 621.68/s) LR: 3.666e-01 Data: 1.206 (1.206)
+Train: 28 [ 50/312 ( 16%)] Loss: 6.70 (6.72) Time: 0.407s, 2515.72/s (0.431s, 2375.07/s) LR: 3.666e-01 Data: 0.028 (0.051)
+Train: 28 [ 100/312 ( 32%)] Loss: 6.76 (6.73) Time: 0.406s, 2524.26/s (0.420s, 2439.30/s) LR: 3.666e-01 Data: 0.028 (0.039)
+Train: 28 [ 150/312 ( 48%)] Loss: 6.77 (6.74) Time: 0.404s, 2532.52/s (0.415s, 2465.67/s) LR: 3.666e-01 Data: 0.026 (0.035)
+Train: 28 [ 200/312 ( 64%)] Loss: 6.76 (6.74) Time: 0.409s, 2506.01/s (0.413s, 2480.82/s) LR: 3.666e-01 Data: 0.028 (0.033)
+Train: 28 [ 250/312 ( 80%)] Loss: 6.76 (6.74) Time: 0.407s, 2516.64/s (0.411s, 2488.81/s) LR: 3.666e-01 Data: 0.024 (0.032)
+Train: 28 [ 300/312 ( 96%)] Loss: 6.78 (6.75) Time: 0.408s, 2510.06/s (0.411s, 2491.87/s) LR: 3.666e-01 Data: 0.027 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.428 (1.428) Loss: 6.123 ( 6.123) Acc@1: 5.859 ( 5.859) Acc@5: 16.602 ( 16.602)
+Test: [ 48/48] Time: 0.089 (0.318) Loss: 6.074 ( 6.122) Acc@1: 7.901 ( 6.060) Acc@5: 19.458 ( 16.686)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-26.pth.tar', 6.208000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-28.pth.tar', 6.059999989929199)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-25.pth.tar', 5.836000000305176)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-27.pth.tar', 5.806000002593994)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-24.pth.tar', 5.459999994964599)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-23.pth.tar', 4.933999999542237)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-22.pth.tar', 4.731999999389648)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-19.pth.tar', 4.553999996948242)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-20.pth.tar', 4.440000001983643)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-21.pth.tar', 4.344000004425049)
+
+Train: 29 [ 0/312 ( 0%)] Loss: 6.70 (6.70) Time: 1.560s, 656.50/s (1.560s, 656.50/s) LR: 3.642e-01 Data: 1.189 (1.189)
+Train: 29 [ 50/312 ( 16%)] Loss: 6.73 (6.72) Time: 0.402s, 2545.62/s (0.426s, 2404.92/s) LR: 3.642e-01 Data: 0.027 (0.051)
+Train: 29 [ 100/312 ( 32%)] Loss: 6.74 (6.72) Time: 0.403s, 2540.37/s (0.415s, 2467.88/s) LR: 3.642e-01 Data: 0.027 (0.040)
+Train: 29 [ 150/312 ( 48%)] Loss: 6.73 (6.73) Time: 0.405s, 2527.20/s (0.412s, 2487.19/s) LR: 3.642e-01 Data: 0.028 (0.036)
+Train: 29 [ 200/312 ( 64%)] Loss: 6.74 (6.73) Time: 0.408s, 2508.28/s (0.411s, 2493.91/s) LR: 3.642e-01 Data: 0.028 (0.034)
+Train: 29 [ 250/312 ( 80%)] Loss: 6.76 (6.73) Time: 0.405s, 2530.52/s (0.410s, 2498.22/s) LR: 3.642e-01 Data: 0.028 (0.032)
+Train: 29 [ 300/312 ( 96%)] Loss: 6.73 (6.74) Time: 0.405s, 2530.60/s (0.409s, 2503.81/s) LR: 3.642e-01 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.407 (1.407) Loss: 6.060 ( 6.060) Acc@1: 6.055 ( 6.055) Acc@5: 17.969 ( 17.969)
+Test: [ 48/48] Time: 0.090 (0.321) Loss: 5.970 ( 6.036) Acc@1: 8.255 ( 7.030) Acc@5: 20.283 ( 18.748)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-29.pth.tar', 7.029999998168945)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-26.pth.tar', 6.208000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-28.pth.tar', 6.059999989929199)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-25.pth.tar', 5.836000000305176)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-27.pth.tar', 5.806000002593994)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-24.pth.tar', 5.459999994964599)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-23.pth.tar', 4.933999999542237)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-22.pth.tar', 4.731999999389648)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-19.pth.tar', 4.553999996948242)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-20.pth.tar', 4.440000001983643)
+
+Train: 30 [ 0/312 ( 0%)] Loss: 6.69 (6.69) Time: 1.743s, 587.33/s (1.743s, 587.33/s) LR: 3.618e-01 Data: 1.280 (1.280)
+Train: 30 [ 50/312 ( 16%)] Loss: 6.72 (6.70) Time: 0.406s, 2519.99/s (0.432s, 2367.73/s) LR: 3.618e-01 Data: 0.028 (0.054)
+Train: 30 [ 100/312 ( 32%)] Loss: 6.70 (6.71) Time: 0.407s, 2518.38/s (0.420s, 2439.62/s) LR: 3.618e-01 Data: 0.028 (0.041)
+Train: 30 [ 150/312 ( 48%)] Loss: 6.72 (6.72) Time: 0.407s, 2513.06/s (0.416s, 2461.42/s) LR: 3.618e-01 Data: 0.027 (0.037)
+Train: 30 [ 200/312 ( 64%)] Loss: 6.73 (6.72) Time: 0.405s, 2529.71/s (0.414s, 2475.08/s) LR: 3.618e-01 Data: 0.028 (0.035)
+Train: 30 [ 250/312 ( 80%)] Loss: 6.72 (6.72) Time: 0.405s, 2528.78/s (0.412s, 2485.24/s) LR: 3.618e-01 Data: 0.029 (0.033)
+Train: 30 [ 300/312 ( 96%)] Loss: 6.77 (6.73) Time: 0.409s, 2504.63/s (0.411s, 2490.43/s) LR: 3.618e-01 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.415 (1.415) Loss: 6.115 ( 6.115) Acc@1: 6.445 ( 6.445) Acc@5: 15.723 ( 15.723)
+Test: [ 48/48] Time: 0.090 (0.321) Loss: 6.045 ( 6.095) Acc@1: 8.137 ( 6.276) Acc@5: 18.160 ( 16.600)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-29.pth.tar', 7.029999998168945)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-30.pth.tar', 6.275999995422363)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-26.pth.tar', 6.208000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-28.pth.tar', 6.059999989929199)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-25.pth.tar', 5.836000000305176)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-27.pth.tar', 5.806000002593994)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-24.pth.tar', 5.459999994964599)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-23.pth.tar', 4.933999999542237)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-22.pth.tar', 4.731999999389648)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-19.pth.tar', 4.553999996948242)
+
+Train: 31 [ 0/312 ( 0%)] Loss: 6.72 (6.72) Time: 1.833s, 558.78/s (1.833s, 558.78/s) LR: 3.593e-01 Data: 1.459 (1.459)
+Train: 31 [ 50/312 ( 16%)] Loss: 6.67 (6.69) Time: 0.408s, 2511.27/s (0.432s, 2369.53/s) LR: 3.593e-01 Data: 0.034 (0.056)
+Train: 31 [ 100/312 ( 32%)] Loss: 6.71 (6.70) Time: 0.405s, 2530.70/s (0.418s, 2447.37/s) LR: 3.593e-01 Data: 0.027 (0.042)
+Train: 31 [ 150/312 ( 48%)] Loss: 6.71 (6.70) Time: 0.404s, 2533.64/s (0.414s, 2474.81/s) LR: 3.593e-01 Data: 0.028 (0.038)
+Train: 31 [ 200/312 ( 64%)] Loss: 6.74 (6.71) Time: 0.406s, 2519.99/s (0.412s, 2485.23/s) LR: 3.593e-01 Data: 0.027 (0.035)
+Train: 31 [ 250/312 ( 80%)] Loss: 6.77 (6.71) Time: 0.410s, 2499.32/s (0.411s, 2489.21/s) LR: 3.593e-01 Data: 0.030 (0.034)
+Train: 31 [ 300/312 ( 96%)] Loss: 6.70 (6.72) Time: 0.406s, 2525.08/s (0.411s, 2493.99/s) LR: 3.593e-01 Data: 0.029 (0.033)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.415 (1.415) Loss: 6.018 ( 6.018) Acc@1: 7.520 ( 7.520) Acc@5: 18.555 ( 18.555)
+Test: [ 48/48] Time: 0.090 (0.319) Loss: 5.944 ( 5.999) Acc@1: 9.316 ( 7.792) Acc@5: 21.108 ( 20.096)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-31.pth.tar', 7.7920000067138675)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-29.pth.tar', 7.029999998168945)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-30.pth.tar', 6.275999995422363)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-26.pth.tar', 6.208000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-28.pth.tar', 6.059999989929199)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-25.pth.tar', 5.836000000305176)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-27.pth.tar', 5.806000002593994)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-24.pth.tar', 5.459999994964599)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-23.pth.tar', 4.933999999542237)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-22.pth.tar', 4.731999999389648)
+
+Train: 32 [ 0/312 ( 0%)] Loss: 6.70 (6.70) Time: 1.811s, 565.46/s (1.811s, 565.46/s) LR: 3.567e-01 Data: 1.438 (1.438)
+Train: 32 [ 50/312 ( 16%)] Loss: 6.68 (6.68) Time: 0.410s, 2499.61/s (0.434s, 2358.64/s) LR: 3.567e-01 Data: 0.028 (0.055)
+Train: 32 [ 100/312 ( 32%)] Loss: 6.69 (6.69) Time: 0.410s, 2497.81/s (0.422s, 2428.47/s) LR: 3.567e-01 Data: 0.027 (0.042)
+Train: 32 [ 150/312 ( 48%)] Loss: 6.71 (6.69) Time: 0.403s, 2543.55/s (0.416s, 2458.71/s) LR: 3.567e-01 Data: 0.026 (0.037)
+Train: 32 [ 200/312 ( 64%)] Loss: 6.71 (6.70) Time: 0.404s, 2531.90/s (0.413s, 2478.90/s) LR: 3.567e-01 Data: 0.028 (0.035)
+Train: 32 [ 250/312 ( 80%)] Loss: 6.70 (6.70) Time: 0.406s, 2525.18/s (0.411s, 2492.07/s) LR: 3.567e-01 Data: 0.029 (0.033)
+Train: 32 [ 300/312 ( 96%)] Loss: 6.76 (6.70) Time: 0.411s, 2493.40/s (0.410s, 2499.67/s) LR: 3.567e-01 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.411 (1.411) Loss: 5.993 ( 5.993) Acc@1: 7.910 ( 7.910) Acc@5: 20.996 ( 20.996)
+Test: [ 48/48] Time: 0.090 (0.318) Loss: 5.904 ( 5.988) Acc@1: 9.670 ( 8.074) Acc@5: 24.057 ( 20.812)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-32.pth.tar', 8.073999998779296)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-31.pth.tar', 7.7920000067138675)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-29.pth.tar', 7.029999998168945)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-30.pth.tar', 6.275999995422363)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-26.pth.tar', 6.208000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-28.pth.tar', 6.059999989929199)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-25.pth.tar', 5.836000000305176)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-27.pth.tar', 5.806000002593994)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-24.pth.tar', 5.459999994964599)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-23.pth.tar', 4.933999999542237)
+
+Train: 33 [ 0/312 ( 0%)] Loss: 6.64 (6.64) Time: 1.524s, 672.12/s (1.524s, 672.12/s) LR: 3.541e-01 Data: 1.145 (1.145)
+Train: 33 [ 50/312 ( 16%)] Loss: 6.66 (6.67) Time: 0.406s, 2521.57/s (0.427s, 2397.94/s) LR: 3.541e-01 Data: 0.028 (0.050)
+Train: 33 [ 100/312 ( 32%)] Loss: 6.70 (6.67) Time: 0.409s, 2503.35/s (0.417s, 2454.73/s) LR: 3.541e-01 Data: 0.029 (0.039)
+Train: 33 [ 150/312 ( 48%)] Loss: 6.71 (6.68) Time: 0.408s, 2510.13/s (0.414s, 2471.91/s) LR: 3.541e-01 Data: 0.027 (0.035)
+Train: 33 [ 200/312 ( 64%)] Loss: 6.74 (6.68) Time: 0.406s, 2524.96/s (0.413s, 2480.76/s) LR: 3.541e-01 Data: 0.027 (0.033)
+Train: 33 [ 250/312 ( 80%)] Loss: 6.71 (6.69) Time: 0.407s, 2516.45/s (0.412s, 2487.71/s) LR: 3.541e-01 Data: 0.026 (0.032)
+Train: 33 [ 300/312 ( 96%)] Loss: 6.71 (6.69) Time: 0.408s, 2509.08/s (0.411s, 2490.44/s) LR: 3.541e-01 Data: 0.026 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.424 (1.424) Loss: 5.932 ( 5.932) Acc@1: 7.422 ( 7.422) Acc@5: 20.996 ( 20.996)
+Test: [ 48/48] Time: 0.090 (0.321) Loss: 5.846 ( 5.899) Acc@1: 9.906 ( 8.604) Acc@5: 22.642 ( 21.800)
+Current checkpoints:
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-33.pth.tar', 8.60400000427246)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-32.pth.tar', 8.073999998779296)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-31.pth.tar', 7.7920000067138675)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-29.pth.tar', 7.029999998168945)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-30.pth.tar', 6.275999995422363)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-26.pth.tar', 6.208000006103515)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-28.pth.tar', 6.059999989929199)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-25.pth.tar', 5.836000000305176)
+ ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-27.pth.tar', 5.806000002593994)
+
('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-24.pth.tar', 5.459999994964599) + +Train: 34 [ 0/312 ( 0%)] Loss: 6.60 (6.60) Time: 1.600s, 640.19/s (1.600s, 640.19/s) LR: 3.514e-01 Data: 1.227 (1.227) +Train: 34 [ 50/312 ( 16%)] Loss: 6.68 (6.65) Time: 0.404s, 2533.83/s (0.427s, 2396.41/s) LR: 3.514e-01 Data: 0.027 (0.051) +Train: 34 [ 100/312 ( 32%)] Loss: 6.67 (6.66) Time: 0.405s, 2530.20/s (0.416s, 2460.32/s) LR: 3.514e-01 Data: 0.027 (0.040) +Train: 34 [ 150/312 ( 48%)] Loss: 6.69 (6.67) Time: 0.411s, 2491.89/s (0.413s, 2477.53/s) LR: 3.514e-01 Data: 0.028 (0.036) +Train: 34 [ 200/312 ( 64%)] Loss: 6.66 (6.67) Time: 0.403s, 2540.60/s (0.412s, 2486.43/s) LR: 3.514e-01 Data: 0.027 (0.034) +Train: 34 [ 250/312 ( 80%)] Loss: 6.70 (6.68) Time: 0.401s, 2556.19/s (0.410s, 2498.33/s) LR: 3.514e-01 Data: 0.028 (0.033) +Train: 34 [ 300/312 ( 96%)] Loss: 6.71 (6.68) Time: 0.399s, 2568.70/s (0.408s, 2508.87/s) LR: 3.514e-01 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.421 (1.421) Loss: 5.886 ( 5.886) Acc@1: 9.766 ( 9.766) Acc@5: 22.363 ( 22.363) +Test: [ 48/48] Time: 0.088 (0.318) Loss: 5.785 ( 5.869) Acc@1: 11.557 ( 9.224) Acc@5: 24.764 ( 22.868) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-34.pth.tar', 9.224000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-33.pth.tar', 8.60400000427246) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-32.pth.tar', 8.073999998779296) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-31.pth.tar', 7.7920000067138675) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-29.pth.tar', 7.029999998168945) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-30.pth.tar', 6.275999995422363) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-26.pth.tar', 6.208000006103515) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-28.pth.tar', 6.059999989929199) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-25.pth.tar', 5.836000000305176) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-27.pth.tar', 5.806000002593994) + +Train: 35 [ 0/312 ( 0%)] Loss: 6.65 (6.65) Time: 1.641s, 624.10/s (1.641s, 624.10/s) LR: 3.486e-01 Data: 1.177 (1.177) +Train: 35 [ 50/312 ( 16%)] Loss: 6.65 (6.64) Time: 0.398s, 2573.71/s (0.422s, 2426.66/s) LR: 3.486e-01 Data: 0.028 (0.050) +Train: 35 [ 100/312 ( 32%)] Loss: 6.69 (6.65) Time: 0.398s, 2573.44/s (0.410s, 2494.89/s) LR: 3.486e-01 Data: 0.028 (0.039) +Train: 35 [ 150/312 ( 48%)] Loss: 6.65 (6.66) Time: 0.400s, 2560.99/s (0.407s, 2516.30/s) LR: 3.486e-01 Data: 0.028 (0.035) +Train: 35 [ 200/312 ( 64%)] Loss: 6.69 (6.66) Time: 0.402s, 2546.92/s (0.406s, 2524.64/s) LR: 3.486e-01 Data: 0.029 (0.033) +Train: 35 [ 250/312 ( 80%)] Loss: 6.70 (6.67) Time: 0.404s, 2533.99/s (0.405s, 2527.06/s) LR: 3.486e-01 Data: 0.028 (0.032) +Train: 35 [ 300/312 ( 96%)] Loss: 6.68 (6.67) Time: 0.408s, 2512.86/s (0.405s, 2526.28/s) LR: 3.486e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.411 (1.411) Loss: 5.852 ( 5.852) Acc@1: 8.398 ( 8.398) Acc@5: 23.047 ( 23.047) +Test: [ 48/48] Time: 0.092 (0.318) Loss: 5.748 ( 5.823) Acc@1: 10.849 ( 9.544) Acc@5: 25.943 ( 23.502) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-35.pth.tar', 9.543999993896485) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-34.pth.tar', 9.224000010375976) + 
('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-33.pth.tar', 8.60400000427246) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-32.pth.tar', 8.073999998779296) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-31.pth.tar', 7.7920000067138675) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-29.pth.tar', 7.029999998168945) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-30.pth.tar', 6.275999995422363) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-26.pth.tar', 6.208000006103515) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-28.pth.tar', 6.059999989929199) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-25.pth.tar', 5.836000000305176) + +Train: 36 [ 0/312 ( 0%)] Loss: 6.63 (6.63) Time: 1.540s, 664.83/s (1.540s, 664.83/s) LR: 3.458e-01 Data: 1.165 (1.165) +Train: 36 [ 50/312 ( 16%)] Loss: 6.65 (6.63) Time: 0.406s, 2524.59/s (0.431s, 2373.29/s) LR: 3.458e-01 Data: 0.028 (0.050) +Train: 36 [ 100/312 ( 32%)] Loss: 6.69 (6.64) Time: 0.405s, 2528.26/s (0.419s, 2444.43/s) LR: 3.458e-01 Data: 0.029 (0.039) +Train: 36 [ 150/312 ( 48%)] Loss: 6.67 (6.64) Time: 0.406s, 2519.60/s (0.414s, 2470.54/s) LR: 3.458e-01 Data: 0.027 (0.035) +Train: 36 [ 200/312 ( 64%)] Loss: 6.67 (6.65) Time: 0.410s, 2500.35/s (0.413s, 2480.36/s) LR: 3.458e-01 Data: 0.028 (0.033) +Train: 36 [ 250/312 ( 80%)] Loss: 6.65 (6.66) Time: 0.407s, 2516.83/s (0.412s, 2485.87/s) LR: 3.458e-01 Data: 0.027 (0.032) +Train: 36 [ 300/312 ( 96%)] Loss: 6.66 (6.66) Time: 0.407s, 2518.72/s (0.411s, 2489.77/s) LR: 3.458e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.400 (1.400) Loss: 5.931 ( 5.931) Acc@1: 8.789 ( 8.789) Acc@5: 21.680 ( 21.680) +Test: [ 48/48] Time: 0.091 (0.320) Loss: 5.840 ( 5.896) Acc@1: 10.849 ( 9.172) Acc@5: 24.764 ( 22.804) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-35.pth.tar', 9.543999993896485) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-34.pth.tar', 9.224000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-36.pth.tar', 9.172000026245117) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-33.pth.tar', 8.60400000427246) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-32.pth.tar', 8.073999998779296) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-31.pth.tar', 7.7920000067138675) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-29.pth.tar', 7.029999998168945) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-30.pth.tar', 6.275999995422363) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-26.pth.tar', 6.208000006103515) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-28.pth.tar', 6.059999989929199) + +Train: 37 [ 0/312 ( 0%)] Loss: 6.64 (6.64) Time: 1.476s, 693.71/s (1.476s, 693.71/s) LR: 3.429e-01 Data: 1.101 (1.101) +Train: 37 [ 50/312 ( 16%)] Loss: 6.61 (6.61) Time: 0.407s, 2516.67/s (0.430s, 2383.40/s) LR: 3.429e-01 Data: 0.027 (0.048) +Train: 37 [ 100/312 ( 32%)] Loss: 6.65 (6.62) Time: 0.406s, 2519.44/s (0.418s, 2450.88/s) LR: 3.429e-01 Data: 0.027 (0.038) +Train: 37 [ 150/312 ( 48%)] Loss: 6.66 (6.63) Time: 0.403s, 2540.06/s (0.414s, 2475.41/s) LR: 3.429e-01 Data: 0.027 (0.035) +Train: 37 [ 200/312 ( 64%)] Loss: 6.69 (6.64) Time: 0.406s, 2521.77/s (0.412s, 2486.28/s) LR: 3.429e-01 Data: 0.027 (0.033) +Train: 37 [ 250/312 ( 80%)] Loss: 6.65 (6.64) Time: 0.411s, 2492.19/s (0.411s, 
2490.02/s) LR: 3.429e-01 Data: 0.029 (0.032) +Train: 37 [ 300/312 ( 96%)] Loss: 6.67 (6.64) Time: 0.405s, 2529.82/s (0.411s, 2492.80/s) LR: 3.429e-01 Data: 0.026 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.412 (1.412) Loss: 5.879 ( 5.879) Acc@1: 10.352 ( 10.352) Acc@5: 22.754 ( 22.754) +Test: [ 48/48] Time: 0.090 (0.320) Loss: 5.823 ( 5.847) Acc@1: 9.198 ( 9.500) Acc@5: 24.882 ( 23.014) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-35.pth.tar', 9.543999993896485) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-37.pth.tar', 9.500000003967285) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-34.pth.tar', 9.224000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-36.pth.tar', 9.172000026245117) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-33.pth.tar', 8.60400000427246) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-32.pth.tar', 8.073999998779296) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-31.pth.tar', 7.7920000067138675) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-29.pth.tar', 7.029999998168945) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-30.pth.tar', 6.275999995422363) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-26.pth.tar', 6.208000006103515) + +Train: 38 [ 0/312 ( 0%)] Loss: 6.60 (6.60) Time: 1.586s, 645.52/s (1.586s, 645.52/s) LR: 3.399e-01 Data: 1.215 (1.215) +Train: 38 [ 50/312 ( 16%)] Loss: 6.57 (6.59) Time: 0.404s, 2534.57/s (0.428s, 2393.89/s) LR: 3.399e-01 Data: 0.026 (0.051) +Train: 38 [ 100/312 ( 32%)] Loss: 6.61 (6.61) Time: 0.408s, 2511.91/s (0.417s, 2454.29/s) LR: 3.399e-01 Data: 0.028 (0.039) +Train: 38 [ 150/312 ( 48%)] Loss: 6.68 (6.62) Time: 0.408s, 2509.99/s (0.414s, 2471.99/s) LR: 3.399e-01 Data: 0.025 (0.035) +Train: 38 [ 200/312 ( 64%)] Loss: 6.67 (6.62) Time: 0.407s, 2514.65/s (0.412s, 2483.55/s) LR: 3.399e-01 Data: 0.027 (0.033) +Train: 38 [ 250/312 ( 80%)] Loss: 6.68 (6.63) Time: 0.407s, 2515.88/s (0.411s, 2490.82/s) LR: 3.399e-01 Data: 0.028 (0.032) +Train: 38 [ 300/312 ( 96%)] Loss: 6.64 (6.63) Time: 0.410s, 2499.40/s (0.411s, 2493.80/s) LR: 3.399e-01 Data: 0.030 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.406 (1.406) Loss: 5.848 ( 5.848) Acc@1: 8.691 ( 8.691) Acc@5: 22.852 ( 22.852) +Test: [ 48/48] Time: 0.090 (0.322) Loss: 5.752 ( 5.819) Acc@1: 11.557 ( 9.544) Acc@5: 26.651 ( 23.128) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-38.pth.tar', 9.544000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-35.pth.tar', 9.543999993896485) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-37.pth.tar', 9.500000003967285) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-34.pth.tar', 9.224000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-36.pth.tar', 9.172000026245117) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-33.pth.tar', 8.60400000427246) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-32.pth.tar', 8.073999998779296) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-31.pth.tar', 7.7920000067138675) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-29.pth.tar', 7.029999998168945) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-30.pth.tar', 6.275999995422363) + +Train: 39 [ 0/312 ( 0%)] Loss: 6.54 (6.54) Time: 1.612s, 
635.37/s (1.612s, 635.37/s) LR: 3.369e-01 Data: 1.060 (1.060) +Train: 39 [ 50/312 ( 16%)] Loss: 6.60 (6.59) Time: 0.407s, 2514.18/s (0.430s, 2378.69/s) LR: 3.369e-01 Data: 0.027 (0.047) +Train: 39 [ 100/312 ( 32%)] Loss: 6.59 (6.59) Time: 0.405s, 2526.92/s (0.419s, 2441.16/s) LR: 3.369e-01 Data: 0.029 (0.038) +Train: 39 [ 150/312 ( 48%)] Loss: 6.58 (6.60) Time: 0.403s, 2539.75/s (0.415s, 2469.55/s) LR: 3.369e-01 Data: 0.028 (0.035) +Train: 39 [ 200/312 ( 64%)] Loss: 6.64 (6.61) Time: 0.403s, 2541.93/s (0.412s, 2484.72/s) LR: 3.369e-01 Data: 0.026 (0.033) +Train: 39 [ 250/312 ( 80%)] Loss: 6.64 (6.61) Time: 0.407s, 2518.16/s (0.411s, 2492.48/s) LR: 3.369e-01 Data: 0.028 (0.032) +Train: 39 [ 300/312 ( 96%)] Loss: 6.63 (6.62) Time: 0.412s, 2486.57/s (0.410s, 2495.38/s) LR: 3.369e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.416 (1.416) Loss: 5.855 ( 5.855) Acc@1: 8.887 ( 8.887) Acc@5: 22.754 ( 22.754) +Test: [ 48/48] Time: 0.090 (0.320) Loss: 5.747 ( 5.800) Acc@1: 10.731 ( 10.008) Acc@5: 25.000 ( 23.462) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-39.pth.tar', 10.008000007324219) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-38.pth.tar', 9.544000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-35.pth.tar', 9.543999993896485) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-37.pth.tar', 9.500000003967285) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-34.pth.tar', 9.224000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-36.pth.tar', 9.172000026245117) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-33.pth.tar', 8.60400000427246) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-32.pth.tar', 8.073999998779296) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-31.pth.tar', 7.7920000067138675) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-29.pth.tar', 7.029999998168945) + +Train: 40 [ 0/312 ( 0%)] Loss: 6.56 (6.56) Time: 1.756s, 583.02/s (1.756s, 583.02/s) LR: 3.338e-01 Data: 1.380 (1.380) +Train: 40 [ 50/312 ( 16%)] Loss: 6.57 (6.57) Time: 0.410s, 2498.97/s (0.434s, 2358.17/s) LR: 3.338e-01 Data: 0.029 (0.055) +Train: 40 [ 100/312 ( 32%)] Loss: 6.61 (6.58) Time: 0.413s, 2478.95/s (0.422s, 2427.75/s) LR: 3.338e-01 Data: 0.032 (0.042) +Train: 40 [ 150/312 ( 48%)] Loss: 6.64 (6.59) Time: 0.406s, 2525.00/s (0.417s, 2457.94/s) LR: 3.338e-01 Data: 0.027 (0.037) +Train: 40 [ 200/312 ( 64%)] Loss: 6.64 (6.60) Time: 0.408s, 2508.91/s (0.414s, 2471.49/s) LR: 3.338e-01 Data: 0.029 (0.035) +Train: 40 [ 250/312 ( 80%)] Loss: 6.62 (6.60) Time: 0.408s, 2509.61/s (0.413s, 2478.51/s) LR: 3.338e-01 Data: 0.026 (0.033) +Train: 40 [ 300/312 ( 96%)] Loss: 6.66 (6.61) Time: 0.407s, 2515.62/s (0.412s, 2483.49/s) LR: 3.338e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.409 (1.409) Loss: 5.807 ( 5.807) Acc@1: 11.035 ( 11.035) Acc@5: 23.926 ( 23.926) +Test: [ 48/48] Time: 0.090 (0.318) Loss: 5.723 ( 5.762) Acc@1: 10.731 ( 10.276) Acc@5: 25.236 ( 24.406) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-40.pth.tar', 10.27600000732422) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-39.pth.tar', 10.008000007324219) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-38.pth.tar', 9.544000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-35.pth.tar', 
9.543999993896485) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-37.pth.tar', 9.500000003967285) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-34.pth.tar', 9.224000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-36.pth.tar', 9.172000026245117) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-33.pth.tar', 8.60400000427246) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-32.pth.tar', 8.073999998779296) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-31.pth.tar', 7.7920000067138675) + +Train: 41 [ 0/312 ( 0%)] Loss: 6.54 (6.54) Time: 1.840s, 556.42/s (1.840s, 556.42/s) LR: 3.307e-01 Data: 1.467 (1.467) +Train: 41 [ 50/312 ( 16%)] Loss: 6.55 (6.55) Time: 0.408s, 2510.52/s (0.436s, 2350.50/s) LR: 3.307e-01 Data: 0.029 (0.056) +Train: 41 [ 100/312 ( 32%)] Loss: 6.59 (6.56) Time: 0.408s, 2512.53/s (0.422s, 2427.46/s) LR: 3.307e-01 Data: 0.027 (0.042) +Train: 41 [ 150/312 ( 48%)] Loss: 6.60 (6.57) Time: 0.406s, 2524.22/s (0.417s, 2454.50/s) LR: 3.307e-01 Data: 0.028 (0.037) +Train: 41 [ 200/312 ( 64%)] Loss: 6.60 (6.58) Time: 0.406s, 2519.33/s (0.415s, 2468.58/s) LR: 3.307e-01 Data: 0.027 (0.035) +Train: 41 [ 250/312 ( 80%)] Loss: 6.62 (6.59) Time: 0.406s, 2522.32/s (0.413s, 2477.93/s) LR: 3.307e-01 Data: 0.029 (0.034) +Train: 41 [ 300/312 ( 96%)] Loss: 6.64 (6.59) Time: 0.407s, 2514.61/s (0.412s, 2484.39/s) LR: 3.307e-01 Data: 0.028 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.400 (1.400) Loss: 5.853 ( 5.853) Acc@1: 10.742 ( 10.742) Acc@5: 23.730 ( 23.730) +Test: [ 48/48] Time: 0.090 (0.318) Loss: 5.721 ( 5.809) Acc@1: 12.146 ( 10.330) Acc@5: 27.358 ( 24.458) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-41.pth.tar', 10.33000000793457) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-40.pth.tar', 10.27600000732422) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-39.pth.tar', 10.008000007324219) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-38.pth.tar', 9.544000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-35.pth.tar', 9.543999993896485) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-37.pth.tar', 9.500000003967285) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-34.pth.tar', 9.224000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-36.pth.tar', 9.172000026245117) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-33.pth.tar', 8.60400000427246) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-32.pth.tar', 8.073999998779296) + +Train: 42 [ 0/312 ( 0%)] Loss: 6.57 (6.57) Time: 1.983s, 516.29/s (1.983s, 516.29/s) LR: 3.275e-01 Data: 1.609 (1.609) +Train: 42 [ 50/312 ( 16%)] Loss: 6.57 (6.54) Time: 0.406s, 2523.58/s (0.439s, 2334.16/s) LR: 3.275e-01 Data: 0.027 (0.059) +Train: 42 [ 100/312 ( 32%)] Loss: 6.55 (6.55) Time: 0.412s, 2488.03/s (0.422s, 2424.67/s) LR: 3.275e-01 Data: 0.035 (0.043) +Train: 42 [ 150/312 ( 48%)] Loss: 6.54 (6.56) Time: 0.405s, 2527.04/s (0.417s, 2458.36/s) LR: 3.275e-01 Data: 0.027 (0.038) +Train: 42 [ 200/312 ( 64%)] Loss: 6.56 (6.57) Time: 0.408s, 2508.70/s (0.414s, 2473.17/s) LR: 3.275e-01 Data: 0.029 (0.036) +Train: 42 [ 250/312 ( 80%)] Loss: 6.60 (6.57) Time: 0.406s, 2524.92/s (0.413s, 2480.44/s) LR: 3.275e-01 Data: 0.027 (0.034) +Train: 42 [ 300/312 ( 96%)] Loss: 6.59 (6.58) Time: 0.405s, 2527.38/s (0.412s, 2488.29/s) LR: 3.275e-01 Data: 0.028 
(0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.417 (1.417) Loss: 5.741 ( 5.741) Acc@1: 11.816 ( 11.816) Acc@5: 25.391 ( 25.391) +Test: [ 48/48] Time: 0.090 (0.317) Loss: 5.624 ( 5.719) Acc@1: 12.854 ( 11.374) Acc@5: 26.297 ( 25.768) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-42.pth.tar', 11.374000008239745) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-41.pth.tar', 10.33000000793457) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-40.pth.tar', 10.27600000732422) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-39.pth.tar', 10.008000007324219) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-38.pth.tar', 9.544000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-35.pth.tar', 9.543999993896485) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-37.pth.tar', 9.500000003967285) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-34.pth.tar', 9.224000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-36.pth.tar', 9.172000026245117) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-33.pth.tar', 8.60400000427246) + +Train: 43 [ 0/312 ( 0%)] Loss: 6.51 (6.51) Time: 1.876s, 545.88/s (1.876s, 545.88/s) LR: 3.242e-01 Data: 1.505 (1.505) +Train: 43 [ 50/312 ( 16%)] Loss: 6.51 (6.52) Time: 0.408s, 2509.05/s (0.432s, 2369.26/s) LR: 3.242e-01 Data: 0.032 (0.056) +Train: 43 [ 100/312 ( 32%)] Loss: 6.56 (6.54) Time: 0.409s, 2504.48/s (0.419s, 2443.66/s) LR: 3.242e-01 Data: 0.028 (0.042) +Train: 43 [ 150/312 ( 48%)] Loss: 6.54 (6.55) Time: 0.409s, 2501.22/s (0.415s, 2464.54/s) LR: 3.242e-01 Data: 0.029 (0.037) +Train: 43 [ 200/312 ( 64%)] Loss: 6.55 (6.55) Time: 0.408s, 2510.89/s (0.413s, 2476.48/s) LR: 3.242e-01 Data: 0.027 (0.035) +Train: 43 [ 250/312 ( 80%)] Loss: 6.59 (6.56) Time: 0.411s, 2493.54/s (0.412s, 2482.60/s) LR: 3.242e-01 Data: 0.028 (0.034) +Train: 43 [ 300/312 ( 96%)] Loss: 6.59 (6.57) Time: 0.407s, 2516.27/s (0.412s, 2486.51/s) LR: 3.242e-01 Data: 0.027 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.419 (1.419) Loss: 5.761 ( 5.761) Acc@1: 11.328 ( 11.328) Acc@5: 24.512 ( 24.512) +Test: [ 48/48] Time: 0.090 (0.318) Loss: 5.686 ( 5.754) Acc@1: 11.321 ( 10.942) Acc@5: 28.184 ( 25.296) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-42.pth.tar', 11.374000008239745) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-43.pth.tar', 10.942000004882813) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-41.pth.tar', 10.33000000793457) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-40.pth.tar', 10.27600000732422) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-39.pth.tar', 10.008000007324219) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-38.pth.tar', 9.544000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-35.pth.tar', 9.543999993896485) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-37.pth.tar', 9.500000003967285) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-34.pth.tar', 9.224000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-36.pth.tar', 9.172000026245117) + +Train: 44 [ 0/312 ( 0%)] Loss: 6.53 (6.53) Time: 1.609s, 636.27/s (1.609s, 636.27/s) LR: 3.209e-01 Data: 1.207 (1.207) +Train: 44 [ 50/312 ( 16%)] Loss: 6.56 (6.50) Time: 0.406s, 2520.47/s (0.432s, 2369.48/s) LR: 
3.209e-01 Data: 0.028 (0.051) +Train: 44 [ 100/312 ( 32%)] Loss: 6.52 (6.52) Time: 0.413s, 2478.64/s (0.420s, 2437.68/s) LR: 3.209e-01 Data: 0.033 (0.039) +Train: 44 [ 150/312 ( 48%)] Loss: 6.53 (6.53) Time: 0.410s, 2494.71/s (0.416s, 2460.05/s) LR: 3.209e-01 Data: 0.032 (0.036) +Train: 44 [ 200/312 ( 64%)] Loss: 6.58 (6.54) Time: 0.406s, 2522.43/s (0.414s, 2474.37/s) LR: 3.209e-01 Data: 0.026 (0.034) +Train: 44 [ 250/312 ( 80%)] Loss: 6.59 (6.54) Time: 0.407s, 2518.68/s (0.413s, 2481.88/s) LR: 3.209e-01 Data: 0.027 (0.032) +Train: 44 [ 300/312 ( 96%)] Loss: 6.59 (6.55) Time: 0.410s, 2496.15/s (0.412s, 2486.81/s) LR: 3.209e-01 Data: 0.032 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.414 (1.414) Loss: 5.717 ( 5.717) Acc@1: 11.230 ( 11.230) Acc@5: 25.098 ( 25.098) +Test: [ 48/48] Time: 0.090 (0.318) Loss: 5.660 ( 5.727) Acc@1: 11.910 ( 10.870) Acc@5: 27.123 ( 24.916) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-42.pth.tar', 11.374000008239745) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-43.pth.tar', 10.942000004882813) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-44.pth.tar', 10.870000002441406) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-41.pth.tar', 10.33000000793457) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-40.pth.tar', 10.27600000732422) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-39.pth.tar', 10.008000007324219) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-38.pth.tar', 9.544000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-35.pth.tar', 9.543999993896485) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-37.pth.tar', 9.500000003967285) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-34.pth.tar', 9.224000010375976) + +Train: 45 [ 0/312 ( 0%)] Loss: 6.44 (6.44) Time: 2.051s, 499.28/s (2.051s, 499.28/s) LR: 3.176e-01 Data: 1.677 (1.677) +Train: 45 [ 50/312 ( 16%)] Loss: 6.48 (6.49) Time: 0.408s, 2512.42/s (0.439s, 2330.44/s) LR: 3.176e-01 Data: 0.027 (0.060) +Train: 45 [ 100/312 ( 32%)] Loss: 6.54 (6.50) Time: 0.409s, 2505.42/s (0.424s, 2416.31/s) LR: 3.176e-01 Data: 0.029 (0.044) +Train: 45 [ 150/312 ( 48%)] Loss: 6.55 (6.51) Time: 0.408s, 2509.86/s (0.419s, 2446.04/s) LR: 3.176e-01 Data: 0.028 (0.039) +Train: 45 [ 200/312 ( 64%)] Loss: 6.58 (6.52) Time: 0.405s, 2526.33/s (0.416s, 2462.59/s) LR: 3.176e-01 Data: 0.026 (0.036) +Train: 45 [ 250/312 ( 80%)] Loss: 6.53 (6.53) Time: 0.406s, 2519.12/s (0.414s, 2472.38/s) LR: 3.176e-01 Data: 0.026 (0.034) +Train: 45 [ 300/312 ( 96%)] Loss: 6.58 (6.54) Time: 0.406s, 2523.84/s (0.413s, 2478.79/s) LR: 3.176e-01 Data: 0.027 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.444 (1.444) Loss: 5.732 ( 5.732) Acc@1: 10.938 ( 10.938) Acc@5: 24.023 ( 24.023) +Test: [ 48/48] Time: 0.090 (0.319) Loss: 5.639 ( 5.701) Acc@1: 11.910 ( 11.202) Acc@5: 28.066 ( 25.800) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-42.pth.tar', 11.374000008239745) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-45.pth.tar', 11.202000002441407) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-43.pth.tar', 10.942000004882813) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-44.pth.tar', 10.870000002441406) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-41.pth.tar', 10.33000000793457) + 
('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-40.pth.tar', 10.27600000732422) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-39.pth.tar', 10.008000007324219) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-38.pth.tar', 9.544000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-35.pth.tar', 9.543999993896485) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-37.pth.tar', 9.500000003967285) + +Train: 46 [ 0/312 ( 0%)] Loss: 6.45 (6.45) Time: 1.619s, 632.57/s (1.619s, 632.57/s) LR: 3.141e-01 Data: 1.245 (1.245) +Train: 46 [ 50/312 ( 16%)] Loss: 6.51 (6.48) Time: 0.409s, 2505.55/s (0.432s, 2370.78/s) LR: 3.141e-01 Data: 0.028 (0.052) +Train: 46 [ 100/312 ( 32%)] Loss: 6.53 (6.49) Time: 0.405s, 2525.59/s (0.420s, 2440.96/s) LR: 3.141e-01 Data: 0.028 (0.040) +Train: 46 [ 150/312 ( 48%)] Loss: 6.49 (6.50) Time: 0.409s, 2503.13/s (0.415s, 2467.89/s) LR: 3.141e-01 Data: 0.032 (0.036) +Train: 46 [ 200/312 ( 64%)] Loss: 6.53 (6.51) Time: 0.408s, 2511.46/s (0.413s, 2479.39/s) LR: 3.141e-01 Data: 0.027 (0.034) +Train: 46 [ 250/312 ( 80%)] Loss: 6.52 (6.52) Time: 0.404s, 2537.58/s (0.412s, 2485.25/s) LR: 3.141e-01 Data: 0.026 (0.033) +Train: 46 [ 300/312 ( 96%)] Loss: 6.52 (6.52) Time: 0.409s, 2502.86/s (0.411s, 2490.07/s) LR: 3.141e-01 Data: 0.031 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.422 (1.422) Loss: 5.702 ( 5.702) Acc@1: 11.230 ( 11.230) Acc@5: 25.977 ( 25.977) +Test: [ 48/48] Time: 0.090 (0.319) Loss: 5.583 ( 5.657) Acc@1: 12.028 ( 11.672) Acc@5: 28.538 ( 25.892) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-46.pth.tar', 11.672000005187988) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-42.pth.tar', 11.374000008239745) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-45.pth.tar', 11.202000002441407) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-43.pth.tar', 10.942000004882813) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-44.pth.tar', 10.870000002441406) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-41.pth.tar', 10.33000000793457) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-40.pth.tar', 10.27600000732422) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-39.pth.tar', 10.008000007324219) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-38.pth.tar', 9.544000010375976) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-35.pth.tar', 9.543999993896485) + +Train: 47 [ 0/312 ( 0%)] Loss: 6.47 (6.47) Time: 1.687s, 607.06/s (1.687s, 607.06/s) LR: 3.107e-01 Data: 1.313 (1.313) +Train: 47 [ 50/312 ( 16%)] Loss: 6.50 (6.46) Time: 0.406s, 2520.11/s (0.434s, 2361.62/s) LR: 3.107e-01 Data: 0.028 (0.053) +Train: 47 [ 100/312 ( 32%)] Loss: 6.49 (6.48) Time: 0.402s, 2545.77/s (0.419s, 2442.93/s) LR: 3.107e-01 Data: 0.027 (0.041) +Train: 47 [ 150/312 ( 48%)] Loss: 6.52 (6.49) Time: 0.403s, 2543.28/s (0.414s, 2475.02/s) LR: 3.107e-01 Data: 0.027 (0.036) +Train: 47 [ 200/312 ( 64%)] Loss: 6.47 (6.49) Time: 0.406s, 2524.36/s (0.411s, 2491.77/s) LR: 3.107e-01 Data: 0.027 (0.034) +Train: 47 [ 250/312 ( 80%)] Loss: 6.62 (6.50) Time: 0.404s, 2536.20/s (0.409s, 2500.64/s) LR: 3.107e-01 Data: 0.028 (0.033) +Train: 47 [ 300/312 ( 96%)] Loss: 6.56 (6.51) Time: 0.406s, 2521.33/s (0.409s, 2504.86/s) LR: 3.107e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.403 (1.403) Loss: 5.685 ( 5.685) 
Acc@1: 12.305 ( 12.305) Acc@5: 25.586 ( 25.586) +Test: [ 48/48] Time: 0.090 (0.320) Loss: 5.621 ( 5.661) Acc@1: 13.443 ( 12.092) Acc@5: 28.892 ( 26.750) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-47.pth.tar', 12.09200000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-46.pth.tar', 11.672000005187988) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-42.pth.tar', 11.374000008239745) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-45.pth.tar', 11.202000002441407) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-43.pth.tar', 10.942000004882813) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-44.pth.tar', 10.870000002441406) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-41.pth.tar', 10.33000000793457) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-40.pth.tar', 10.27600000732422) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-39.pth.tar', 10.008000007324219) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-38.pth.tar', 9.544000010375976) + +Train: 48 [ 0/312 ( 0%)] Loss: 6.45 (6.45) Time: 1.945s, 526.38/s (1.945s, 526.38/s) LR: 3.072e-01 Data: 1.122 (1.122) +Train: 48 [ 50/312 ( 16%)] Loss: 6.50 (6.44) Time: 0.404s, 2532.41/s (0.435s, 2355.49/s) LR: 3.072e-01 Data: 0.029 (0.049) +Train: 48 [ 100/312 ( 32%)] Loss: 6.50 (6.45) Time: 0.406s, 2523.63/s (0.419s, 2442.62/s) LR: 3.072e-01 Data: 0.027 (0.039) +Train: 48 [ 150/312 ( 48%)] Loss: 6.46 (6.46) Time: 0.408s, 2512.58/s (0.414s, 2470.56/s) LR: 3.072e-01 Data: 0.031 (0.035) +Train: 48 [ 200/312 ( 64%)] Loss: 6.53 (6.48) Time: 0.407s, 2513.67/s (0.413s, 2481.25/s) LR: 3.072e-01 Data: 0.026 (0.033) +Train: 48 [ 250/312 ( 80%)] Loss: 6.52 (6.49) Time: 0.407s, 2519.05/s (0.411s, 2488.64/s) LR: 3.072e-01 Data: 0.028 (0.032) +Train: 48 [ 300/312 ( 96%)] Loss: 6.54 (6.49) Time: 0.410s, 2497.68/s (0.410s, 2494.87/s) LR: 3.072e-01 Data: 0.031 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.426 (1.426) Loss: 5.771 ( 5.771) Acc@1: 12.598 ( 12.598) Acc@5: 25.000 ( 25.000) +Test: [ 48/48] Time: 0.091 (0.320) Loss: 5.651 ( 5.706) Acc@1: 12.736 ( 11.304) Acc@5: 27.005 ( 25.278) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-47.pth.tar', 12.09200000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-46.pth.tar', 11.672000005187988) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-42.pth.tar', 11.374000008239745) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-48.pth.tar', 11.304000005493164) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-45.pth.tar', 11.202000002441407) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-43.pth.tar', 10.942000004882813) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-44.pth.tar', 10.870000002441406) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-41.pth.tar', 10.33000000793457) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-40.pth.tar', 10.27600000732422) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-39.pth.tar', 10.008000007324219) + +Train: 49 [ 0/312 ( 0%)] Loss: 6.41 (6.41) Time: 1.697s, 603.27/s (1.697s, 603.27/s) LR: 3.036e-01 Data: 1.323 (1.323) +Train: 49 [ 50/312 ( 16%)] Loss: 6.37 (6.42) Time: 0.412s, 2482.71/s (0.434s, 2359.63/s) LR: 3.036e-01 Data: 0.027 (0.053) +Train: 49 [ 100/312 ( 32%)] Loss: 6.47 (6.44) Time: 0.404s, 2532.93/s 
(0.420s, 2435.67/s) LR: 3.036e-01 Data: 0.027 (0.040) +Train: 49 [ 150/312 ( 48%)] Loss: 6.45 (6.45) Time: 0.405s, 2530.90/s (0.416s, 2464.49/s) LR: 3.036e-01 Data: 0.027 (0.036) +Train: 49 [ 200/312 ( 64%)] Loss: 6.50 (6.46) Time: 0.412s, 2485.20/s (0.413s, 2476.76/s) LR: 3.036e-01 Data: 0.031 (0.034) +Train: 49 [ 250/312 ( 80%)] Loss: 6.50 (6.47) Time: 0.408s, 2511.84/s (0.412s, 2482.64/s) LR: 3.036e-01 Data: 0.029 (0.033) +Train: 49 [ 300/312 ( 96%)] Loss: 6.50 (6.48) Time: 0.406s, 2521.08/s (0.411s, 2488.76/s) LR: 3.036e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.408 (1.408) Loss: 5.735 ( 5.735) Acc@1: 11.816 ( 11.816) Acc@5: 25.781 ( 25.781) +Test: [ 48/48] Time: 0.090 (0.318) Loss: 5.592 ( 5.690) Acc@1: 14.269 ( 12.074) Acc@5: 29.481 ( 26.322) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-47.pth.tar', 12.09200000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-49.pth.tar', 12.074000008850097) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-46.pth.tar', 11.672000005187988) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-42.pth.tar', 11.374000008239745) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-48.pth.tar', 11.304000005493164) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-45.pth.tar', 11.202000002441407) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-43.pth.tar', 10.942000004882813) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-44.pth.tar', 10.870000002441406) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-41.pth.tar', 10.33000000793457) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-40.pth.tar', 10.27600000732422) + +Train: 50 [ 0/312 ( 0%)] Loss: 6.37 (6.37) Time: 1.661s, 616.37/s (1.661s, 616.37/s) LR: 3.000e-01 Data: 1.286 (1.286) +Train: 50 [ 50/312 ( 16%)] Loss: 6.44 (6.40) Time: 0.407s, 2514.90/s (0.432s, 2372.80/s) LR: 3.000e-01 Data: 0.028 (0.053) +Train: 50 [ 100/312 ( 32%)] Loss: 6.49 (6.42) Time: 0.404s, 2533.45/s (0.419s, 2442.43/s) LR: 3.000e-01 Data: 0.028 (0.040) +Train: 50 [ 150/312 ( 48%)] Loss: 6.50 (6.44) Time: 0.404s, 2531.87/s (0.415s, 2470.25/s) LR: 3.000e-01 Data: 0.027 (0.036) +Train: 50 [ 200/312 ( 64%)] Loss: 6.47 (6.45) Time: 0.407s, 2513.67/s (0.412s, 2482.79/s) LR: 3.000e-01 Data: 0.023 (0.034) +Train: 50 [ 250/312 ( 80%)] Loss: 6.48 (6.46) Time: 0.406s, 2521.75/s (0.412s, 2488.28/s) LR: 3.000e-01 Data: 0.026 (0.033) +Train: 50 [ 300/312 ( 96%)] Loss: 6.50 (6.46) Time: 0.406s, 2521.65/s (0.411s, 2494.12/s) LR: 3.000e-01 Data: 0.023 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.409 (1.409) Loss: 5.655 ( 5.655) Acc@1: 12.305 ( 12.305) Acc@5: 26.367 ( 26.367) +Test: [ 48/48] Time: 0.090 (0.318) Loss: 5.520 ( 5.614) Acc@1: 13.915 ( 12.048) Acc@5: 30.071 ( 26.536) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-47.pth.tar', 12.09200000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-49.pth.tar', 12.074000008850097) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-50.pth.tar', 12.048000000610351) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-46.pth.tar', 11.672000005187988) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-42.pth.tar', 11.374000008239745) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-48.pth.tar', 11.304000005493164) + 
('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-45.pth.tar', 11.202000002441407) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-43.pth.tar', 10.942000004882813) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-44.pth.tar', 10.870000002441406) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-41.pth.tar', 10.33000000793457) + +Train: 51 [ 0/312 ( 0%)] Loss: 6.40 (6.40) Time: 1.660s, 616.98/s (1.660s, 616.98/s) LR: 2.964e-01 Data: 1.288 (1.288) +Train: 51 [ 50/312 ( 16%)] Loss: 6.41 (6.38) Time: 0.406s, 2522.88/s (0.431s, 2376.95/s) LR: 2.964e-01 Data: 0.028 (0.053) +Train: 51 [ 100/312 ( 32%)] Loss: 6.46 (6.40) Time: 0.407s, 2518.99/s (0.420s, 2437.83/s) LR: 2.964e-01 Data: 0.029 (0.040) +Train: 51 [ 150/312 ( 48%)] Loss: 6.45 (6.42) Time: 0.404s, 2534.21/s (0.415s, 2465.48/s) LR: 2.964e-01 Data: 0.027 (0.036) +Train: 51 [ 200/312 ( 64%)] Loss: 6.44 (6.43) Time: 0.405s, 2530.48/s (0.413s, 2481.63/s) LR: 2.964e-01 Data: 0.029 (0.034) +Train: 51 [ 250/312 ( 80%)] Loss: 6.48 (6.44) Time: 0.406s, 2522.67/s (0.411s, 2490.66/s) LR: 2.964e-01 Data: 0.028 (0.033) +Train: 51 [ 300/312 ( 96%)] Loss: 6.49 (6.45) Time: 0.408s, 2511.38/s (0.411s, 2493.88/s) LR: 2.964e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.402 (1.402) Loss: 5.607 ( 5.607) Acc@1: 12.988 ( 12.988) Acc@5: 26.953 ( 26.953) +Test: [ 48/48] Time: 0.090 (0.320) Loss: 5.497 ( 5.596) Acc@1: 13.443 ( 12.718) Acc@5: 29.481 ( 27.302) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-51.pth.tar', 12.71800000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-47.pth.tar', 12.09200000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-49.pth.tar', 12.074000008850097) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-50.pth.tar', 12.048000000610351) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-46.pth.tar', 11.672000005187988) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-42.pth.tar', 11.374000008239745) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-48.pth.tar', 11.304000005493164) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-45.pth.tar', 11.202000002441407) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-43.pth.tar', 10.942000004882813) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-44.pth.tar', 10.870000002441406) + +Train: 52 [ 0/312 ( 0%)] Loss: 6.35 (6.35) Time: 1.572s, 651.51/s (1.572s, 651.51/s) LR: 2.927e-01 Data: 1.113 (1.113) +Train: 52 [ 50/312 ( 16%)] Loss: 6.42 (6.36) Time: 0.402s, 2544.63/s (0.429s, 2386.89/s) LR: 2.927e-01 Data: 0.025 (0.049) +Train: 52 [ 100/312 ( 32%)] Loss: 6.41 (6.38) Time: 0.407s, 2518.37/s (0.417s, 2454.63/s) LR: 2.927e-01 Data: 0.028 (0.039) +Train: 52 [ 150/312 ( 48%)] Loss: 6.46 (6.40) Time: 0.408s, 2509.95/s (0.414s, 2473.08/s) LR: 2.927e-01 Data: 0.029 (0.035) +Train: 52 [ 200/312 ( 64%)] Loss: 6.41 (6.41) Time: 0.405s, 2528.40/s (0.412s, 2484.94/s) LR: 2.927e-01 Data: 0.028 (0.033) +Train: 52 [ 250/312 ( 80%)] Loss: 6.46 (6.42) Time: 0.405s, 2531.11/s (0.411s, 2492.53/s) LR: 2.927e-01 Data: 0.028 (0.032) +Train: 52 [ 300/312 ( 96%)] Loss: 6.49 (6.43) Time: 0.411s, 2490.22/s (0.410s, 2497.58/s) LR: 2.927e-01 Data: 0.034 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.423 (1.423) Loss: 5.666 ( 5.666) Acc@1: 11.621 ( 11.621) Acc@5: 27.051 ( 27.051) +Test: [ 48/48] Time: 0.090 (0.321) Loss: 
5.529 ( 5.608) Acc@1: 14.623 ( 12.050) Acc@5: 28.066 ( 26.354) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-51.pth.tar', 12.71800000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-47.pth.tar', 12.09200000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-49.pth.tar', 12.074000008850097) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-52.pth.tar', 12.049999984741211) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-50.pth.tar', 12.048000000610351) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-46.pth.tar', 11.672000005187988) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-42.pth.tar', 11.374000008239745) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-48.pth.tar', 11.304000005493164) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-45.pth.tar', 11.202000002441407) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-43.pth.tar', 10.942000004882813) + +Train: 53 [ 0/312 ( 0%)] Loss: 6.37 (6.37) Time: 1.629s, 628.57/s (1.629s, 628.57/s) LR: 2.889e-01 Data: 1.220 (1.220) +Train: 53 [ 50/312 ( 16%)] Loss: 6.35 (6.35) Time: 0.407s, 2514.59/s (0.435s, 2351.53/s) LR: 2.889e-01 Data: 0.027 (0.054) +Train: 53 [ 100/312 ( 32%)] Loss: 6.37 (6.37) Time: 0.409s, 2502.95/s (0.422s, 2426.49/s) LR: 2.889e-01 Data: 0.027 (0.041) +Train: 53 [ 150/312 ( 48%)] Loss: 6.43 (6.38) Time: 0.408s, 2510.43/s (0.417s, 2453.22/s) LR: 2.889e-01 Data: 0.027 (0.037) +Train: 53 [ 200/312 ( 64%)] Loss: 6.43 (6.39) Time: 0.406s, 2522.02/s (0.415s, 2466.93/s) LR: 2.889e-01 Data: 0.027 (0.034) +Train: 53 [ 250/312 ( 80%)] Loss: 6.40 (6.40) Time: 0.409s, 2504.50/s (0.414s, 2474.76/s) LR: 2.889e-01 Data: 0.028 (0.033) +Train: 53 [ 300/312 ( 96%)] Loss: 6.43 (6.41) Time: 0.411s, 2494.14/s (0.413s, 2479.84/s) LR: 2.889e-01 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.423 (1.423) Loss: 5.679 ( 5.679) Acc@1: 11.523 ( 11.523) Acc@5: 24.512 ( 24.512) +Test: [ 48/48] Time: 0.090 (0.317) Loss: 5.580 ( 5.618) Acc@1: 12.382 ( 12.302) Acc@5: 28.302 ( 26.394) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-51.pth.tar', 12.71800000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-53.pth.tar', 12.301999997253418) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-47.pth.tar', 12.09200000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-49.pth.tar', 12.074000008850097) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-52.pth.tar', 12.049999984741211) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-50.pth.tar', 12.048000000610351) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-46.pth.tar', 11.672000005187988) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-42.pth.tar', 11.374000008239745) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-48.pth.tar', 11.304000005493164) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-45.pth.tar', 11.202000002441407) + +Train: 54 [ 0/312 ( 0%)] Loss: 6.29 (6.29) Time: 1.550s, 660.80/s (1.550s, 660.80/s) LR: 2.852e-01 Data: 1.177 (1.177) +Train: 54 [ 50/312 ( 16%)] Loss: 6.30 (6.33) Time: 0.408s, 2508.01/s (0.428s, 2395.23/s) LR: 2.852e-01 Data: 0.028 (0.050) +Train: 54 [ 100/312 ( 32%)] Loss: 6.44 (6.35) Time: 0.407s, 2515.36/s (0.417s, 2453.89/s) LR: 2.852e-01 Data: 0.028 (0.039) +Train: 54 [ 150/312 ( 48%)] 
Loss: 6.35 (6.36) Time: 0.406s, 2520.78/s (0.414s, 2471.93/s) LR: 2.852e-01 Data: 0.027 (0.035) +Train: 54 [ 200/312 ( 64%)] Loss: 6.42 (6.38) Time: 0.405s, 2530.55/s (0.412s, 2484.38/s) LR: 2.852e-01 Data: 0.028 (0.034) +Train: 54 [ 250/312 ( 80%)] Loss: 6.50 (6.39) Time: 0.405s, 2525.79/s (0.411s, 2491.44/s) LR: 2.852e-01 Data: 0.027 (0.032) +Train: 54 [ 300/312 ( 96%)] Loss: 6.42 (6.40) Time: 0.406s, 2522.11/s (0.410s, 2494.96/s) LR: 2.852e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.422 (1.422) Loss: 5.666 ( 5.666) Acc@1: 11.426 ( 11.426) Acc@5: 24.805 ( 24.805) +Test: [ 48/48] Time: 0.090 (0.319) Loss: 5.552 ( 5.626) Acc@1: 13.797 ( 12.628) Acc@5: 27.712 ( 26.416) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-51.pth.tar', 12.71800000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-54.pth.tar', 12.628000014038086) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-53.pth.tar', 12.301999997253418) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-47.pth.tar', 12.09200000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-49.pth.tar', 12.074000008850097) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-52.pth.tar', 12.049999984741211) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-50.pth.tar', 12.048000000610351) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-46.pth.tar', 11.672000005187988) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-42.pth.tar', 11.374000008239745) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-48.pth.tar', 11.304000005493164) + +Train: 55 [ 0/312 ( 0%)] Loss: 6.29 (6.29) Time: 1.558s, 657.37/s (1.558s, 657.37/s) LR: 2.813e-01 Data: 1.181 (1.181) +Train: 55 [ 50/312 ( 16%)] Loss: 6.31 (6.32) Time: 0.406s, 2519.54/s (0.431s, 2377.58/s) LR: 2.813e-01 Data: 0.028 (0.050) +Train: 55 [ 100/312 ( 32%)] Loss: 6.39 (6.33) Time: 0.404s, 2535.09/s (0.418s, 2447.41/s) LR: 2.813e-01 Data: 0.028 (0.039) +Train: 55 [ 150/312 ( 48%)] Loss: 6.38 (6.35) Time: 0.405s, 2526.60/s (0.414s, 2473.09/s) LR: 2.813e-01 Data: 0.027 (0.035) +Train: 55 [ 200/312 ( 64%)] Loss: 6.44 (6.36) Time: 0.408s, 2511.37/s (0.412s, 2483.92/s) LR: 2.813e-01 Data: 0.028 (0.033) +Train: 55 [ 250/312 ( 80%)] Loss: 6.44 (6.37) Time: 0.409s, 2505.58/s (0.411s, 2489.32/s) LR: 2.813e-01 Data: 0.028 (0.032) +Train: 55 [ 300/312 ( 96%)] Loss: 6.40 (6.38) Time: 0.404s, 2533.76/s (0.411s, 2494.32/s) LR: 2.813e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.395 (1.395) Loss: 5.601 ( 5.601) Acc@1: 12.402 ( 12.402) Acc@5: 26.074 ( 26.074) +Test: [ 48/48] Time: 0.089 (0.319) Loss: 5.458 ( 5.565) Acc@1: 14.387 ( 12.590) Acc@5: 28.420 ( 27.050) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-51.pth.tar', 12.71800000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-54.pth.tar', 12.628000014038086) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-55.pth.tar', 12.590000011596679) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-53.pth.tar', 12.301999997253418) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-47.pth.tar', 12.09200000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-49.pth.tar', 12.074000008850097) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-52.pth.tar', 12.049999984741211) + 
('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-50.pth.tar', 12.048000000610351) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-46.pth.tar', 11.672000005187988) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-42.pth.tar', 11.374000008239745) + +Train: 56 [ 0/312 ( 0%)] Loss: 6.27 (6.27) Time: 2.033s, 503.72/s (2.033s, 503.72/s) LR: 2.775e-01 Data: 1.044 (1.044) +Train: 56 [ 50/312 ( 16%)] Loss: 6.28 (6.29) Time: 0.407s, 2513.06/s (0.436s, 2347.68/s) LR: 2.775e-01 Data: 0.030 (0.047) +Train: 56 [ 100/312 ( 32%)] Loss: 6.40 (6.31) Time: 0.407s, 2516.70/s (0.421s, 2430.24/s) LR: 2.775e-01 Data: 0.027 (0.037) +Train: 56 [ 150/312 ( 48%)] Loss: 6.39 (6.33) Time: 0.408s, 2509.00/s (0.417s, 2455.35/s) LR: 2.775e-01 Data: 0.027 (0.034) +Train: 56 [ 200/312 ( 64%)] Loss: 6.38 (6.34) Time: 0.403s, 2542.60/s (0.414s, 2470.57/s) LR: 2.775e-01 Data: 0.027 (0.033) +Train: 56 [ 250/312 ( 80%)] Loss: 6.47 (6.35) Time: 0.406s, 2523.16/s (0.413s, 2482.39/s) LR: 2.775e-01 Data: 0.027 (0.032) +Train: 56 [ 300/312 ( 96%)] Loss: 6.36 (6.36) Time: 0.408s, 2511.47/s (0.411s, 2489.16/s) LR: 2.775e-01 Data: 0.030 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.423 (1.423) Loss: 5.606 ( 5.606) Acc@1: 12.891 ( 12.891) Acc@5: 26.367 ( 26.367) +Test: [ 48/48] Time: 0.090 (0.323) Loss: 5.502 ( 5.570) Acc@1: 13.915 ( 12.786) Acc@5: 29.599 ( 27.398) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-56.pth.tar', 12.786000000610352) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-51.pth.tar', 12.71800000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-54.pth.tar', 12.628000014038086) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-55.pth.tar', 12.590000011596679) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-53.pth.tar', 12.301999997253418) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-47.pth.tar', 12.09200000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-49.pth.tar', 12.074000008850097) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-52.pth.tar', 12.049999984741211) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-50.pth.tar', 12.048000000610351) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-46.pth.tar', 11.672000005187988) + +Train: 57 [ 0/312 ( 0%)] Loss: 6.29 (6.29) Time: 1.556s, 658.11/s (1.556s, 658.11/s) LR: 2.736e-01 Data: 1.181 (1.181) +Train: 57 [ 50/312 ( 16%)] Loss: 6.29 (6.28) Time: 0.405s, 2525.76/s (0.429s, 2388.21/s) LR: 2.736e-01 Data: 0.027 (0.050) +Train: 57 [ 100/312 ( 32%)] Loss: 6.41 (6.30) Time: 0.408s, 2509.97/s (0.418s, 2448.73/s) LR: 2.736e-01 Data: 0.028 (0.039) +Train: 57 [ 150/312 ( 48%)] Loss: 6.43 (6.31) Time: 0.408s, 2506.90/s (0.415s, 2467.25/s) LR: 2.736e-01 Data: 0.029 (0.035) +Train: 57 [ 200/312 ( 64%)] Loss: 6.33 (6.32) Time: 0.403s, 2542.02/s (0.413s, 2480.48/s) LR: 2.736e-01 Data: 0.027 (0.033) +Train: 57 [ 250/312 ( 80%)] Loss: 6.45 (6.33) Time: 0.409s, 2503.71/s (0.411s, 2490.25/s) LR: 2.736e-01 Data: 0.033 (0.032) +Train: 57 [ 300/312 ( 96%)] Loss: 6.43 (6.34) Time: 0.406s, 2522.00/s (0.410s, 2496.33/s) LR: 2.736e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.394 (1.394) Loss: 5.664 ( 5.664) Acc@1: 10.449 ( 10.449) Acc@5: 26.758 ( 26.758) +Test: [ 48/48] Time: 0.091 (0.318) Loss: 5.510 ( 5.640) Acc@1: 14.269 ( 12.164) Acc@5: 30.189 ( 25.924) +Current checkpoints: + 
('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-56.pth.tar', 12.786000000610352) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-51.pth.tar', 12.71800000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-54.pth.tar', 12.628000014038086) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-55.pth.tar', 12.590000011596679) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-53.pth.tar', 12.301999997253418) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-57.pth.tar', 12.163999992675782) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-47.pth.tar', 12.09200000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-49.pth.tar', 12.074000008850097) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-52.pth.tar', 12.049999984741211) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-50.pth.tar', 12.048000000610351) + +Train: 58 [ 0/312 ( 0%)] Loss: 6.26 (6.26) Time: 1.912s, 535.56/s (1.912s, 535.56/s) LR: 2.697e-01 Data: 1.537 (1.537) +Train: 58 [ 50/312 ( 16%)] Loss: 6.31 (6.25) Time: 0.404s, 2532.53/s (0.436s, 2348.54/s) LR: 2.697e-01 Data: 0.028 (0.057) +Train: 58 [ 100/312 ( 32%)] Loss: 6.34 (6.27) Time: 0.403s, 2537.84/s (0.420s, 2436.02/s) LR: 2.697e-01 Data: 0.028 (0.043) +Train: 58 [ 150/312 ( 48%)] Loss: 6.34 (6.29) Time: 0.405s, 2525.98/s (0.415s, 2464.77/s) LR: 2.697e-01 Data: 0.028 (0.038) +Train: 58 [ 200/312 ( 64%)] Loss: 6.32 (6.30) Time: 0.408s, 2510.64/s (0.413s, 2477.14/s) LR: 2.697e-01 Data: 0.028 (0.035) +Train: 58 [ 250/312 ( 80%)] Loss: 6.35 (6.31) Time: 0.405s, 2529.61/s (0.412s, 2484.38/s) LR: 2.697e-01 Data: 0.027 (0.034) +Train: 58 [ 300/312 ( 96%)] Loss: 6.41 (6.33) Time: 0.406s, 2521.43/s (0.411s, 2491.50/s) LR: 2.697e-01 Data: 0.029 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.410 (1.410) Loss: 5.600 ( 5.600) Acc@1: 13.086 ( 13.086) Acc@5: 27.148 ( 27.148) +Test: [ 48/48] Time: 0.089 (0.317) Loss: 5.424 ( 5.562) Acc@1: 15.684 ( 12.980) Acc@5: 31.840 ( 27.198) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-58.pth.tar', 12.980000009460449) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-56.pth.tar', 12.786000000610352) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-51.pth.tar', 12.71800000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-54.pth.tar', 12.628000014038086) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-55.pth.tar', 12.590000011596679) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-53.pth.tar', 12.301999997253418) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-57.pth.tar', 12.163999992675782) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-47.pth.tar', 12.09200000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-49.pth.tar', 12.074000008850097) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-52.pth.tar', 12.049999984741211) + +Train: 59 [ 0/312 ( 0%)] Loss: 6.27 (6.27) Time: 1.581s, 647.58/s (1.581s, 647.58/s) LR: 2.658e-01 Data: 1.209 (1.209) +Train: 59 [ 50/312 ( 16%)] Loss: 6.23 (6.24) Time: 0.409s, 2506.36/s (0.431s, 2378.27/s) LR: 2.658e-01 Data: 0.028 (0.052) +Train: 59 [ 100/312 ( 32%)] Loss: 6.35 (6.26) Time: 0.404s, 2531.74/s (0.419s, 2443.70/s) LR: 2.658e-01 Data: 0.027 (0.040) +Train: 59 [ 150/312 ( 48%)] Loss: 6.32 (6.27) Time: 0.403s, 2541.12/s (0.414s, 2473.66/s) LR: 2.658e-01 Data: 0.027 
(0.036) +Train: 59 [ 200/312 ( 64%)] Loss: 6.33 (6.29) Time: 0.403s, 2539.92/s (0.411s, 2489.52/s) LR: 2.658e-01 Data: 0.028 (0.034) +Train: 59 [ 250/312 ( 80%)] Loss: 6.33 (6.30) Time: 0.405s, 2529.61/s (0.410s, 2498.07/s) LR: 2.658e-01 Data: 0.028 (0.033) +Train: 59 [ 300/312 ( 96%)] Loss: 6.35 (6.31) Time: 0.409s, 2506.07/s (0.409s, 2501.75/s) LR: 2.658e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.419 (1.419) Loss: 5.660 ( 5.660) Acc@1: 10.742 ( 10.742) Acc@5: 24.512 ( 24.512) +Test: [ 48/48] Time: 0.090 (0.318) Loss: 5.525 ( 5.633) Acc@1: 12.736 ( 11.840) Acc@5: 27.476 ( 25.228) +Train: 60 [ 0/312 ( 0%)] Loss: 6.24 (6.24) Time: 1.594s, 642.30/s (1.594s, 642.30/s) LR: 2.618e-01 Data: 1.084 (1.084) +Train: 60 [ 50/312 ( 16%)] Loss: 6.26 (6.22) Time: 0.405s, 2526.30/s (0.431s, 2378.09/s) LR: 2.618e-01 Data: 0.026 (0.048) +Train: 60 [ 100/312 ( 32%)] Loss: 6.24 (6.23) Time: 0.403s, 2541.43/s (0.418s, 2449.40/s) LR: 2.618e-01 Data: 0.027 (0.038) +Train: 60 [ 150/312 ( 48%)] Loss: 6.34 (6.25) Time: 0.405s, 2528.27/s (0.414s, 2475.36/s) LR: 2.618e-01 Data: 0.027 (0.035) +Train: 60 [ 200/312 ( 64%)] Loss: 6.36 (6.26) Time: 0.407s, 2513.32/s (0.412s, 2486.39/s) LR: 2.618e-01 Data: 0.027 (0.033) +Train: 60 [ 250/312 ( 80%)] Loss: 6.35 (6.28) Time: 0.408s, 2510.29/s (0.411s, 2490.76/s) LR: 2.618e-01 Data: 0.028 (0.032) +Train: 60 [ 300/312 ( 96%)] Loss: 6.35 (6.29) Time: 0.405s, 2528.49/s (0.410s, 2494.99/s) LR: 2.618e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.411 (1.411) Loss: 5.653 ( 5.653) Acc@1: 11.035 ( 11.035) Acc@5: 25.195 ( 25.195) +Test: [ 48/48] Time: 0.090 (0.319) Loss: 5.482 ( 5.580) Acc@1: 14.151 ( 12.810) Acc@5: 29.363 ( 26.630) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-58.pth.tar', 12.980000009460449) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-60.pth.tar', 12.810000006103516) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-56.pth.tar', 12.786000000610352) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-51.pth.tar', 12.71800000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-54.pth.tar', 12.628000014038086) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-55.pth.tar', 12.590000011596679) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-53.pth.tar', 12.301999997253418) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-57.pth.tar', 12.163999992675782) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-47.pth.tar', 12.09200000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-49.pth.tar', 12.074000008850097) + +Train: 61 [ 0/312 ( 0%)] Loss: 6.19 (6.19) Time: 1.571s, 651.86/s (1.571s, 651.86/s) LR: 2.578e-01 Data: 1.198 (1.198) +Train: 61 [ 50/312 ( 16%)] Loss: 6.21 (6.19) Time: 0.411s, 2492.71/s (0.427s, 2397.71/s) LR: 2.578e-01 Data: 0.034 (0.051) +Train: 61 [ 100/312 ( 32%)] Loss: 6.23 (6.21) Time: 0.408s, 2511.66/s (0.416s, 2458.65/s) LR: 2.578e-01 Data: 0.029 (0.040) +Train: 61 [ 150/312 ( 48%)] Loss: 6.28 (6.23) Time: 0.411s, 2489.20/s (0.414s, 2475.70/s) LR: 2.578e-01 Data: 0.029 (0.036) +Train: 61 [ 200/312 ( 64%)] Loss: 6.26 (6.24) Time: 0.405s, 2527.29/s (0.412s, 2486.04/s) LR: 2.578e-01 Data: 0.027 (0.034) +Train: 61 [ 250/312 ( 80%)] Loss: 6.32 (6.26) Time: 0.405s, 2528.17/s (0.410s, 2494.75/s) LR: 2.578e-01 Data: 0.028 (0.033) +Train: 61 [ 300/312 ( 96%)] Loss: 6.30 (6.27) Time: 0.408s, 
2512.65/s (0.410s, 2500.11/s) LR: 2.578e-01 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.401 (1.401) Loss: 5.644 ( 5.644) Acc@1: 12.793 ( 12.793) Acc@5: 25.000 ( 25.000) +Test: [ 48/48] Time: 0.091 (0.318) Loss: 5.475 ( 5.616) Acc@1: 14.033 ( 12.156) Acc@5: 29.599 ( 25.974) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-58.pth.tar', 12.980000009460449) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-60.pth.tar', 12.810000006103516) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-56.pth.tar', 12.786000000610352) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-51.pth.tar', 12.71800000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-54.pth.tar', 12.628000014038086) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-55.pth.tar', 12.590000011596679) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-53.pth.tar', 12.301999997253418) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-57.pth.tar', 12.163999992675782) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-61.pth.tar', 12.156000003356933) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-47.pth.tar', 12.09200000579834) + +Train: 62 [ 0/312 ( 0%)] Loss: 6.19 (6.19) Time: 1.910s, 536.26/s (1.910s, 536.26/s) LR: 2.538e-01 Data: 1.206 (1.206) +Train: 62 [ 50/312 ( 16%)] Loss: 6.14 (6.17) Time: 0.406s, 2523.63/s (0.437s, 2341.19/s) LR: 2.538e-01 Data: 0.028 (0.051) +Train: 62 [ 100/312 ( 32%)] Loss: 6.30 (6.19) Time: 0.403s, 2538.30/s (0.422s, 2427.81/s) LR: 2.538e-01 Data: 0.027 (0.039) +Train: 62 [ 150/312 ( 48%)] Loss: 6.31 (6.21) Time: 0.406s, 2520.09/s (0.417s, 2456.89/s) LR: 2.538e-01 Data: 0.027 (0.036) +Train: 62 [ 200/312 ( 64%)] Loss: 6.28 (6.23) Time: 0.407s, 2518.15/s (0.415s, 2470.17/s) LR: 2.538e-01 Data: 0.028 (0.034) +Train: 62 [ 250/312 ( 80%)] Loss: 6.31 (6.24) Time: 0.403s, 2538.61/s (0.413s, 2481.25/s) LR: 2.538e-01 Data: 0.027 (0.032) +Train: 62 [ 300/312 ( 96%)] Loss: 6.33 (6.25) Time: 0.402s, 2546.62/s (0.411s, 2491.08/s) LR: 2.538e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.417 (1.417) Loss: 5.681 ( 5.681) Acc@1: 12.012 ( 12.012) Acc@5: 23.633 ( 23.633) +Test: [ 48/48] Time: 0.089 (0.319) Loss: 5.528 ( 5.643) Acc@1: 13.915 ( 12.296) Acc@5: 27.594 ( 25.558) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-58.pth.tar', 12.980000009460449) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-60.pth.tar', 12.810000006103516) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-56.pth.tar', 12.786000000610352) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-51.pth.tar', 12.71800000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-54.pth.tar', 12.628000014038086) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-55.pth.tar', 12.590000011596679) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-53.pth.tar', 12.301999997253418) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-62.pth.tar', 12.296000000610352) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-57.pth.tar', 12.163999992675782) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-61.pth.tar', 12.156000003356933) + +Train: 63 [ 0/312 ( 0%)] Loss: 6.12 (6.12) Time: 1.620s, 632.05/s (1.620s, 632.05/s) LR: 2.497e-01 Data: 1.081 (1.081) +Train: 63 [ 50/312 ( 
16%)] Loss: 6.17 (6.15) Time: 0.402s, 2548.28/s (0.428s, 2391.71/s) LR: 2.497e-01 Data: 0.024 (0.048) +Train: 63 [ 100/312 ( 32%)] Loss: 6.19 (6.17) Time: 0.404s, 2531.71/s (0.417s, 2457.89/s) LR: 2.497e-01 Data: 0.027 (0.038) +Train: 63 [ 150/312 ( 48%)] Loss: 6.26 (6.19) Time: 0.409s, 2502.31/s (0.414s, 2476.02/s) LR: 2.497e-01 Data: 0.028 (0.035) +Train: 63 [ 200/312 ( 64%)] Loss: 6.31 (6.20) Time: 0.411s, 2489.50/s (0.412s, 2484.09/s) LR: 2.497e-01 Data: 0.031 (0.033) +Train: 63 [ 250/312 ( 80%)] Loss: 6.25 (6.22) Time: 0.405s, 2529.27/s (0.411s, 2490.57/s) LR: 2.497e-01 Data: 0.027 (0.032) +Train: 63 [ 300/312 ( 96%)] Loss: 6.32 (6.23) Time: 0.404s, 2531.62/s (0.410s, 2495.45/s) LR: 2.497e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.393 (1.393) Loss: 5.738 ( 5.738) Acc@1: 10.840 ( 10.840) Acc@5: 24.121 ( 24.121) +Test: [ 48/48] Time: 0.090 (0.320) Loss: 5.572 ( 5.697) Acc@1: 12.500 ( 11.680) Acc@5: 28.420 ( 24.426) +Train: 64 [ 0/312 ( 0%)] Loss: 6.10 (6.10) Time: 1.652s, 619.69/s (1.652s, 619.69/s) LR: 2.457e-01 Data: 1.278 (1.278) +Train: 64 [ 50/312 ( 16%)] Loss: 6.17 (6.12) Time: 0.405s, 2531.28/s (0.430s, 2379.29/s) LR: 2.457e-01 Data: 0.028 (0.052) +Train: 64 [ 100/312 ( 32%)] Loss: 6.21 (6.15) Time: 0.403s, 2539.10/s (0.417s, 2454.64/s) LR: 2.457e-01 Data: 0.028 (0.040) +Train: 64 [ 150/312 ( 48%)] Loss: 6.25 (6.17) Time: 0.405s, 2529.16/s (0.413s, 2481.22/s) LR: 2.457e-01 Data: 0.025 (0.036) +Train: 64 [ 200/312 ( 64%)] Loss: 6.22 (6.18) Time: 0.405s, 2530.29/s (0.411s, 2493.32/s) LR: 2.457e-01 Data: 0.027 (0.034) +Train: 64 [ 250/312 ( 80%)] Loss: 6.30 (6.20) Time: 0.408s, 2508.18/s (0.410s, 2497.61/s) LR: 2.457e-01 Data: 0.027 (0.032) +Train: 64 [ 300/312 ( 96%)] Loss: 6.26 (6.21) Time: 0.405s, 2526.63/s (0.410s, 2499.41/s) LR: 2.457e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.411 (1.411) Loss: 5.655 ( 5.655) Acc@1: 12.305 ( 12.305) Acc@5: 24.414 ( 24.414) +Test: [ 48/48] Time: 0.089 (0.322) Loss: 5.510 ( 5.589) Acc@1: 13.797 ( 12.566) Acc@5: 29.481 ( 26.248) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-58.pth.tar', 12.980000009460449) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-60.pth.tar', 12.810000006103516) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-56.pth.tar', 12.786000000610352) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-51.pth.tar', 12.71800000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-54.pth.tar', 12.628000014038086) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-55.pth.tar', 12.590000011596679) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-64.pth.tar', 12.566000014038085) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-53.pth.tar', 12.301999997253418) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-62.pth.tar', 12.296000000610352) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-57.pth.tar', 12.163999992675782) + +Train: 65 [ 0/312 ( 0%)] Loss: 6.03 (6.03) Time: 1.786s, 573.40/s (1.786s, 573.40/s) LR: 2.416e-01 Data: 1.170 (1.170) +Train: 65 [ 50/312 ( 16%)] Loss: 6.11 (6.10) Time: 0.403s, 2540.27/s (0.431s, 2376.99/s) LR: 2.416e-01 Data: 0.027 (0.050) +Train: 65 [ 100/312 ( 32%)] Loss: 6.11 (6.13) Time: 0.403s, 2538.72/s (0.418s, 2451.89/s) LR: 2.416e-01 Data: 0.026 (0.039) +Train: 65 [ 150/312 ( 48%)] Loss: 6.16 (6.15) Time: 0.408s, 2508.54/s (0.414s, 2474.11/s) LR: 
2.416e-01 Data: 0.029 (0.035) +Train: 65 [ 200/312 ( 64%)] Loss: 6.20 (6.16) Time: 0.407s, 2514.44/s (0.412s, 2482.95/s) LR: 2.416e-01 Data: 0.027 (0.033) +Train: 65 [ 250/312 ( 80%)] Loss: 6.27 (6.18) Time: 0.406s, 2519.77/s (0.412s, 2487.90/s) LR: 2.416e-01 Data: 0.028 (0.032) +Train: 65 [ 300/312 ( 96%)] Loss: 6.26 (6.19) Time: 0.406s, 2524.07/s (0.411s, 2491.95/s) LR: 2.416e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.414 (1.414) Loss: 5.734 ( 5.734) Acc@1: 10.547 ( 10.547) Acc@5: 23.535 ( 23.535) +Test: [ 48/48] Time: 0.090 (0.320) Loss: 5.565 ( 5.644) Acc@1: 12.028 ( 11.868) Acc@5: 25.943 ( 25.006) +Train: 66 [ 0/312 ( 0%)] Loss: 5.99 (5.99) Time: 1.922s, 532.88/s (1.922s, 532.88/s) LR: 2.375e-01 Data: 1.547 (1.547) +Train: 66 [ 50/312 ( 16%)] Loss: 6.08 (6.07) Time: 0.405s, 2526.98/s (0.437s, 2340.66/s) LR: 2.375e-01 Data: 0.028 (0.058) +Train: 66 [ 100/312 ( 32%)] Loss: 6.28 (6.10) Time: 0.407s, 2514.72/s (0.421s, 2432.55/s) LR: 2.375e-01 Data: 0.029 (0.043) +Train: 66 [ 150/312 ( 48%)] Loss: 6.25 (6.12) Time: 0.402s, 2548.47/s (0.415s, 2467.63/s) LR: 2.375e-01 Data: 0.027 (0.038) +Train: 66 [ 200/312 ( 64%)] Loss: 6.26 (6.14) Time: 0.407s, 2513.65/s (0.412s, 2484.92/s) LR: 2.375e-01 Data: 0.029 (0.035) +Train: 66 [ 250/312 ( 80%)] Loss: 6.20 (6.16) Time: 0.409s, 2501.24/s (0.411s, 2492.36/s) LR: 2.375e-01 Data: 0.031 (0.034) +Train: 66 [ 300/312 ( 96%)] Loss: 6.23 (6.17) Time: 0.405s, 2528.38/s (0.410s, 2496.30/s) LR: 2.375e-01 Data: 0.027 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.411 (1.411) Loss: 5.721 ( 5.721) Acc@1: 10.254 ( 10.254) Acc@5: 23.438 ( 23.438) +Test: [ 48/48] Time: 0.090 (0.320) Loss: 5.511 ( 5.649) Acc@1: 14.505 ( 11.638) Acc@5: 26.651 ( 24.508) +Train: 67 [ 0/312 ( 0%)] Loss: 6.06 (6.06) Time: 1.648s, 621.23/s (1.648s, 621.23/s) LR: 2.334e-01 Data: 1.216 (1.216) +Train: 67 [ 50/312 ( 16%)] Loss: 6.07 (6.07) Time: 0.408s, 2507.36/s (0.429s, 2385.58/s) LR: 2.334e-01 Data: 0.028 (0.051) +Train: 67 [ 100/312 ( 32%)] Loss: 6.10 (6.09) Time: 0.406s, 2519.62/s (0.418s, 2451.46/s) LR: 2.334e-01 Data: 0.027 (0.040) +Train: 67 [ 150/312 ( 48%)] Loss: 6.16 (6.11) Time: 0.412s, 2487.08/s (0.414s, 2470.72/s) LR: 2.334e-01 Data: 0.029 (0.036) +Train: 67 [ 200/312 ( 64%)] Loss: 6.17 (6.12) Time: 0.404s, 2533.26/s (0.413s, 2480.77/s) LR: 2.334e-01 Data: 0.026 (0.033) +Train: 67 [ 250/312 ( 80%)] Loss: 6.23 (6.14) Time: 0.404s, 2536.24/s (0.411s, 2489.87/s) LR: 2.334e-01 Data: 0.027 (0.032) +Train: 67 [ 300/312 ( 96%)] Loss: 6.17 (6.15) Time: 0.402s, 2545.38/s (0.410s, 2497.51/s) LR: 2.334e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.404 (1.404) Loss: 5.722 ( 5.722) Acc@1: 11.719 ( 11.719) Acc@5: 25.684 ( 25.684) +Test: [ 48/48] Time: 0.090 (0.318) Loss: 5.568 ( 5.669) Acc@1: 13.090 ( 12.176) Acc@5: 27.830 ( 24.590) +Current checkpoints: + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-58.pth.tar', 12.980000009460449) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-60.pth.tar', 12.810000006103516) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-56.pth.tar', 12.786000000610352) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-51.pth.tar', 12.71800000579834) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-54.pth.tar', 12.628000014038086) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-55.pth.tar', 12.590000011596679) + 
('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-64.pth.tar', 12.566000014038085) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-53.pth.tar', 12.301999997253418) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-62.pth.tar', 12.296000000610352) + ('./output/train/ImageNetTraining80.0-frac-1over4/checkpoint-67.pth.tar', 12.175999997558593) + +Train: 68 [ 0/312 ( 0%)] Loss: 6.02 (6.02) Time: 1.527s, 670.44/s (1.527s, 670.44/s) LR: 2.292e-01 Data: 1.156 (1.156) +Train: 68 [ 50/312 ( 16%)] Loss: 6.06 (6.03) Time: 0.411s, 2493.34/s (0.428s, 2393.71/s) LR: 2.292e-01 Data: 0.033 (0.050) +Train: 68 [ 100/312 ( 32%)] Loss: 6.10 (6.06) Time: 0.406s, 2522.80/s (0.418s, 2450.19/s) LR: 2.292e-01 Data: 0.027 (0.039) +Train: 68 [ 150/312 ( 48%)] Loss: 6.16 (6.08) Time: 0.408s, 2507.39/s (0.415s, 2469.33/s) LR: 2.292e-01 Data: 0.027 (0.035) +Train: 68 [ 200/312 ( 64%)] Loss: 6.19 (6.10) Time: 0.407s, 2513.54/s (0.413s, 2480.12/s) LR: 2.292e-01 Data: 0.029 (0.033) +Train: 68 [ 250/312 ( 80%)] Loss: 6.18 (6.11) Time: 0.406s, 2522.17/s (0.412s, 2487.91/s) LR: 2.292e-01 Data: 0.028 (0.032) +Train: 68 [ 300/312 ( 96%)] Loss: 6.25 (6.13) Time: 0.409s, 2502.34/s (0.411s, 2492.61/s) LR: 2.292e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.399 (1.399) Loss: 5.788 ( 5.788) Acc@1: 10.645 ( 10.645) Acc@5: 23.047 ( 23.047) +Test: [ 48/48] Time: 0.090 (0.319) Loss: 5.621 ( 5.722) Acc@1: 12.854 ( 11.442) Acc@5: 27.123 ( 23.586) +Train: 69 [ 0/312 ( 0%)] Loss: 6.01 (6.01) Time: 1.629s, 628.46/s (1.629s, 628.46/s) LR: 2.251e-01 Data: 1.255 (1.255) +Train: 69 [ 50/312 ( 16%)] Loss: 5.97 (6.02) Time: 0.406s, 2522.37/s (0.434s, 2361.81/s) LR: 2.251e-01 Data: 0.028 (0.055) +Train: 69 [ 100/312 ( 32%)] Loss: 6.02 (6.04) Time: 0.406s, 2522.16/s (0.420s, 2435.94/s) LR: 2.251e-01 Data: 0.026 (0.041) +Train: 69 [ 150/312 ( 48%)] Loss: 6.05 (6.06) Time: 0.406s, 2521.39/s (0.416s, 2462.35/s) LR: 2.251e-01 Data: 0.027 (0.037) +Train: 69 [ 200/312 ( 64%)] Loss: 6.20 (6.08) Time: 0.408s, 2507.54/s (0.414s, 2476.15/s) LR: 2.251e-01 Data: 0.029 (0.035) +Train: 69 [ 250/312 ( 80%)] Loss: 6.18 (6.09) Time: 0.405s, 2529.68/s (0.412s, 2483.13/s) LR: 2.251e-01 Data: 0.027 (0.033) +Train: 69 [ 300/312 ( 96%)] Loss: 6.19 (6.11) Time: 0.404s, 2532.38/s (0.411s, 2489.69/s) LR: 2.251e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.421 (1.421) Loss: 5.746 ( 5.746) Acc@1: 11.133 ( 11.133) Acc@5: 25.000 ( 25.000) +Test: [ 48/48] Time: 0.090 (0.318) Loss: 5.525 ( 5.685) Acc@1: 12.972 ( 11.938) Acc@5: 25.354 ( 24.312) +Train: 70 [ 0/312 ( 0%)] Loss: 5.98 (5.98) Time: 1.589s, 644.55/s (1.589s, 644.55/s) LR: 2.209e-01 Data: 1.079 (1.079) +Train: 70 [ 50/312 ( 16%)] Loss: 6.04 (5.99) Time: 0.405s, 2528.12/s (0.430s, 2379.59/s) LR: 2.209e-01 Data: 0.028 (0.048) +Train: 70 [ 100/312 ( 32%)] Loss: 6.09 (6.01) Time: 0.405s, 2528.55/s (0.418s, 2448.35/s) LR: 2.209e-01 Data: 0.028 (0.038) +Train: 70 [ 150/312 ( 48%)] Loss: 6.16 (6.04) Time: 0.406s, 2523.50/s (0.414s, 2470.89/s) LR: 2.209e-01 Data: 0.027 (0.035) +Train: 70 [ 200/312 ( 64%)] Loss: 6.07 (6.05) Time: 0.406s, 2522.30/s (0.413s, 2481.93/s) LR: 2.209e-01 Data: 0.027 (0.033) +Train: 70 [ 250/312 ( 80%)] Loss: 6.18 (6.07) Time: 0.404s, 2531.81/s (0.411s, 2489.83/s) LR: 2.209e-01 Data: 0.027 (0.032) +Train: 70 [ 300/312 ( 96%)] Loss: 6.17 (6.08) Time: 0.407s, 2517.89/s (0.410s, 2494.98/s) LR: 2.209e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars 
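Note on the recurring "Distributing BatchNorm running means and vars" lines: in distributed data-parallel training each rank accumulates its own BatchNorm running_mean/running_var, so the buffers are synchronized across ranks before every validation pass (timm exposes this as the --dist-bn option, with 'broadcast' and 'reduce' modes). Below is a minimal sketch of what a 'reduce'-style sync does, assuming an already-initialized torch.distributed process group; reduce_bn_buffers is an illustrative helper written for this note, not timm's exact implementation.

import torch.distributed as dist
import torch.nn as nn

def reduce_bn_buffers(model: nn.Module, world_size: int) -> None:
    # Average BatchNorm running stats across ranks so every rank
    # evaluates with the same normalization statistics.
    for module in model.modules():
        if isinstance(module, nn.modules.batchnorm._BatchNorm):
            for buf in (module.running_mean, module.running_var):
                dist.all_reduce(buf, op=dist.ReduceOp.SUM)  # sum per-rank stats
                buf /= world_size                           # then average
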
+Test: [ 0/48] Time: 1.403 (1.403) Loss: 5.703 ( 5.703) Acc@1: 11.523 ( 11.523) Acc@5: 25.000 ( 25.000) +Test: [ 48/48] Time: 0.090 (0.318) Loss: 5.540 ( 5.649) Acc@1: 14.033 ( 11.900) Acc@5: 26.415 ( 24.592) +Train: 71 [ 0/312 ( 0%)] Loss: 5.93 (5.93) Time: 1.756s, 583.11/s (1.756s, 583.11/s) LR: 2.167e-01 Data: 1.382 (1.382) +Train: 71 [ 50/312 ( 16%)] Loss: 5.98 (5.96) Time: 0.408s, 2509.53/s (0.433s, 2363.96/s) LR: 2.167e-01 Data: 0.028 (0.054) +Train: 71 [ 100/312 ( 32%)] Loss: 6.05 (5.99) Time: 0.405s, 2525.52/s (0.420s, 2436.73/s) LR: 2.167e-01 Data: 0.027 (0.041) +Train: 71 [ 150/312 ( 48%)] Loss: 6.05 (6.01) Time: 0.403s, 2540.60/s (0.415s, 2465.36/s) LR: 2.167e-01 Data: 0.027 (0.037) +Train: 71 [ 200/312 ( 64%)] Loss: 6.10 (6.03) Time: 0.406s, 2520.69/s (0.413s, 2480.08/s) LR: 2.167e-01 Data: 0.028 (0.034) +Train: 71 [ 250/312 ( 80%)] Loss: 6.13 (6.04) Time: 0.406s, 2525.15/s (0.412s, 2487.26/s) LR: 2.167e-01 Data: 0.027 (0.033) +Train: 71 [ 300/312 ( 96%)] Loss: 6.11 (6.06) Time: 0.406s, 2520.69/s (0.411s, 2493.18/s) LR: 2.167e-01 Data: 0.030 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.401 (1.401) Loss: 5.719 ( 5.719) Acc@1: 11.523 ( 11.523) Acc@5: 24.121 ( 24.121) +Test: [ 48/48] Time: 0.090 (0.319) Loss: 5.614 ( 5.684) Acc@1: 12.618 ( 11.664) Acc@5: 26.533 ( 23.868) +Train: 72 [ 0/312 ( 0%)] Loss: 5.93 (5.93) Time: 1.714s, 597.26/s (1.714s, 597.26/s) LR: 2.126e-01 Data: 1.339 (1.339) +Train: 72 [ 50/312 ( 16%)] Loss: 5.91 (5.95) Time: 0.406s, 2525.11/s (0.436s, 2345.97/s) LR: 2.126e-01 Data: 0.028 (0.057) +Train: 72 [ 100/312 ( 32%)] Loss: 5.98 (5.97) Time: 0.405s, 2531.41/s (0.421s, 2429.95/s) LR: 2.126e-01 Data: 0.027 (0.042) +Train: 72 [ 150/312 ( 48%)] Loss: 5.99 (5.99) Time: 0.406s, 2520.95/s (0.416s, 2459.64/s) LR: 2.126e-01 Data: 0.027 (0.038) +Train: 72 [ 200/312 ( 64%)] Loss: 6.08 (6.01) Time: 0.409s, 2505.98/s (0.414s, 2473.28/s) LR: 2.126e-01 Data: 0.027 (0.035) +Train: 72 [ 250/312 ( 80%)] Loss: 6.12 (6.03) Time: 0.405s, 2526.33/s (0.413s, 2481.58/s) LR: 2.126e-01 Data: 0.028 (0.034) +Train: 72 [ 300/312 ( 96%)] Loss: 6.12 (6.04) Time: 0.407s, 2514.99/s (0.412s, 2487.27/s) LR: 2.126e-01 Data: 0.028 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.395 (1.395) Loss: 5.808 ( 5.808) Acc@1: 10.840 ( 10.840) Acc@5: 20.801 ( 20.801) +Test: [ 48/48] Time: 0.090 (0.316) Loss: 5.695 ( 5.758) Acc@1: 10.731 ( 10.952) Acc@5: 23.939 ( 22.650) +Train: 73 [ 0/312 ( 0%)] Loss: 5.88 (5.88) Time: 1.729s, 592.20/s (1.729s, 592.20/s) LR: 2.084e-01 Data: 1.355 (1.355) +Train: 73 [ 50/312 ( 16%)] Loss: 5.94 (5.92) Time: 0.407s, 2517.59/s (0.433s, 2362.90/s) LR: 2.084e-01 Data: 0.027 (0.054) +Train: 73 [ 100/312 ( 32%)] Loss: 5.94 (5.94) Time: 0.407s, 2514.21/s (0.421s, 2433.11/s) LR: 2.084e-01 Data: 0.029 (0.041) +Train: 73 [ 150/312 ( 48%)] Loss: 6.04 (5.97) Time: 0.406s, 2519.67/s (0.417s, 2458.28/s) LR: 2.084e-01 Data: 0.028 (0.037) +Train: 73 [ 200/312 ( 64%)] Loss: 6.03 (5.99) Time: 0.413s, 2480.88/s (0.414s, 2471.10/s) LR: 2.084e-01 Data: 0.032 (0.035) +Train: 73 [ 250/312 ( 80%)] Loss: 6.10 (6.00) Time: 0.413s, 2481.78/s (0.413s, 2478.32/s) LR: 2.084e-01 Data: 0.032 (0.033) +Train: 73 [ 300/312 ( 96%)] Loss: 6.07 (6.02) Time: 0.409s, 2504.95/s (0.412s, 2483.37/s) LR: 2.084e-01 Data: 0.026 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.417 (1.417) Loss: 5.777 ( 5.777) Acc@1: 10.449 ( 10.449) Acc@5: 21.973 ( 21.973) +Test: [ 48/48] Time: 0.090 (0.320) Loss: 5.630 ( 5.736) Acc@1: 11.321 ( 
11.040) Acc@5: 23.821 ( 22.828) +Train: 74 [ 0/312 ( 0%)] Loss: 5.86 (5.86) Time: 1.867s, 548.36/s (1.867s, 548.36/s) LR: 2.042e-01 Data: 1.494 (1.494) +Train: 74 [ 50/312 ( 16%)] Loss: 5.94 (5.89) Time: 0.406s, 2520.01/s (0.435s, 2351.81/s) LR: 2.042e-01 Data: 0.027 (0.057) +Train: 74 [ 100/312 ( 32%)] Loss: 5.93 (5.92) Time: 0.403s, 2542.46/s (0.421s, 2434.07/s) LR: 2.042e-01 Data: 0.027 (0.043) +Train: 74 [ 150/312 ( 48%)] Loss: 5.99 (5.94) Time: 0.403s, 2542.79/s (0.415s, 2468.18/s) LR: 2.042e-01 Data: 0.028 (0.038) +Train: 74 [ 200/312 ( 64%)] Loss: 6.01 (5.96) Time: 0.403s, 2538.26/s (0.412s, 2484.12/s) LR: 2.042e-01 Data: 0.027 (0.035) +Train: 74 [ 250/312 ( 80%)] Loss: 6.09 (5.98) Time: 0.405s, 2525.31/s (0.411s, 2492.30/s) LR: 2.042e-01 Data: 0.027 (0.034) +Train: 74 [ 300/312 ( 96%)] Loss: 6.07 (5.99) Time: 0.406s, 2523.60/s (0.410s, 2495.16/s) LR: 2.042e-01 Data: 0.027 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.422 (1.422) Loss: 5.867 ( 5.867) Acc@1: 10.547 ( 10.547) Acc@5: 21.484 ( 21.484) +Test: [ 48/48] Time: 0.089 (0.320) Loss: 5.710 ( 5.788) Acc@1: 12.382 ( 10.710) Acc@5: 25.118 ( 22.092) +Train: 75 [ 0/312 ( 0%)] Loss: 5.84 (5.84) Time: 1.627s, 629.55/s (1.627s, 629.55/s) LR: 2.000e-01 Data: 1.256 (1.256) +Train: 75 [ 50/312 ( 16%)] Loss: 5.92 (5.87) Time: 0.404s, 2534.69/s (0.428s, 2394.96/s) LR: 2.000e-01 Data: 0.027 (0.051) +Train: 75 [ 100/312 ( 32%)] Loss: 5.92 (5.90) Time: 0.406s, 2523.80/s (0.417s, 2457.60/s) LR: 2.000e-01 Data: 0.027 (0.040) +Train: 75 [ 150/312 ( 48%)] Loss: 5.98 (5.92) Time: 0.408s, 2510.98/s (0.414s, 2475.64/s) LR: 2.000e-01 Data: 0.029 (0.036) +Train: 75 [ 200/312 ( 64%)] Loss: 6.02 (5.94) Time: 0.405s, 2530.65/s (0.412s, 2486.71/s) LR: 2.000e-01 Data: 0.027 (0.034) +Train: 75 [ 250/312 ( 80%)] Loss: 5.98 (5.96) Time: 0.407s, 2513.34/s (0.410s, 2495.76/s) LR: 2.000e-01 Data: 0.028 (0.032) +Train: 75 [ 300/312 ( 96%)] Loss: 6.14 (5.97) Time: 0.407s, 2514.91/s (0.410s, 2500.59/s) LR: 2.000e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.422 (1.422) Loss: 5.779 ( 5.779) Acc@1: 10.645 ( 10.645) Acc@5: 22.168 ( 22.168) +Test: [ 48/48] Time: 0.091 (0.321) Loss: 5.527 ( 5.738) Acc@1: 13.325 ( 11.084) Acc@5: 27.358 ( 23.076) +Train: 76 [ 0/312 ( 0%)] Loss: 5.83 (5.83) Time: 1.796s, 570.16/s (1.796s, 570.16/s) LR: 1.958e-01 Data: 1.088 (1.088) +Train: 76 [ 50/312 ( 16%)] Loss: 5.90 (5.85) Time: 0.412s, 2485.31/s (0.435s, 2355.87/s) LR: 1.958e-01 Data: 0.029 (0.048) +Train: 76 [ 100/312 ( 32%)] Loss: 5.97 (5.88) Time: 0.406s, 2520.94/s (0.421s, 2431.42/s) LR: 1.958e-01 Data: 0.028 (0.038) +Train: 76 [ 150/312 ( 48%)] Loss: 6.00 (5.90) Time: 0.404s, 2537.41/s (0.417s, 2457.82/s) LR: 1.958e-01 Data: 0.027 (0.035) +Train: 76 [ 200/312 ( 64%)] Loss: 5.94 (5.92) Time: 0.405s, 2530.79/s (0.414s, 2475.23/s) LR: 1.958e-01 Data: 0.026 (0.033) +Train: 76 [ 250/312 ( 80%)] Loss: 5.92 (5.93) Time: 0.405s, 2527.70/s (0.412s, 2486.29/s) LR: 1.958e-01 Data: 0.029 (0.032) +Train: 76 [ 300/312 ( 96%)] Loss: 5.96 (5.95) Time: 0.407s, 2518.23/s (0.411s, 2492.62/s) LR: 1.958e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.417 (1.417) Loss: 5.844 ( 5.844) Acc@1: 9.375 ( 9.375) Acc@5: 21.973 ( 21.973) +Test: [ 48/48] Time: 0.090 (0.319) Loss: 5.683 ( 5.791) Acc@1: 11.439 ( 10.964) Acc@5: 24.057 ( 22.244) +Train: 77 [ 0/312 ( 0%)] Loss: 5.76 (5.76) Time: 1.697s, 603.56/s (1.697s, 603.56/s) LR: 1.916e-01 Data: 1.323 (1.323) +Train: 77 [ 50/312 ( 16%)] 
Loss: 5.86 (5.82) Time: 0.408s, 2506.94/s (0.431s, 2375.49/s) LR: 1.916e-01 Data: 0.028 (0.053) +Train: 77 [ 100/312 ( 32%)] Loss: 5.90 (5.85) Time: 0.408s, 2509.66/s (0.419s, 2443.64/s) LR: 1.916e-01 Data: 0.028 (0.040) +Train: 77 [ 150/312 ( 48%)] Loss: 5.95 (5.87) Time: 0.404s, 2532.16/s (0.415s, 2467.19/s) LR: 1.916e-01 Data: 0.027 (0.036) +Train: 77 [ 200/312 ( 64%)] Loss: 5.93 (5.89) Time: 0.402s, 2545.03/s (0.412s, 2482.65/s) LR: 1.916e-01 Data: 0.027 (0.034) +Train: 77 [ 250/312 ( 80%)] Loss: 5.99 (5.91) Time: 0.404s, 2531.99/s (0.411s, 2492.36/s) LR: 1.916e-01 Data: 0.027 (0.033) +Train: 77 [ 300/312 ( 96%)] Loss: 6.04 (5.92) Time: 0.407s, 2513.56/s (0.410s, 2496.91/s) LR: 1.916e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.419 (1.419) Loss: 5.921 ( 5.921) Acc@1: 10.059 ( 10.059) Acc@5: 20.312 ( 20.312) +Test: [ 48/48] Time: 0.090 (0.318) Loss: 5.754 ( 5.851) Acc@1: 10.142 ( 10.402) Acc@5: 23.585 ( 21.440) +Train: 78 [ 0/312 ( 0%)] Loss: 5.78 (5.78) Time: 1.867s, 548.53/s (1.867s, 548.53/s) LR: 1.874e-01 Data: 1.494 (1.494) +Train: 78 [ 50/312 ( 16%)] Loss: 5.77 (5.80) Time: 0.405s, 2530.10/s (0.434s, 2361.40/s) LR: 1.874e-01 Data: 0.026 (0.056) +Train: 78 [ 100/312 ( 32%)] Loss: 5.90 (5.83) Time: 0.409s, 2501.19/s (0.420s, 2436.99/s) LR: 1.874e-01 Data: 0.029 (0.042) +Train: 78 [ 150/312 ( 48%)] Loss: 5.90 (5.85) Time: 0.408s, 2507.83/s (0.416s, 2460.17/s) LR: 1.874e-01 Data: 0.028 (0.037) +Train: 78 [ 200/312 ( 64%)] Loss: 5.92 (5.87) Time: 0.404s, 2536.94/s (0.414s, 2475.44/s) LR: 1.874e-01 Data: 0.028 (0.035) +Train: 78 [ 250/312 ( 80%)] Loss: 6.02 (5.88) Time: 0.402s, 2550.05/s (0.412s, 2487.92/s) LR: 1.874e-01 Data: 0.027 (0.034) +Train: 78 [ 300/312 ( 96%)] Loss: 5.96 (5.90) Time: 0.403s, 2538.90/s (0.410s, 2495.81/s) LR: 1.874e-01 Data: 0.026 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.421 (1.421) Loss: 5.929 ( 5.929) Acc@1: 9.473 ( 9.473) Acc@5: 20.508 ( 20.508) +Test: [ 48/48] Time: 0.090 (0.320) Loss: 5.747 ( 5.876) Acc@1: 10.849 ( 9.964) Acc@5: 22.406 ( 21.294) +Train: 79 [ 0/312 ( 0%)] Loss: 5.77 (5.77) Time: 1.639s, 624.72/s (1.639s, 624.72/s) LR: 1.833e-01 Data: 1.265 (1.265) +Train: 79 [ 50/312 ( 16%)] Loss: 5.76 (5.76) Time: 0.407s, 2515.44/s (0.436s, 2346.76/s) LR: 1.833e-01 Data: 0.028 (0.057) +Train: 79 [ 100/312 ( 32%)] Loss: 5.83 (5.79) Time: 0.407s, 2516.65/s (0.421s, 2433.58/s) LR: 1.833e-01 Data: 0.033 (0.042) +Train: 79 [ 150/312 ( 48%)] Loss: 5.90 (5.82) Time: 0.406s, 2520.55/s (0.415s, 2467.36/s) LR: 1.833e-01 Data: 0.033 (0.038) +Train: 79 [ 200/312 ( 64%)] Loss: 5.86 (5.84) Time: 0.402s, 2547.36/s (0.412s, 2485.35/s) LR: 1.833e-01 Data: 0.027 (0.035) +Train: 79 [ 250/312 ( 80%)] Loss: 5.88 (5.86) Time: 0.405s, 2529.75/s (0.411s, 2494.34/s) LR: 1.833e-01 Data: 0.027 (0.034) +Train: 79 [ 300/312 ( 96%)] Loss: 5.95 (5.88) Time: 0.413s, 2482.26/s (0.410s, 2497.69/s) LR: 1.833e-01 Data: 0.029 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.442 (1.442) Loss: 5.942 ( 5.942) Acc@1: 9.180 ( 9.180) Acc@5: 21.289 ( 21.289) +Test: [ 48/48] Time: 0.090 (0.319) Loss: 5.706 ( 5.860) Acc@1: 12.500 ( 10.198) Acc@5: 24.410 ( 21.380) +Train: 80 [ 0/312 ( 0%)] Loss: 5.76 (5.76) Time: 1.564s, 654.86/s (1.564s, 654.86/s) LR: 1.791e-01 Data: 1.192 (1.192) +Train: 80 [ 50/312 ( 16%)] Loss: 5.85 (5.75) Time: 0.404s, 2535.91/s (0.427s, 2398.99/s) LR: 1.791e-01 Data: 0.026 (0.051) +Train: 80 [ 100/312 ( 32%)] Loss: 5.77 (5.77) Time: 0.413s, 2482.09/s (0.416s, 
2459.92/s) LR: 1.791e-01 Data: 0.028 (0.039) +Train: 80 [ 150/312 ( 48%)] Loss: 5.78 (5.80) Time: 0.408s, 2509.47/s (0.413s, 2477.09/s) LR: 1.791e-01 Data: 0.028 (0.035) +Train: 80 [ 200/312 ( 64%)] Loss: 5.88 (5.82) Time: 0.409s, 2502.09/s (0.412s, 2485.77/s) LR: 1.791e-01 Data: 0.033 (0.034) +Train: 80 [ 250/312 ( 80%)] Loss: 6.01 (5.84) Time: 0.404s, 2535.83/s (0.410s, 2494.53/s) LR: 1.791e-01 Data: 0.028 (0.032) +Train: 80 [ 300/312 ( 96%)] Loss: 5.91 (5.85) Time: 0.405s, 2531.03/s (0.409s, 2501.27/s) LR: 1.791e-01 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.428 (1.428) Loss: 5.956 ( 5.956) Acc@1: 9.473 ( 9.473) Acc@5: 19.336 ( 19.336) +Test: [ 48/48] Time: 0.089 (0.318) Loss: 5.706 ( 5.884) Acc@1: 12.618 ( 10.056) Acc@5: 24.528 ( 20.714) +Train: 81 [ 0/312 ( 0%)] Loss: 5.72 (5.72) Time: 1.562s, 655.64/s (1.562s, 655.64/s) LR: 1.749e-01 Data: 1.070 (1.070) +Train: 81 [ 50/312 ( 16%)] Loss: 5.73 (5.72) Time: 0.406s, 2522.05/s (0.430s, 2383.98/s) LR: 1.749e-01 Data: 0.027 (0.049) +Train: 81 [ 100/312 ( 32%)] Loss: 5.79 (5.74) Time: 0.404s, 2531.89/s (0.418s, 2448.99/s) LR: 1.749e-01 Data: 0.028 (0.038) +Train: 81 [ 150/312 ( 48%)] Loss: 5.85 (5.77) Time: 0.404s, 2532.45/s (0.414s, 2475.41/s) LR: 1.749e-01 Data: 0.028 (0.035) +Train: 81 [ 200/312 ( 64%)] Loss: 5.96 (5.79) Time: 0.404s, 2535.29/s (0.412s, 2488.03/s) LR: 1.749e-01 Data: 0.027 (0.033) +Train: 81 [ 250/312 ( 80%)] Loss: 6.01 (5.81) Time: 0.408s, 2508.86/s (0.411s, 2492.93/s) LR: 1.749e-01 Data: 0.028 (0.032) +Train: 81 [ 300/312 ( 96%)] Loss: 5.89 (5.82) Time: 0.404s, 2537.41/s (0.410s, 2497.67/s) LR: 1.749e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.409 (1.409) Loss: 6.028 ( 6.028) Acc@1: 9.473 ( 9.473) Acc@5: 19.141 ( 19.141) +Test: [ 48/48] Time: 0.089 (0.322) Loss: 5.780 ( 5.896) Acc@1: 10.024 ( 10.226) Acc@5: 22.877 ( 21.054) +Train: 82 [ 0/312 ( 0%)] Loss: 5.69 (5.69) Time: 1.559s, 656.83/s (1.559s, 656.83/s) LR: 1.708e-01 Data: 1.189 (1.189) +Train: 82 [ 50/312 ( 16%)] Loss: 5.69 (5.69) Time: 0.402s, 2545.47/s (0.425s, 2408.96/s) LR: 1.708e-01 Data: 0.029 (0.050) +Train: 82 [ 100/312 ( 32%)] Loss: 5.81 (5.72) Time: 0.404s, 2532.85/s (0.414s, 2472.23/s) LR: 1.708e-01 Data: 0.029 (0.039) +Train: 82 [ 150/312 ( 48%)] Loss: 5.81 (5.74) Time: 0.406s, 2519.94/s (0.411s, 2488.71/s) LR: 1.708e-01 Data: 0.027 (0.035) +Train: 82 [ 200/312 ( 64%)] Loss: 5.81 (5.76) Time: 0.403s, 2540.42/s (0.410s, 2495.47/s) LR: 1.708e-01 Data: 0.027 (0.033) +Train: 82 [ 250/312 ( 80%)] Loss: 5.87 (5.78) Time: 0.404s, 2534.47/s (0.409s, 2503.92/s) LR: 1.708e-01 Data: 0.028 (0.032) +Train: 82 [ 300/312 ( 96%)] Loss: 5.83 (5.80) Time: 0.402s, 2548.96/s (0.408s, 2510.60/s) LR: 1.708e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.416 (1.416) Loss: 5.946 ( 5.946) Acc@1: 9.277 ( 9.277) Acc@5: 20.117 ( 20.117) +Test: [ 48/48] Time: 0.089 (0.320) Loss: 5.807 ( 5.933) Acc@1: 10.967 ( 9.934) Acc@5: 23.703 ( 20.462) +Train: 83 [ 0/312 ( 0%)] Loss: 5.70 (5.70) Time: 1.724s, 594.14/s (1.724s, 594.14/s) LR: 1.666e-01 Data: 1.235 (1.235) +Train: 83 [ 50/312 ( 16%)] Loss: 5.66 (5.66) Time: 0.404s, 2533.95/s (0.430s, 2381.56/s) LR: 1.666e-01 Data: 0.028 (0.051) +Train: 83 [ 100/312 ( 32%)] Loss: 5.84 (5.69) Time: 0.410s, 2495.58/s (0.418s, 2452.37/s) LR: 1.666e-01 Data: 0.028 (0.040) +Train: 83 [ 150/312 ( 48%)] Loss: 5.84 (5.72) Time: 0.407s, 2514.19/s (0.414s, 2473.42/s) LR: 1.666e-01 Data: 0.027 (0.036) +Train: 83 [ 
200/312 ( 64%)] Loss: 5.74 (5.74) Time: 0.407s, 2514.92/s (0.413s, 2482.13/s) LR: 1.666e-01 Data: 0.027 (0.034) +Train: 83 [ 250/312 ( 80%)] Loss: 5.81 (5.76) Time: 0.404s, 2532.22/s (0.411s, 2489.36/s) LR: 1.666e-01 Data: 0.027 (0.033) +Train: 83 [ 300/312 ( 96%)] Loss: 5.78 (5.78) Time: 0.404s, 2535.59/s (0.410s, 2496.42/s) LR: 1.666e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.412 (1.412) Loss: 6.093 ( 6.093) Acc@1: 7.910 ( 7.910) Acc@5: 18.359 ( 18.359) +Test: [ 48/48] Time: 0.089 (0.321) Loss: 5.972 ( 6.032) Acc@1: 9.434 ( 9.114) Acc@5: 20.519 ( 19.122) +Train: 84 [ 0/312 ( 0%)] Loss: 5.63 (5.63) Time: 1.687s, 606.82/s (1.687s, 606.82/s) LR: 1.625e-01 Data: 1.314 (1.314) +Train: 84 [ 50/312 ( 16%)] Loss: 5.77 (5.65) Time: 0.406s, 2519.68/s (0.430s, 2382.39/s) LR: 1.625e-01 Data: 0.028 (0.053) +Train: 84 [ 100/312 ( 32%)] Loss: 5.78 (5.67) Time: 0.407s, 2516.91/s (0.418s, 2448.40/s) LR: 1.625e-01 Data: 0.027 (0.041) +Train: 84 [ 150/312 ( 48%)] Loss: 5.82 (5.70) Time: 0.402s, 2548.66/s (0.414s, 2472.50/s) LR: 1.625e-01 Data: 0.026 (0.036) +Train: 84 [ 200/312 ( 64%)] Loss: 5.79 (5.72) Time: 0.401s, 2551.43/s (0.411s, 2489.56/s) LR: 1.625e-01 Data: 0.028 (0.034) +Train: 84 [ 250/312 ( 80%)] Loss: 5.91 (5.73) Time: 0.402s, 2549.09/s (0.410s, 2500.54/s) LR: 1.625e-01 Data: 0.026 (0.033) +Train: 84 [ 300/312 ( 96%)] Loss: 5.80 (5.75) Time: 0.404s, 2532.47/s (0.409s, 2506.64/s) LR: 1.625e-01 Data: 0.026 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.422 (1.422) Loss: 5.934 ( 5.934) Acc@1: 10.254 ( 10.254) Acc@5: 19.824 ( 19.824) +Test: [ 48/48] Time: 0.090 (0.322) Loss: 5.791 ( 5.877) Acc@1: 10.495 ( 10.078) Acc@5: 22.877 ( 20.898) +Train: 85 [ 0/312 ( 0%)] Loss: 5.67 (5.67) Time: 1.511s, 677.55/s (1.511s, 677.55/s) LR: 1.584e-01 Data: 1.138 (1.138) +Train: 85 [ 50/312 ( 16%)] Loss: 5.71 (5.62) Time: 0.407s, 2515.97/s (0.429s, 2385.18/s) LR: 1.584e-01 Data: 0.028 (0.050) +Train: 85 [ 100/312 ( 32%)] Loss: 5.70 (5.65) Time: 0.405s, 2528.29/s (0.418s, 2450.49/s) LR: 1.584e-01 Data: 0.028 (0.039) +Train: 85 [ 150/312 ( 48%)] Loss: 5.67 (5.67) Time: 0.405s, 2529.63/s (0.414s, 2475.99/s) LR: 1.584e-01 Data: 0.027 (0.036) +Train: 85 [ 200/312 ( 64%)] Loss: 5.75 (5.69) Time: 0.411s, 2494.31/s (0.412s, 2488.30/s) LR: 1.584e-01 Data: 0.030 (0.034) +Train: 85 [ 250/312 ( 80%)] Loss: 5.84 (5.71) Time: 0.406s, 2520.47/s (0.411s, 2493.70/s) LR: 1.584e-01 Data: 0.028 (0.032) +Train: 85 [ 300/312 ( 96%)] Loss: 5.79 (5.72) Time: 0.404s, 2535.67/s (0.410s, 2498.48/s) LR: 1.584e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.424 (1.424) Loss: 6.078 ( 6.078) Acc@1: 8.105 ( 8.105) Acc@5: 17.383 ( 17.383) +Test: [ 48/48] Time: 0.090 (0.319) Loss: 5.885 ( 6.016) Acc@1: 11.203 ( 9.232) Acc@5: 22.877 ( 19.444) +Train: 86 [ 0/312 ( 0%)] Loss: 5.53 (5.53) Time: 1.491s, 686.57/s (1.491s, 686.57/s) LR: 1.543e-01 Data: 1.048 (1.048) +Train: 86 [ 50/312 ( 16%)] Loss: 5.67 (5.59) Time: 0.405s, 2525.30/s (0.425s, 2408.95/s) LR: 1.543e-01 Data: 0.028 (0.048) +Train: 86 [ 100/312 ( 32%)] Loss: 5.70 (5.62) Time: 0.406s, 2525.17/s (0.416s, 2464.17/s) LR: 1.543e-01 Data: 0.026 (0.038) +Train: 86 [ 150/312 ( 48%)] Loss: 5.75 (5.65) Time: 0.407s, 2518.16/s (0.413s, 2479.69/s) LR: 1.543e-01 Data: 0.029 (0.034) +Train: 86 [ 200/312 ( 64%)] Loss: 5.77 (5.67) Time: 0.404s, 2531.91/s (0.411s, 2490.70/s) LR: 1.543e-01 Data: 0.028 (0.033) +Train: 86 [ 250/312 ( 80%)] Loss: 5.84 (5.68) Time: 0.405s, 2526.17/s 
(0.410s, 2498.49/s) LR: 1.543e-01 Data: 0.028 (0.032) +Train: 86 [ 300/312 ( 96%)] Loss: 5.78 (5.70) Time: 0.410s, 2499.56/s (0.409s, 2502.71/s) LR: 1.543e-01 Data: 0.030 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.405 (1.405) Loss: 5.992 ( 5.992) Acc@1: 10.156 ( 10.156) Acc@5: 19.434 ( 19.434) +Test: [ 48/48] Time: 0.091 (0.321) Loss: 5.818 ( 6.016) Acc@1: 10.849 ( 9.542) Acc@5: 22.995 ( 19.550) +Train: 87 [ 0/312 ( 0%)] Loss: 5.44 (5.44) Time: 1.628s, 629.00/s (1.628s, 629.00/s) LR: 1.503e-01 Data: 1.254 (1.254) +Train: 87 [ 50/312 ( 16%)] Loss: 5.61 (5.58) Time: 0.407s, 2517.16/s (0.430s, 2382.93/s) LR: 1.503e-01 Data: 0.027 (0.051) +Train: 87 [ 100/312 ( 32%)] Loss: 5.63 (5.60) Time: 0.411s, 2491.97/s (0.418s, 2447.47/s) LR: 1.503e-01 Data: 0.028 (0.039) +Train: 87 [ 150/312 ( 48%)] Loss: 5.70 (5.62) Time: 0.405s, 2525.42/s (0.415s, 2469.56/s) LR: 1.503e-01 Data: 0.028 (0.035) +Train: 87 [ 200/312 ( 64%)] Loss: 5.70 (5.64) Time: 0.404s, 2534.45/s (0.412s, 2484.59/s) LR: 1.503e-01 Data: 0.029 (0.034) +Train: 87 [ 250/312 ( 80%)] Loss: 5.77 (5.66) Time: 0.406s, 2525.25/s (0.411s, 2493.61/s) LR: 1.503e-01 Data: 0.029 (0.032) +Train: 87 [ 300/312 ( 96%)] Loss: 5.87 (5.68) Time: 0.408s, 2511.30/s (0.410s, 2497.57/s) LR: 1.503e-01 Data: 0.026 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.473 (1.473) Loss: 6.023 ( 6.023) Acc@1: 8.887 ( 8.887) Acc@5: 19.043 ( 19.043) +Test: [ 48/48] Time: 0.090 (0.318) Loss: 5.862 ( 5.970) Acc@1: 10.024 ( 10.036) Acc@5: 21.698 ( 20.788) +Train: 88 [ 0/312 ( 0%)] Loss: 5.53 (5.53) Time: 1.976s, 518.16/s (1.976s, 518.16/s) LR: 1.462e-01 Data: 1.107 (1.107) +Train: 88 [ 50/312 ( 16%)] Loss: 5.57 (5.53) Time: 0.406s, 2519.27/s (0.436s, 2348.38/s) LR: 1.462e-01 Data: 0.028 (0.049) +Train: 88 [ 100/312 ( 32%)] Loss: 5.60 (5.56) Time: 0.406s, 2523.37/s (0.422s, 2427.49/s) LR: 1.462e-01 Data: 0.028 (0.038) +Train: 88 [ 150/312 ( 48%)] Loss: 5.71 (5.59) Time: 0.407s, 2517.22/s (0.416s, 2459.68/s) LR: 1.462e-01 Data: 0.027 (0.035) +Train: 88 [ 200/312 ( 64%)] Loss: 5.64 (5.61) Time: 0.406s, 2520.25/s (0.413s, 2477.89/s) LR: 1.462e-01 Data: 0.028 (0.033) +Train: 88 [ 250/312 ( 80%)] Loss: 5.75 (5.63) Time: 0.407s, 2513.73/s (0.412s, 2486.44/s) LR: 1.462e-01 Data: 0.028 (0.032) +Train: 88 [ 300/312 ( 96%)] Loss: 5.74 (5.65) Time: 0.405s, 2526.06/s (0.411s, 2491.41/s) LR: 1.462e-01 Data: 0.027 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.462 (1.462) Loss: 6.209 ( 6.209) Acc@1: 7.715 ( 7.715) Acc@5: 16.211 ( 16.211) +Test: [ 48/48] Time: 0.089 (0.321) Loss: 5.913 ( 6.124) Acc@1: 10.967 ( 9.020) Acc@5: 21.698 ( 18.744) +Train: 89 [ 0/312 ( 0%)] Loss: 5.50 (5.50) Time: 1.557s, 657.57/s (1.557s, 657.57/s) LR: 1.422e-01 Data: 1.187 (1.187) +Train: 89 [ 50/312 ( 16%)] Loss: 5.42 (5.52) Time: 0.405s, 2529.07/s (0.428s, 2393.41/s) LR: 1.422e-01 Data: 0.028 (0.051) +Train: 89 [ 100/312 ( 32%)] Loss: 5.64 (5.55) Time: 0.408s, 2507.31/s (0.418s, 2450.94/s) LR: 1.422e-01 Data: 0.028 (0.039) +Train: 89 [ 150/312 ( 48%)] Loss: 5.63 (5.57) Time: 0.406s, 2521.17/s (0.414s, 2473.72/s) LR: 1.422e-01 Data: 0.030 (0.036) +Train: 89 [ 200/312 ( 64%)] Loss: 5.66 (5.59) Time: 0.405s, 2530.20/s (0.412s, 2488.07/s) LR: 1.422e-01 Data: 0.029 (0.034) +Train: 89 [ 250/312 ( 80%)] Loss: 5.67 (5.61) Time: 0.407s, 2517.59/s (0.410s, 2496.11/s) LR: 1.422e-01 Data: 0.027 (0.033) +Train: 89 [ 300/312 ( 96%)] Loss: 5.72 (5.62) Time: 0.411s, 2493.92/s (0.410s, 2499.29/s) LR: 1.422e-01 Data: 0.025 (0.032) 
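Note: the logged timing is internally consistent and pins down the global batch size. A quick worked check using the epoch-89 running averages just above (0.410s/step at 2499.29 images/s, 312 steps per epoch); the batch size of 1024 and the link to the "frac-1over4" run name (roughly one quarter of ImageNet-1k's ~1.28M training images) are inferences from these numbers, not values read from the run config.

step_time, rate, steps_per_epoch = 0.410, 2499.29, 312
print(step_time * rate)        # ~1024.7 -> global batch size 1024
print(steps_per_epoch * 1024)  # 319488 images/epoch, ~1.28M / 4
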
+Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.421 (1.421) Loss: 6.213 ( 6.213) Acc@1: 7.520 ( 7.520) Acc@5: 16.992 ( 16.992) +Test: [ 48/48] Time: 0.090 (0.322) Loss: 6.021 ( 6.145) Acc@1: 10.024 ( 8.558) Acc@5: 17.453 ( 18.286) +Train: 90 [ 0/312 ( 0%)] Loss: 5.47 (5.47) Time: 1.867s, 548.42/s (1.867s, 548.42/s) LR: 1.382e-01 Data: 1.356 (1.356) +Train: 90 [ 50/312 ( 16%)] Loss: 5.49 (5.49) Time: 0.403s, 2540.07/s (0.432s, 2368.03/s) LR: 1.382e-01 Data: 0.027 (0.054) +Train: 90 [ 100/312 ( 32%)] Loss: 5.71 (5.52) Time: 0.405s, 2525.81/s (0.419s, 2444.07/s) LR: 1.382e-01 Data: 0.028 (0.041) +Train: 90 [ 150/312 ( 48%)] Loss: 5.49 (5.54) Time: 0.407s, 2516.07/s (0.415s, 2468.74/s) LR: 1.382e-01 Data: 0.030 (0.036) +Train: 90 [ 200/312 ( 64%)] Loss: 5.64 (5.57) Time: 0.403s, 2538.51/s (0.413s, 2481.60/s) LR: 1.382e-01 Data: 0.027 (0.034) +Train: 90 [ 250/312 ( 80%)] Loss: 5.67 (5.58) Time: 0.406s, 2522.53/s (0.411s, 2488.57/s) LR: 1.382e-01 Data: 0.026 (0.033) +Train: 90 [ 300/312 ( 96%)] Loss: 5.69 (5.60) Time: 0.406s, 2520.99/s (0.411s, 2492.95/s) LR: 1.382e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.409 (1.409) Loss: 6.194 ( 6.194) Acc@1: 7.715 ( 7.715) Acc@5: 17.578 ( 17.578) +Test: [ 48/48] Time: 0.089 (0.320) Loss: 5.989 ( 6.135) Acc@1: 10.731 ( 8.712) Acc@5: 20.047 ( 18.280) +Train: 91 [ 0/312 ( 0%)] Loss: 5.44 (5.44) Time: 1.705s, 600.70/s (1.705s, 600.70/s) LR: 1.342e-01 Data: 1.334 (1.334) +Train: 91 [ 50/312 ( 16%)] Loss: 5.43 (5.44) Time: 0.400s, 2558.04/s (0.428s, 2392.48/s) LR: 1.342e-01 Data: 0.027 (0.053) +Train: 91 [ 100/312 ( 32%)] Loss: 5.54 (5.48) Time: 0.403s, 2542.61/s (0.416s, 2463.61/s) LR: 1.342e-01 Data: 0.026 (0.041) +Train: 91 [ 150/312 ( 48%)] Loss: 5.64 (5.51) Time: 0.408s, 2511.82/s (0.412s, 2484.17/s) LR: 1.342e-01 Data: 0.028 (0.036) +Train: 91 [ 200/312 ( 64%)] Loss: 5.58 (5.53) Time: 0.406s, 2519.58/s (0.411s, 2491.58/s) LR: 1.342e-01 Data: 0.029 (0.034) +Train: 91 [ 250/312 ( 80%)] Loss: 5.64 (5.55) Time: 0.405s, 2526.54/s (0.410s, 2498.99/s) LR: 1.342e-01 Data: 0.027 (0.033) +Train: 91 [ 300/312 ( 96%)] Loss: 5.65 (5.57) Time: 0.405s, 2526.18/s (0.409s, 2503.77/s) LR: 1.342e-01 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.400 (1.400) Loss: 6.209 ( 6.209) Acc@1: 8.496 ( 8.496) Acc@5: 18.066 ( 18.066) +Test: [ 48/48] Time: 0.090 (0.321) Loss: 5.954 ( 6.120) Acc@1: 11.085 ( 9.286) Acc@5: 22.052 ( 19.378) +Train: 92 [ 0/312 ( 0%)] Loss: 5.44 (5.44) Time: 1.665s, 614.87/s (1.665s, 614.87/s) LR: 1.303e-01 Data: 1.291 (1.291) +Train: 92 [ 50/312 ( 16%)] Loss: 5.47 (5.43) Time: 0.406s, 2522.08/s (0.432s, 2370.88/s) LR: 1.303e-01 Data: 0.028 (0.052) +Train: 92 [ 100/312 ( 32%)] Loss: 5.48 (5.46) Time: 0.406s, 2519.75/s (0.419s, 2443.35/s) LR: 1.303e-01 Data: 0.028 (0.040) +Train: 92 [ 150/312 ( 48%)] Loss: 5.58 (5.48) Time: 0.404s, 2535.23/s (0.415s, 2470.38/s) LR: 1.303e-01 Data: 0.028 (0.036) +Train: 92 [ 200/312 ( 64%)] Loss: 5.54 (5.50) Time: 0.406s, 2521.76/s (0.413s, 2482.00/s) LR: 1.303e-01 Data: 0.027 (0.034) +Train: 92 [ 250/312 ( 80%)] Loss: 5.61 (5.52) Time: 0.411s, 2492.39/s (0.412s, 2488.38/s) LR: 1.303e-01 Data: 0.032 (0.033) +Train: 92 [ 300/312 ( 96%)] Loss: 5.61 (5.54) Time: 0.407s, 2516.01/s (0.411s, 2493.35/s) LR: 1.303e-01 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.409 (1.409) Loss: 6.176 ( 6.176) Acc@1: 8.789 ( 8.789) Acc@5: 17.578 ( 17.578) +Test: [ 48/48] Time: 0.090 (0.320) 
Loss: 6.021 ( 6.152) Acc@1: 8.608 ( 8.840) Acc@5: 20.165 ( 18.398) +Train: 93 [ 0/312 ( 0%)] Loss: 5.40 (5.40) Time: 1.769s, 578.70/s (1.769s, 578.70/s) LR: 1.264e-01 Data: 1.398 (1.398) +Train: 93 [ 50/312 ( 16%)] Loss: 5.34 (5.40) Time: 0.405s, 2529.00/s (0.432s, 2370.90/s) LR: 1.264e-01 Data: 0.028 (0.055) +Train: 93 [ 100/312 ( 32%)] Loss: 5.51 (5.43) Time: 0.405s, 2525.31/s (0.419s, 2441.70/s) LR: 1.264e-01 Data: 0.027 (0.041) +Train: 93 [ 150/312 ( 48%)] Loss: 5.58 (5.46) Time: 0.412s, 2488.28/s (0.415s, 2464.61/s) LR: 1.264e-01 Data: 0.032 (0.037) +Train: 93 [ 200/312 ( 64%)] Loss: 5.49 (5.48) Time: 0.407s, 2515.83/s (0.413s, 2477.05/s) LR: 1.264e-01 Data: 0.028 (0.035) +Train: 93 [ 250/312 ( 80%)] Loss: 5.55 (5.50) Time: 0.409s, 2503.10/s (0.412s, 2484.29/s) LR: 1.264e-01 Data: 0.026 (0.033) +Train: 93 [ 300/312 ( 96%)] Loss: 5.65 (5.51) Time: 0.408s, 2512.52/s (0.411s, 2488.68/s) LR: 1.264e-01 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.403 (1.403) Loss: 6.284 ( 6.284) Acc@1: 8.105 ( 8.105) Acc@5: 18.359 ( 18.359) +Test: [ 48/48] Time: 0.090 (0.321) Loss: 6.051 ( 6.242) Acc@1: 9.552 ( 8.618) Acc@5: 20.283 ( 18.306) +Train: 94 [ 0/312 ( 0%)] Loss: 5.34 (5.34) Time: 1.435s, 713.71/s (1.435s, 713.71/s) LR: 1.225e-01 Data: 1.030 (1.030) +Train: 94 [ 50/312 ( 16%)] Loss: 5.45 (5.37) Time: 0.406s, 2521.86/s (0.427s, 2395.95/s) LR: 1.225e-01 Data: 0.028 (0.048) +Train: 94 [ 100/312 ( 32%)] Loss: 5.40 (5.40) Time: 0.404s, 2534.43/s (0.417s, 2454.47/s) LR: 1.225e-01 Data: 0.028 (0.038) +Train: 94 [ 150/312 ( 48%)] Loss: 5.47 (5.42) Time: 0.401s, 2555.19/s (0.412s, 2482.95/s) LR: 1.225e-01 Data: 0.028 (0.034) +Train: 94 [ 200/312 ( 64%)] Loss: 5.63 (5.44) Time: 0.402s, 2547.55/s (0.410s, 2499.70/s) LR: 1.225e-01 Data: 0.029 (0.033) +Train: 94 [ 250/312 ( 80%)] Loss: 5.58 (5.47) Time: 0.402s, 2549.75/s (0.408s, 2509.87/s) LR: 1.225e-01 Data: 0.028 (0.032) +Train: 94 [ 300/312 ( 96%)] Loss: 5.66 (5.48) Time: 0.404s, 2536.91/s (0.407s, 2516.13/s) LR: 1.225e-01 Data: 0.028 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.407 (1.407) Loss: 6.464 ( 6.464) Acc@1: 8.105 ( 8.105) Acc@5: 15.332 ( 15.332) +Test: [ 48/48] Time: 0.090 (0.319) Loss: 6.256 ( 6.383) Acc@1: 10.495 ( 8.114) Acc@5: 21.462 ( 17.188) +Train: 95 [ 0/312 ( 0%)] Loss: 5.33 (5.33) Time: 1.791s, 571.90/s (1.791s, 571.90/s) LR: 1.187e-01 Data: 1.419 (1.419) +Train: 95 [ 50/312 ( 16%)] Loss: 5.31 (5.34) Time: 0.406s, 2523.00/s (0.432s, 2369.91/s) LR: 1.187e-01 Data: 0.028 (0.055) +Train: 95 [ 100/312 ( 32%)] Loss: 5.39 (5.38) Time: 0.407s, 2515.74/s (0.420s, 2438.20/s) LR: 1.187e-01 Data: 0.028 (0.041) +Train: 95 [ 150/312 ( 48%)] Loss: 5.49 (5.40) Time: 0.404s, 2535.47/s (0.415s, 2467.83/s) LR: 1.187e-01 Data: 0.027 (0.037) +Train: 95 [ 200/312 ( 64%)] Loss: 5.51 (5.42) Time: 0.402s, 2546.54/s (0.412s, 2484.81/s) LR: 1.187e-01 Data: 0.026 (0.035) +Train: 95 [ 250/312 ( 80%)] Loss: 5.51 (5.44) Time: 0.405s, 2529.36/s (0.411s, 2493.40/s) LR: 1.187e-01 Data: 0.028 (0.033) +Train: 95 [ 300/312 ( 96%)] Loss: 5.58 (5.46) Time: 0.409s, 2502.84/s (0.410s, 2496.92/s) LR: 1.187e-01 Data: 0.029 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.400 (1.400) Loss: 6.190 ( 6.190) Acc@1: 8.008 ( 8.008) Acc@5: 17.383 ( 17.383) +Test: [ 48/48] Time: 0.090 (0.321) Loss: 6.061 ( 6.137) Acc@1: 8.726 ( 8.590) Acc@5: 19.104 ( 18.268) +Train: 96 [ 0/312 ( 0%)] Loss: 5.33 (5.33) Time: 1.493s, 685.94/s (1.493s, 685.94/s) LR: 1.148e-01 Data: 1.095 (1.095) 
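Note: the per-epoch LR column follows a half-cosine decay once the early warmup ramp (visible at the top of the log) is over. The logged values can be reproduced to their printed precision from a peak LR of 0.4 annealed over the 150 scheduled epochs; both numbers are inferred from the log itself rather than read from the run config, so treat this as a sketch of the schedule, not the trainer's code.

import math

def cosine_lr(epoch: int, peak_lr: float = 0.4, total_epochs: int = 150) -> float:
    # Anneal from peak_lr at epoch 0 toward 0 at total_epochs.
    return 0.5 * peak_lr * (1 + math.cos(math.pi * epoch / total_epochs))

for t in (95, 96, 100, 101):
    print(t, f"{cosine_lr(t):.3e}")
# 95 1.187e-01, 96 1.148e-01, 100 1.000e-01, 101 9.639e-02 -- matching the log
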
+Train: 96 [ 50/312 ( 16%)] Loss: 5.40 (5.33) Time: 0.404s, 2535.71/s (0.427s, 2399.09/s) LR: 1.148e-01 Data: 0.027 (0.048) +Train: 96 [ 100/312 ( 32%)] Loss: 5.48 (5.35) Time: 0.405s, 2527.47/s (0.416s, 2460.67/s) LR: 1.148e-01 Data: 0.027 (0.038) +Train: 96 [ 150/312 ( 48%)] Loss: 5.41 (5.37) Time: 0.405s, 2526.12/s (0.413s, 2478.90/s) LR: 1.148e-01 Data: 0.027 (0.035) +Train: 96 [ 200/312 ( 64%)] Loss: 5.57 (5.40) Time: 0.404s, 2534.50/s (0.411s, 2491.40/s) LR: 1.148e-01 Data: 0.027 (0.033) +Train: 96 [ 250/312 ( 80%)] Loss: 5.61 (5.41) Time: 0.405s, 2531.23/s (0.410s, 2500.55/s) LR: 1.148e-01 Data: 0.029 (0.032) +Train: 96 [ 300/312 ( 96%)] Loss: 5.50 (5.43) Time: 0.411s, 2491.77/s (0.409s, 2505.08/s) LR: 1.148e-01 Data: 0.024 (0.031) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.417 (1.417) Loss: 6.212 ( 6.212) Acc@1: 9.082 ( 9.082) Acc@5: 17.773 ( 17.773) +Test: [ 48/48] Time: 0.090 (0.318) Loss: 5.973 ( 6.195) Acc@1: 9.906 ( 8.542) Acc@5: 20.873 ( 17.888) +Train: 97 [ 0/312 ( 0%)] Loss: 5.27 (5.27) Time: 1.632s, 627.40/s (1.632s, 627.40/s) LR: 1.111e-01 Data: 1.257 (1.257) +Train: 97 [ 50/312 ( 16%)] Loss: 5.24 (5.29) Time: 0.405s, 2527.30/s (0.431s, 2376.18/s) LR: 1.111e-01 Data: 0.028 (0.051) +Train: 97 [ 100/312 ( 32%)] Loss: 5.49 (5.32) Time: 0.404s, 2534.09/s (0.418s, 2449.53/s) LR: 1.111e-01 Data: 0.027 (0.040) +Train: 97 [ 150/312 ( 48%)] Loss: 5.36 (5.35) Time: 0.406s, 2523.09/s (0.414s, 2474.47/s) LR: 1.111e-01 Data: 0.029 (0.036) +Train: 97 [ 200/312 ( 64%)] Loss: 5.45 (5.37) Time: 0.412s, 2485.99/s (0.412s, 2484.12/s) LR: 1.111e-01 Data: 0.032 (0.034) +Train: 97 [ 250/312 ( 80%)] Loss: 5.53 (5.38) Time: 0.405s, 2530.91/s (0.411s, 2489.75/s) LR: 1.111e-01 Data: 0.027 (0.033) +Train: 97 [ 300/312 ( 96%)] Loss: 5.49 (5.40) Time: 0.403s, 2537.84/s (0.410s, 2496.13/s) LR: 1.111e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.421 (1.421) Loss: 6.247 ( 6.247) Acc@1: 8.203 ( 8.203) Acc@5: 17.188 ( 17.188) +Test: [ 48/48] Time: 0.089 (0.318) Loss: 6.008 ( 6.204) Acc@1: 9.434 ( 8.554) Acc@5: 20.283 ( 18.196) +Train: 98 [ 0/312 ( 0%)] Loss: 5.22 (5.22) Time: 1.644s, 622.81/s (1.644s, 622.81/s) LR: 1.073e-01 Data: 1.273 (1.273) +Train: 98 [ 50/312 ( 16%)] Loss: 5.24 (5.27) Time: 0.406s, 2522.63/s (0.428s, 2389.89/s) LR: 1.073e-01 Data: 0.029 (0.052) +Train: 98 [ 100/312 ( 32%)] Loss: 5.29 (5.30) Time: 0.406s, 2521.59/s (0.418s, 2452.30/s) LR: 1.073e-01 Data: 0.027 (0.040) +Train: 98 [ 150/312 ( 48%)] Loss: 5.40 (5.32) Time: 0.406s, 2520.20/s (0.414s, 2472.45/s) LR: 1.073e-01 Data: 0.030 (0.036) +Train: 98 [ 200/312 ( 64%)] Loss: 5.37 (5.34) Time: 0.404s, 2537.66/s (0.412s, 2485.58/s) LR: 1.073e-01 Data: 0.027 (0.034) +Train: 98 [ 250/312 ( 80%)] Loss: 5.44 (5.36) Time: 0.406s, 2519.07/s (0.411s, 2494.05/s) LR: 1.073e-01 Data: 0.028 (0.033) +Train: 98 [ 300/312 ( 96%)] Loss: 5.37 (5.38) Time: 0.407s, 2517.41/s (0.410s, 2497.58/s) LR: 1.073e-01 Data: 0.028 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.402 (1.402) Loss: 6.513 ( 6.513) Acc@1: 7.129 ( 7.129) Acc@5: 16.016 ( 16.016) +Test: [ 48/48] Time: 0.090 (0.320) Loss: 6.281 ( 6.376) Acc@1: 8.019 ( 7.818) Acc@5: 20.283 ( 16.982) +Train: 99 [ 0/312 ( 0%)] Loss: 5.19 (5.19) Time: 1.654s, 619.11/s (1.654s, 619.11/s) LR: 1.036e-01 Data: 1.282 (1.282) +Train: 99 [ 50/312 ( 16%)] Loss: 5.20 (5.24) Time: 0.407s, 2516.32/s (0.431s, 2373.89/s) LR: 1.036e-01 Data: 0.027 (0.052) +Train: 99 [ 100/312 ( 32%)] Loss: 5.43 (5.27) Time: 0.409s, 
2506.55/s (0.420s, 2439.27/s) LR: 1.036e-01 Data: 0.029 (0.040) +Train: 99 [ 150/312 ( 48%)] Loss: 5.33 (5.29) Time: 0.406s, 2524.20/s (0.415s, 2467.57/s) LR: 1.036e-01 Data: 0.028 (0.036) +Train: 99 [ 200/312 ( 64%)] Loss: 5.44 (5.31) Time: 0.403s, 2543.55/s (0.412s, 2483.53/s) LR: 1.036e-01 Data: 0.027 (0.034) +Train: 99 [ 250/312 ( 80%)] Loss: 5.41 (5.33) Time: 0.409s, 2506.24/s (0.411s, 2491.67/s) LR: 1.036e-01 Data: 0.029 (0.032) +Train: 99 [ 300/312 ( 96%)] Loss: 5.42 (5.35) Time: 0.408s, 2510.59/s (0.410s, 2495.18/s) LR: 1.036e-01 Data: 0.030 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.401 (1.401) Loss: 6.389 ( 6.389) Acc@1: 7.520 ( 7.520) Acc@5: 16.699 ( 16.699) +Test: [ 48/48] Time: 0.090 (0.322) Loss: 6.143 ( 6.285) Acc@1: 7.901 ( 8.166) Acc@5: 20.165 ( 17.712) +Train: 100 [ 0/312 ( 0%)] Loss: 5.27 (5.27) Time: 1.873s, 546.78/s (1.873s, 546.78/s) LR: 1.000e-01 Data: 1.498 (1.498) +Train: 100 [ 50/312 ( 16%)] Loss: 5.20 (5.22) Time: 0.406s, 2523.72/s (0.435s, 2355.48/s) LR: 1.000e-01 Data: 0.028 (0.056) +Train: 100 [ 100/312 ( 32%)] Loss: 5.27 (5.24) Time: 0.405s, 2525.48/s (0.421s, 2429.92/s) LR: 1.000e-01 Data: 0.028 (0.042) +Train: 100 [ 150/312 ( 48%)] Loss: 5.28 (5.27) Time: 0.403s, 2538.33/s (0.416s, 2462.28/s) LR: 1.000e-01 Data: 0.029 (0.038) +Train: 100 [ 200/312 ( 64%)] Loss: 5.33 (5.29) Time: 0.411s, 2494.00/s (0.413s, 2480.04/s) LR: 1.000e-01 Data: 0.029 (0.035) +Train: 100 [ 250/312 ( 80%)] Loss: 5.40 (5.30) Time: 0.405s, 2529.30/s (0.411s, 2490.13/s) LR: 1.000e-01 Data: 0.028 (0.034) +Train: 100 [ 300/312 ( 96%)] Loss: 5.48 (5.32) Time: 0.407s, 2517.98/s (0.410s, 2494.94/s) LR: 1.000e-01 Data: 0.026 (0.033) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.417 (1.417) Loss: 6.464 ( 6.464) Acc@1: 7.227 ( 7.227) Acc@5: 16.895 ( 16.895) +Test: [ 48/48] Time: 0.089 (0.318) Loss: 6.171 ( 6.365) Acc@1: 8.491 ( 8.212) Acc@5: 19.458 ( 17.614) +Train: 101 [ 0/312 ( 0%)] Loss: 5.10 (5.10) Time: 1.568s, 653.07/s (1.568s, 653.07/s) LR: 9.639e-02 Data: 1.196 (1.196) +Train: 101 [ 50/312 ( 16%)] Loss: 5.19 (5.18) Time: 0.402s, 2546.08/s (0.426s, 2401.22/s) LR: 9.639e-02 Data: 0.027 (0.051) +Train: 101 [ 100/312 ( 32%)] Loss: 5.19 (5.20) Time: 0.405s, 2529.85/s (0.415s, 2466.54/s) LR: 9.639e-02 Data: 0.028 (0.039) +Train: 101 [ 150/312 ( 48%)] Loss: 5.34 (5.23) Time: 0.408s, 2512.47/s (0.412s, 2485.73/s) LR: 9.639e-02 Data: 0.029 (0.035) +Train: 101 [ 200/312 ( 64%)] Loss: 5.37 (5.25) Time: 0.405s, 2526.11/s (0.411s, 2493.79/s) LR: 9.639e-02 Data: 0.027 (0.034) +Train: 101 [ 250/312 ( 80%)] Loss: 5.33 (5.27) Time: 0.405s, 2530.00/s (0.410s, 2499.24/s) LR: 9.639e-02 Data: 0.027 (0.032) +Train: 101 [ 300/312 ( 96%)] Loss: 5.29 (5.29) Time: 0.407s, 2516.78/s (0.409s, 2502.24/s) LR: 9.639e-02 Data: 0.027 (0.032) +Distributing BatchNorm running means and vars +Test: [ 0/48] Time: 1.417 (1.417) Loss: 6.552 ( 6.552) Acc@1: 6.445 ( 6.445) Acc@5: 14.844 ( 14.844) +Test: [ 48/48] Time: 0.090 (0.320) Loss: 6.347 ( 6.490) Acc@1: 8.137 ( 7.798) Acc@5: 16.981 ( 16.618) +Train: 102 [ 0/312 ( 0%)] Loss: 5.15 (5.15) Time: 1.672s, 612.30/s (1.672s, 612.30/s) LR: 9.283e-02 Data: 1.298 (1.298) +Train: 102 [ 50/312 ( 16%)] Loss: 5.22 (5.15) Time: 0.410s, 2497.86/s (0.432s, 2369.60/s) LR: 9.283e-02 Data: 0.028 (0.053) +Train: 102 [ 100/312 ( 32%)] Loss: 5.27 (5.18) Time: 0.406s, 2519.16/s (0.420s, 2440.46/s) LR: 9.283e-02 Data: 0.026 (0.040) +Train: 102 [ 150/312 ( 48%)] Loss: 5.38 (5.20) Time: 0.406s, 2519.62/s (0.415s, 2464.93/s) LR: 9.283e-02 
Data: 0.028 (0.036)
+Train: 102 [ 200/312 ( 64%)] Loss: 5.33 (5.22) Time: 0.406s, 2520.98/s (0.413s, 2479.03/s) LR: 9.283e-02 Data: 0.028 (0.034)
+Train: 102 [ 250/312 ( 80%)] Loss: 5.38 (5.24) Time: 0.404s, 2536.57/s (0.412s, 2487.18/s) LR: 9.283e-02 Data: 0.027 (0.033)
+Train: 102 [ 300/312 ( 96%)] Loss: 5.33 (5.26) Time: 0.405s, 2530.50/s (0.411s, 2493.23/s) LR: 9.283e-02 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.412 (1.412) Loss: 6.521 ( 6.521) Acc@1: 6.738 ( 6.738) Acc@5: 14.551 ( 14.551)
+Test: [ 48/48] Time: 0.090 (0.318) Loss: 6.209 ( 6.400) Acc@1: 7.547 ( 8.104) Acc@5: 19.458 ( 17.314)
+Train: 103 [ 0/312 ( 0%)] Loss: 5.15 (5.15) Time: 1.655s, 618.83/s (1.655s, 618.83/s) LR: 8.932e-02 Data: 1.255 (1.255)
+Train: 103 [ 50/312 ( 16%)] Loss: 5.15 (5.12) Time: 0.408s, 2512.45/s (0.431s, 2375.02/s) LR: 8.932e-02 Data: 0.029 (0.052)
+Train: 103 [ 100/312 ( 32%)] Loss: 5.19 (5.15) Time: 0.406s, 2520.37/s (0.419s, 2443.84/s) LR: 8.932e-02 Data: 0.028 (0.040)
+Train: 103 [ 150/312 ( 48%)] Loss: 5.22 (5.18) Time: 0.406s, 2524.28/s (0.415s, 2467.88/s) LR: 8.932e-02 Data: 0.028 (0.036)
+Train: 103 [ 200/312 ( 64%)] Loss: 5.23 (5.20) Time: 0.407s, 2516.67/s (0.413s, 2479.40/s) LR: 8.932e-02 Data: 0.028 (0.034)
+Train: 103 [ 250/312 ( 80%)] Loss: 5.35 (5.21) Time: 0.403s, 2537.96/s (0.412s, 2488.14/s) LR: 8.932e-02 Data: 0.027 (0.033)
+Train: 103 [ 300/312 ( 96%)] Loss: 5.22 (5.23) Time: 0.406s, 2519.10/s (0.411s, 2493.65/s) LR: 8.932e-02 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.412 (1.412) Loss: 6.491 ( 6.491) Acc@1: 6.836 ( 6.836) Acc@5: 16.406 ( 16.406)
+Test: [ 48/48] Time: 0.090 (0.319) Loss: 6.237 ( 6.459) Acc@1: 7.193 ( 7.796) Acc@5: 18.750 ( 16.888)
+Train: 104 [ 0/312 ( 0%)] Loss: 4.99 (4.99) Time: 1.523s, 672.15/s (1.523s, 672.15/s) LR: 8.586e-02 Data: 1.150 (1.150)
+Train: 104 [ 50/312 ( 16%)] Loss: 5.12 (5.09) Time: 0.407s, 2518.79/s (0.429s, 2389.03/s) LR: 8.586e-02 Data: 0.028 (0.050)
+Train: 104 [ 100/312 ( 32%)] Loss: 5.20 (5.12) Time: 0.409s, 2504.76/s (0.418s, 2450.15/s) LR: 8.586e-02 Data: 0.029 (0.039)
+Train: 104 [ 150/312 ( 48%)] Loss: 5.21 (5.15) Time: 0.406s, 2524.19/s (0.415s, 2470.38/s) LR: 8.586e-02 Data: 0.028 (0.035)
+Train: 104 [ 200/312 ( 64%)] Loss: 5.32 (5.16) Time: 0.407s, 2516.97/s (0.413s, 2482.39/s) LR: 8.586e-02 Data: 0.027 (0.033)
+Train: 104 [ 250/312 ( 80%)] Loss: 5.38 (5.18) Time: 0.408s, 2511.35/s (0.411s, 2490.27/s) LR: 8.586e-02 Data: 0.028 (0.032)
+Train: 104 [ 300/312 ( 96%)] Loss: 5.25 (5.20) Time: 0.405s, 2528.89/s (0.411s, 2494.28/s) LR: 8.586e-02 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.415 (1.415) Loss: 6.634 ( 6.634) Acc@1: 6.348 ( 6.348) Acc@5: 14.648 ( 14.648)
+Test: [ 48/48] Time: 0.090 (0.317) Loss: 6.394 ( 6.520) Acc@1: 8.137 ( 7.558) Acc@5: 17.099 ( 16.356)
+Train: 105 [ 0/312 ( 0%)] Loss: 5.02 (5.02) Time: 1.561s, 656.09/s (1.561s, 656.09/s) LR: 8.244e-02 Data: 1.187 (1.187)
+Train: 105 [ 50/312 ( 16%)] Loss: 5.08 (5.08) Time: 0.405s, 2525.28/s (0.429s, 2387.52/s) LR: 8.244e-02 Data: 0.027 (0.051)
+Train: 105 [ 100/312 ( 32%)] Loss: 5.13 (5.10) Time: 0.408s, 2509.20/s (0.418s, 2452.48/s) LR: 8.244e-02 Data: 0.028 (0.039)
+Train: 105 [ 150/312 ( 48%)] Loss: 5.28 (5.12) Time: 0.405s, 2531.31/s (0.414s, 2473.57/s) LR: 8.244e-02 Data: 0.027 (0.035)
+Train: 105 [ 200/312 ( 64%)] Loss: 5.16 (5.14) Time: 0.405s, 2525.87/s (0.412s, 2486.82/s) LR: 8.244e-02 Data: 0.028 (0.033)
+Train: 105 [ 250/312 ( 80%)] Loss: 5.19 (5.16) Time: 0.405s, 2529.07/s (0.410s, 2494.75/s) LR: 8.244e-02 Data: 0.028 (0.032)
+Train: 105 [ 300/312 ( 96%)] Loss: 5.34 (5.18) Time: 0.407s, 2518.78/s (0.410s, 2498.84/s) LR: 8.244e-02 Data: 0.025 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.419 (1.419) Loss: 6.454 ( 6.454) Acc@1: 7.031 ( 7.031) Acc@5: 16.016 ( 16.016)
+Test: [ 48/48] Time: 0.090 (0.320) Loss: 6.190 ( 6.411) Acc@1: 7.665 ( 7.742) Acc@5: 18.868 ( 16.806)
+Train: 106 [ 0/312 ( 0%)] Loss: 5.06 (5.06) Time: 1.563s, 655.17/s (1.563s, 655.17/s) LR: 7.908e-02 Data: 1.073 (1.073)
+Train: 106 [ 50/312 ( 16%)] Loss: 5.08 (5.05) Time: 0.405s, 2530.39/s (0.428s, 2393.90/s) LR: 7.908e-02 Data: 0.027 (0.048)
+Train: 106 [ 100/312 ( 32%)] Loss: 5.10 (5.08) Time: 0.408s, 2508.76/s (0.417s, 2453.02/s) LR: 7.908e-02 Data: 0.026 (0.038)
+Train: 106 [ 150/312 ( 48%)] Loss: 5.11 (5.10) Time: 0.404s, 2536.23/s (0.414s, 2475.22/s) LR: 7.908e-02 Data: 0.027 (0.035)
+Train: 106 [ 200/312 ( 64%)] Loss: 5.17 (5.11) Time: 0.403s, 2541.14/s (0.411s, 2489.99/s) LR: 7.908e-02 Data: 0.028 (0.033)
+Train: 106 [ 250/312 ( 80%)] Loss: 5.21 (5.13) Time: 0.403s, 2538.95/s (0.410s, 2498.43/s) LR: 7.908e-02 Data: 0.028 (0.032)
+Train: 106 [ 300/312 ( 96%)] Loss: 5.17 (5.15) Time: 0.406s, 2521.92/s (0.409s, 2502.56/s) LR: 7.908e-02 Data: 0.027 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.417 (1.417) Loss: 6.668 ( 6.668) Acc@1: 6.738 ( 6.738) Acc@5: 15.723 ( 15.723)
+Test: [ 48/48] Time: 0.091 (0.319) Loss: 6.429 ( 6.630) Acc@1: 7.429 ( 7.180) Acc@5: 17.571 ( 15.898)
+Train: 107 [ 0/312 ( 0%)] Loss: 4.99 (4.99) Time: 1.708s, 599.70/s (1.708s, 599.70/s) LR: 7.577e-02 Data: 1.331 (1.331)
+Train: 107 [ 50/312 ( 16%)] Loss: 5.14 (5.03) Time: 0.407s, 2518.30/s (0.432s, 2370.21/s) LR: 7.577e-02 Data: 0.029 (0.053)
+Train: 107 [ 100/312 ( 32%)] Loss: 5.09 (5.05) Time: 0.405s, 2530.38/s (0.419s, 2446.30/s) LR: 7.577e-02 Data: 0.029 (0.040)
+Train: 107 [ 150/312 ( 48%)] Loss: 5.06 (5.06) Time: 0.404s, 2536.53/s (0.414s, 2475.52/s) LR: 7.577e-02 Data: 0.028 (0.036)
+Train: 107 [ 200/312 ( 64%)] Loss: 5.08 (5.08) Time: 0.406s, 2519.66/s (0.412s, 2488.22/s) LR: 7.577e-02 Data: 0.028 (0.034)
+Train: 107 [ 250/312 ( 80%)] Loss: 5.15 (5.10) Time: 0.407s, 2517.28/s (0.411s, 2493.95/s) LR: 7.577e-02 Data: 0.028 (0.033)
+Train: 107 [ 300/312 ( 96%)] Loss: 5.28 (5.12) Time: 0.406s, 2523.69/s (0.410s, 2497.73/s) LR: 7.577e-02 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.423 (1.423) Loss: 6.672 ( 6.672) Acc@1: 6.934 ( 6.934) Acc@5: 15.625 ( 15.625)
+Test: [ 48/48] Time: 0.090 (0.317) Loss: 6.428 ( 6.606) Acc@1: 8.137 ( 7.386) Acc@5: 17.099 ( 16.100)
+Train: 108 [ 0/312 ( 0%)] Loss: 5.00 (5.00) Time: 1.522s, 672.81/s (1.522s, 672.81/s) LR: 7.252e-02 Data: 1.096 (1.096)
+Train: 108 [ 50/312 ( 16%)] Loss: 4.94 (4.98) Time: 0.409s, 2505.03/s (0.429s, 2386.58/s) LR: 7.252e-02 Data: 0.029 (0.049)
+Train: 108 [ 100/312 ( 32%)] Loss: 5.01 (5.01) Time: 0.406s, 2520.36/s (0.418s, 2450.40/s) LR: 7.252e-02 Data: 0.028 (0.038)
+Train: 108 [ 150/312 ( 48%)] Loss: 5.08 (5.03) Time: 0.406s, 2520.91/s (0.414s, 2471.98/s) LR: 7.252e-02 Data: 0.028 (0.035)
+Train: 108 [ 200/312 ( 64%)] Loss: 5.14 (5.05) Time: 0.407s, 2516.61/s (0.412s, 2483.52/s) LR: 7.252e-02 Data: 0.029 (0.033)
+Train: 108 [ 250/312 ( 80%)] Loss: 5.16 (5.07) Time: 0.406s, 2523.65/s (0.411s, 2490.30/s) LR: 7.252e-02 Data: 0.027 (0.032)
+Train: 108 [ 300/312 ( 96%)] Loss: 5.19 (5.09) Time: 0.407s, 2516.16/s (0.410s, 2495.72/s) LR: 7.252e-02 Data: 0.028 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.410 (1.410) Loss: 6.653 ( 6.653) Acc@1: 6.543 ( 6.543) Acc@5: 14.941 ( 14.941)
+Test: [ 48/48] Time: 0.090 (0.319) Loss: 6.338 ( 6.598) Acc@1: 8.255 ( 7.156) Acc@5: 18.986 ( 15.894)
+Train: 109 [ 0/312 ( 0%)] Loss: 4.91 (4.91) Time: 1.749s, 585.37/s (1.749s, 585.37/s) LR: 6.932e-02 Data: 1.375 (1.375)
+Train: 109 [ 50/312 ( 16%)] Loss: 5.01 (4.97) Time: 0.406s, 2520.86/s (0.432s, 2369.55/s) LR: 6.932e-02 Data: 0.027 (0.054)
+Train: 109 [ 100/312 ( 32%)] Loss: 5.08 (5.00) Time: 0.407s, 2516.40/s (0.420s, 2440.41/s) LR: 6.932e-02 Data: 0.028 (0.041)
+Train: 109 [ 150/312 ( 48%)] Loss: 5.04 (5.02) Time: 0.405s, 2531.34/s (0.415s, 2466.65/s) LR: 6.932e-02 Data: 0.026 (0.036)
+Train: 109 [ 200/312 ( 64%)] Loss: 5.22 (5.03) Time: 0.403s, 2539.26/s (0.413s, 2480.39/s) LR: 6.932e-02 Data: 0.027 (0.034)
+Train: 109 [ 250/312 ( 80%)] Loss: 5.10 (5.05) Time: 0.403s, 2539.91/s (0.411s, 2491.41/s) LR: 6.932e-02 Data: 0.028 (0.033)
+Train: 109 [ 300/312 ( 96%)] Loss: 5.15 (5.06) Time: 0.404s, 2535.59/s (0.410s, 2498.35/s) LR: 6.932e-02 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.404 (1.404) Loss: 6.631 ( 6.631) Acc@1: 7.324 ( 7.324) Acc@5: 14.258 ( 14.258)
+Test: [ 48/48] Time: 0.089 (0.318) Loss: 6.420 ( 6.588) Acc@1: 9.198 ( 7.176) Acc@5: 18.750 ( 15.804)
+Train: 110 [ 0/312 ( 0%)] Loss: 4.97 (4.97) Time: 1.732s, 591.30/s (1.732s, 591.30/s) LR: 6.617e-02 Data: 1.360 (1.360)
+Train: 110 [ 50/312 ( 16%)] Loss: 4.94 (4.93) Time: 0.410s, 2495.27/s (0.432s, 2371.62/s) LR: 6.617e-02 Data: 0.033 (0.054)
+Train: 110 [ 100/312 ( 32%)] Loss: 4.98 (4.95) Time: 0.403s, 2542.53/s (0.419s, 2443.84/s) LR: 6.617e-02 Data: 0.027 (0.041)
+Train: 110 [ 150/312 ( 48%)] Loss: 5.13 (4.98) Time: 0.406s, 2524.36/s (0.414s, 2470.73/s) LR: 6.617e-02 Data: 0.027 (0.036)
+Train: 110 [ 200/312 ( 64%)] Loss: 5.05 (5.00) Time: 0.407s, 2518.30/s (0.413s, 2481.64/s) LR: 6.617e-02 Data: 0.028 (0.034)
+Train: 110 [ 250/312 ( 80%)] Loss: 5.04 (5.01) Time: 0.405s, 2526.67/s (0.411s, 2490.72/s) LR: 6.617e-02 Data: 0.028 (0.033)
+Train: 110 [ 300/312 ( 96%)] Loss: 5.10 (5.03) Time: 0.405s, 2526.19/s (0.410s, 2497.66/s) LR: 6.617e-02 Data: 0.029 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.416 (1.416) Loss: 6.696 ( 6.696) Acc@1: 6.934 ( 6.934) Acc@5: 15.527 ( 15.527)
+Test: [ 48/48] Time: 0.090 (0.319) Loss: 6.490 ( 6.690) Acc@1: 8.608 ( 6.962) Acc@5: 16.156 ( 15.384)
+Train: 111 [ 0/312 ( 0%)] Loss: 4.98 (4.98) Time: 1.972s, 519.34/s (1.972s, 519.34/s) LR: 6.309e-02 Data: 1.278 (1.278)
+Train: 111 [ 50/312 ( 16%)] Loss: 4.93 (4.92) Time: 0.408s, 2509.40/s (0.437s, 2341.64/s) LR: 6.309e-02 Data: 0.025 (0.052)
+Train: 111 [ 100/312 ( 32%)] Loss: 5.01 (4.93) Time: 0.405s, 2527.74/s (0.422s, 2426.08/s) LR: 6.309e-02 Data: 0.027 (0.040)
+Train: 111 [ 150/312 ( 48%)] Loss: 5.04 (4.95) Time: 0.406s, 2523.36/s (0.416s, 2459.59/s) LR: 6.309e-02 Data: 0.029 (0.036)
+Train: 111 [ 200/312 ( 64%)] Loss: 5.09 (4.97) Time: 0.405s, 2527.42/s (0.414s, 2476.00/s) LR: 6.309e-02 Data: 0.026 (0.034)
+Train: 111 [ 250/312 ( 80%)] Loss: 5.04 (4.99) Time: 0.408s, 2507.34/s (0.412s, 2483.56/s) LR: 6.309e-02 Data: 0.028 (0.032)
+Train: 111 [ 300/312 ( 96%)] Loss: 5.07 (5.00) Time: 0.408s, 2511.16/s (0.411s, 2488.79/s) LR: 6.309e-02 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.411 (1.411) Loss: 6.841 ( 6.841) Acc@1: 5.371 ( 5.371) Acc@5: 15.332 ( 15.332)
+Test: [ 48/48] Time: 0.089 (0.320) Loss: 6.555 ( 6.740) Acc@1: 7.547 ( 6.754) Acc@5: 16.981 ( 15.364)
+Train: 112 [ 0/312 ( 0%)] Loss: 4.89 (4.89) Time: 1.969s, 520.17/s (1.969s, 520.17/s) LR: 6.007e-02 Data: 1.194 (1.194)
+Train: 112 [ 50/312 ( 16%)] Loss: 4.92 (4.90) Time: 0.404s, 2534.69/s (0.435s, 2352.23/s) LR: 6.007e-02 Data: 0.027 (0.051)
+Train: 112 [ 100/312 ( 32%)] Loss: 4.84 (4.91) Time: 0.407s, 2515.51/s (0.421s, 2429.96/s) LR: 6.007e-02 Data: 0.026 (0.039)
+Train: 112 [ 150/312 ( 48%)] Loss: 4.96 (4.93) Time: 0.404s, 2534.64/s (0.416s, 2458.98/s) LR: 6.007e-02 Data: 0.027 (0.036)
+Train: 112 [ 200/312 ( 64%)] Loss: 5.03 (4.95) Time: 0.404s, 2532.49/s (0.414s, 2476.42/s) LR: 6.007e-02 Data: 0.028 (0.034)
+Train: 112 [ 250/312 ( 80%)] Loss: 5.00 (4.96) Time: 0.405s, 2527.80/s (0.412s, 2486.49/s) LR: 6.007e-02 Data: 0.026 (0.032)
+Train: 112 [ 300/312 ( 96%)] Loss: 5.02 (4.97) Time: 0.407s, 2513.99/s (0.411s, 2490.97/s) LR: 6.007e-02 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.423 (1.423) Loss: 6.630 ( 6.630) Acc@1: 6.152 ( 6.152) Acc@5: 15.723 ( 15.723)
+Test: [ 48/48] Time: 0.090 (0.320) Loss: 6.356 ( 6.582) Acc@1: 7.783 ( 7.238) Acc@5: 17.925 ( 16.064)
+Train: 113 [ 0/312 ( 0%)] Loss: 4.78 (4.78) Time: 1.852s, 552.96/s (1.852s, 552.96/s) LR: 5.711e-02 Data: 1.478 (1.478)
+Train: 113 [ 50/312 ( 16%)] Loss: 4.74 (4.84) Time: 0.404s, 2537.07/s (0.433s, 2366.40/s) LR: 5.711e-02 Data: 0.028 (0.056)
+Train: 113 [ 100/312 ( 32%)] Loss: 4.93 (4.87) Time: 0.407s, 2519.02/s (0.419s, 2445.20/s) LR: 5.711e-02 Data: 0.029 (0.042)
+Train: 113 [ 150/312 ( 48%)] Loss: 5.00 (4.89) Time: 0.407s, 2517.59/s (0.415s, 2467.78/s) LR: 5.711e-02 Data: 0.028 (0.037)
+Train: 113 [ 200/312 ( 64%)] Loss: 5.00 (4.91) Time: 0.404s, 2535.24/s (0.413s, 2480.08/s) LR: 5.711e-02 Data: 0.027 (0.035)
+Train: 113 [ 250/312 ( 80%)] Loss: 4.89 (4.93) Time: 0.404s, 2531.85/s (0.412s, 2488.14/s) LR: 5.711e-02 Data: 0.027 (0.033)
+Train: 113 [ 300/312 ( 96%)] Loss: 4.95 (4.94) Time: 0.410s, 2496.30/s (0.411s, 2492.99/s) LR: 5.711e-02 Data: 0.033 (0.033)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.402 (1.402) Loss: 6.884 ( 6.884) Acc@1: 5.957 ( 5.957) Acc@5: 14.258 ( 14.258)
+Test: [ 48/48] Time: 0.090 (0.321) Loss: 6.574 ( 6.766) Acc@1: 7.311 ( 6.830) Acc@5: 17.925 ( 14.982)
+Train: 114 [ 0/312 ( 0%)] Loss: 4.78 (4.78) Time: 2.057s, 497.78/s (2.057s, 497.78/s) LR: 5.421e-02 Data: 1.182 (1.182)
+Train: 114 [ 50/312 ( 16%)] Loss: 4.89 (4.82) Time: 0.405s, 2529.46/s (0.438s, 2339.70/s) LR: 5.421e-02 Data: 0.028 (0.050)
+Train: 114 [ 100/312 ( 32%)] Loss: 4.88 (4.85) Time: 0.404s, 2537.16/s (0.422s, 2428.18/s) LR: 5.421e-02 Data: 0.027 (0.039)
+Train: 114 [ 150/312 ( 48%)] Loss: 4.86 (4.87) Time: 0.403s, 2540.32/s (0.416s, 2463.68/s) LR: 5.421e-02 Data: 0.028 (0.035)
+Train: 114 [ 200/312 ( 64%)] Loss: 4.86 (4.88) Time: 0.405s, 2528.08/s (0.412s, 2482.97/s) LR: 5.421e-02 Data: 0.029 (0.033)
+Train: 114 [ 250/312 ( 80%)] Loss: 4.97 (4.90) Time: 0.405s, 2526.38/s (0.411s, 2492.57/s) LR: 5.421e-02 Data: 0.029 (0.032)
+Train: 114 [ 300/312 ( 96%)] Loss: 5.03 (4.91) Time: 0.404s, 2535.40/s (0.410s, 2496.77/s) LR: 5.421e-02 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.416 (1.416) Loss: 6.782 ( 6.782) Acc@1: 6.250 ( 6.250) Acc@5: 14.941 ( 14.941)
+Test: [ 48/48] Time: 0.090 (0.318) Loss: 6.532 ( 6.752) Acc@1: 8.726 ( 6.850) Acc@5: 17.335 ( 15.394)
+Train: 115 [ 0/312 ( 0%)] Loss: 4.84 (4.84) Time: 1.846s, 554.76/s (1.846s, 554.76/s) LR: 5.137e-02 Data: 1.091 (1.091)
+Train: 115 [ 50/312 ( 16%)] Loss: 4.88 (4.80) Time: 0.404s, 2531.58/s (0.435s, 2353.29/s) LR: 5.137e-02 Data: 0.027 (0.049)
+Train: 115 [ 100/312 ( 32%)] Loss: 4.90 (4.82) Time: 0.403s, 2539.91/s (0.420s, 2436.60/s) LR: 5.137e-02 Data: 0.026 (0.038)
+Train: 115 [ 150/312 ( 48%)] Loss: 4.85 (4.84) Time: 0.404s, 2531.83/s (0.415s, 2464.58/s) LR: 5.137e-02 Data: 0.027 (0.035)
+Train: 115 [ 200/312 ( 64%)] Loss: 4.88 (4.85) Time: 0.403s, 2543.52/s (0.413s, 2478.72/s) LR: 5.137e-02 Data: 0.026 (0.033)
+Train: 115 [ 250/312 ( 80%)] Loss: 4.89 (4.87) Time: 0.405s, 2528.92/s (0.411s, 2490.28/s) LR: 5.137e-02 Data: 0.028 (0.032)
+Train: 115 [ 300/312 ( 96%)] Loss: 4.92 (4.88) Time: 0.404s, 2535.94/s (0.410s, 2498.83/s) LR: 5.137e-02 Data: 0.029 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.418 (1.418) Loss: 6.855 ( 6.855) Acc@1: 7.129 ( 7.129) Acc@5: 14.844 ( 14.844)
+Test: [ 48/48] Time: 0.089 (0.318) Loss: 6.570 ( 6.809) Acc@1: 8.373 ( 6.906) Acc@5: 18.042 ( 15.216)
+Train: 116 [ 0/312 ( 0%)] Loss: 4.79 (4.79) Time: 1.750s, 585.29/s (1.750s, 585.29/s) LR: 4.860e-02 Data: 1.360 (1.360)
+Train: 116 [ 50/312 ( 16%)] Loss: 4.76 (4.78) Time: 0.408s, 2512.27/s (0.434s, 2359.66/s) LR: 4.860e-02 Data: 0.028 (0.053)
+Train: 116 [ 100/312 ( 32%)] Loss: 4.83 (4.79) Time: 0.405s, 2527.11/s (0.421s, 2432.07/s) LR: 4.860e-02 Data: 0.027 (0.041)
+Train: 116 [ 150/312 ( 48%)] Loss: 4.86 (4.81) Time: 0.407s, 2518.02/s (0.417s, 2457.65/s) LR: 4.860e-02 Data: 0.028 (0.037)
+Train: 116 [ 200/312 ( 64%)] Loss: 5.04 (4.83) Time: 0.404s, 2533.51/s (0.414s, 2471.15/s) LR: 4.860e-02 Data: 0.028 (0.034)
+Train: 116 [ 250/312 ( 80%)] Loss: 4.95 (4.84) Time: 0.407s, 2515.63/s (0.413s, 2480.41/s) LR: 4.860e-02 Data: 0.027 (0.033)
+Train: 116 [ 300/312 ( 96%)] Loss: 4.94 (4.85) Time: 0.406s, 2523.31/s (0.412s, 2485.79/s) LR: 4.860e-02 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.394 (1.394) Loss: 6.928 ( 6.928) Acc@1: 5.664 ( 5.664) Acc@5: 13.867 ( 13.867)
+Test: [ 48/48] Time: 0.090 (0.318) Loss: 6.650 ( 6.840) Acc@1: 7.193 ( 6.646) Acc@5: 15.566 ( 14.942)
+Train: 117 [ 0/312 ( 0%)] Loss: 4.75 (4.75) Time: 1.589s, 644.53/s (1.589s, 644.53/s) LR: 4.590e-02 Data: 1.216 (1.216)
+Train: 117 [ 50/312 ( 16%)] Loss: 4.74 (4.73) Time: 0.408s, 2512.30/s (0.430s, 2383.31/s) LR: 4.590e-02 Data: 0.028 (0.051)
+Train: 117 [ 100/312 ( 32%)] Loss: 4.93 (4.76) Time: 0.404s, 2535.14/s (0.418s, 2447.89/s) LR: 4.590e-02 Data: 0.028 (0.039)
+Train: 117 [ 150/312 ( 48%)] Loss: 4.78 (4.78) Time: 0.401s, 2551.45/s (0.413s, 2477.70/s) LR: 4.590e-02 Data: 0.026 (0.036)
+Train: 117 [ 200/312 ( 64%)] Loss: 4.92 (4.80) Time: 0.402s, 2545.83/s (0.411s, 2493.00/s) LR: 4.590e-02 Data: 0.026 (0.034)
+Train: 117 [ 250/312 ( 80%)] Loss: 5.03 (4.81) Time: 0.407s, 2518.47/s (0.409s, 2501.43/s) LR: 4.590e-02 Data: 0.028 (0.032)
+Train: 117 [ 300/312 ( 96%)] Loss: 4.91 (4.82) Time: 0.407s, 2513.35/s (0.409s, 2504.75/s) LR: 4.590e-02 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.430 (1.430) Loss: 6.784 ( 6.784) Acc@1: 6.738 ( 6.738) Acc@5: 14.746 ( 14.746)
+Test: [ 48/48] Time: 0.090 (0.319) Loss: 6.622 ( 6.795) Acc@1: 7.901 ( 6.672) Acc@5: 17.335 ( 15.164)
+Train: 118 [ 0/312 ( 0%)] Loss: 4.69 (4.69) Time: 1.725s, 593.78/s (1.725s, 593.78/s) LR: 4.326e-02 Data: 1.351 (1.351)
+Train: 118 [ 50/312 ( 16%)] Loss: 4.67 (4.72) Time: 0.406s, 2522.49/s (0.433s, 2365.04/s) LR: 4.326e-02 Data: 0.028 (0.054)
+Train: 118 [ 100/312 ( 32%)] Loss: 4.77 (4.74) Time: 0.406s, 2523.63/s (0.420s, 2440.77/s) LR: 4.326e-02 Data: 0.028 (0.041)
+Train: 118 [ 150/312 ( 48%)] Loss: 4.86 (4.76) Time: 0.408s, 2511.42/s (0.415s, 2465.06/s) LR: 4.326e-02 Data: 0.027 (0.037)
+Train: 118 [ 200/312 ( 64%)] Loss: 4.83 (4.77) Time: 0.405s, 2529.43/s (0.413s, 2479.58/s) LR: 4.326e-02 Data: 0.029 (0.034)
+Train: 118 [ 250/312 ( 80%)] Loss: 4.85 (4.79) Time: 0.409s, 2506.50/s (0.411s, 2489.75/s) LR: 4.326e-02 Data: 0.028 (0.033)
+Train: 118 [ 300/312 ( 96%)] Loss: 4.86 (4.80) Time: 0.407s, 2514.36/s (0.410s, 2496.57/s) LR: 4.326e-02 Data: 0.029 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.444 (1.444) Loss: 6.862 ( 6.862) Acc@1: 6.250 ( 6.250) Acc@5: 13.770 ( 13.770)
+Test: [ 48/48] Time: 0.090 (0.319) Loss: 6.589 ( 6.788) Acc@1: 7.311 ( 6.826) Acc@5: 16.509 ( 15.500)
+Train: 119 [ 0/312 ( 0%)] Loss: 4.64 (4.64) Time: 1.840s, 556.66/s (1.840s, 556.66/s) LR: 4.069e-02 Data: 1.467 (1.467)
+Train: 119 [ 50/312 ( 16%)] Loss: 4.74 (4.69) Time: 0.407s, 2513.75/s (0.436s, 2349.77/s) LR: 4.069e-02 Data: 0.025 (0.056)
+Train: 119 [ 100/312 ( 32%)] Loss: 4.70 (4.71) Time: 0.403s, 2541.91/s (0.421s, 2433.24/s) LR: 4.069e-02 Data: 0.027 (0.042)
+Train: 119 [ 150/312 ( 48%)] Loss: 4.79 (4.73) Time: 0.404s, 2533.78/s (0.415s, 2464.51/s) LR: 4.069e-02 Data: 0.028 (0.037)
+Train: 119 [ 200/312 ( 64%)] Loss: 4.78 (4.74) Time: 0.406s, 2523.72/s (0.413s, 2479.36/s) LR: 4.069e-02 Data: 0.028 (0.035)
+Train: 119 [ 250/312 ( 80%)] Loss: 4.80 (4.75) Time: 0.406s, 2519.58/s (0.412s, 2486.52/s) LR: 4.069e-02 Data: 0.027 (0.033)
+Train: 119 [ 300/312 ( 96%)] Loss: 4.88 (4.77) Time: 0.403s, 2542.11/s (0.411s, 2491.83/s) LR: 4.069e-02 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.417 (1.417) Loss: 6.825 ( 6.825) Acc@1: 6.543 ( 6.543) Acc@5: 15.234 ( 15.234)
+Test: [ 48/48] Time: 0.090 (0.318) Loss: 6.628 ( 6.847) Acc@1: 7.783 ( 6.668) Acc@5: 16.863 ( 14.952)
+Train: 120 [ 0/312 ( 0%)] Loss: 4.62 (4.62) Time: 1.824s, 561.33/s (1.824s, 561.33/s) LR: 3.820e-02 Data: 1.454 (1.454)
+Train: 120 [ 50/312 ( 16%)] Loss: 4.65 (4.68) Time: 0.403s, 2542.66/s (0.431s, 2376.62/s) LR: 3.820e-02 Data: 0.027 (0.056)
+Train: 120 [ 100/312 ( 32%)] Loss: 4.67 (4.69) Time: 0.405s, 2526.57/s (0.418s, 2450.22/s) LR: 3.820e-02 Data: 0.027 (0.042)
+Train: 120 [ 150/312 ( 48%)] Loss: 4.62 (4.71) Time: 0.406s, 2520.96/s (0.414s, 2472.34/s) LR: 3.820e-02 Data: 0.025 (0.037)
+Train: 120 [ 200/312 ( 64%)] Loss: 4.68 (4.72) Time: 0.405s, 2530.05/s (0.412s, 2483.38/s) LR: 3.820e-02 Data: 0.027 (0.035)
+Train: 120 [ 250/312 ( 80%)] Loss: 4.84 (4.73) Time: 0.414s, 2473.45/s (0.411s, 2489.20/s) LR: 3.820e-02 Data: 0.024 (0.033)
+Train: 120 [ 300/312 ( 96%)] Loss: 4.86 (4.74) Time: 0.403s, 2543.24/s (0.410s, 2496.18/s) LR: 3.820e-02 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.404 (1.404) Loss: 6.897 ( 6.897) Acc@1: 6.836 ( 6.836) Acc@5: 14.160 ( 14.160)
+Test: [ 48/48] Time: 0.089 (0.321) Loss: 6.648 ( 6.880) Acc@1: 8.255 ( 6.722) Acc@5: 17.571 ( 15.032)
+Train: 121 [ 0/312 ( 0%)] Loss: 4.63 (4.63) Time: 1.733s, 590.80/s (1.733s, 590.80/s) LR: 3.577e-02 Data: 1.240 (1.240)
+Train: 121 [ 50/312 ( 16%)] Loss: 4.58 (4.64) Time: 0.400s, 2559.38/s (0.426s, 2402.61/s) LR: 3.577e-02 Data: 0.029 (0.051)
+Train: 121 [ 100/312 ( 32%)] Loss: 4.87 (4.66) Time: 0.401s, 2554.25/s (0.414s, 2475.64/s) LR: 3.577e-02 Data: 0.028 (0.039)
+Train: 121 [ 150/312 ( 48%)] Loss: 4.74 (4.68) Time: 0.403s, 2542.64/s (0.410s, 2497.88/s) LR: 3.577e-02 Data: 0.027 (0.036)
+Train: 121 [ 200/312 ( 64%)] Loss: 4.82 (4.69) Time: 0.405s, 2530.76/s (0.409s, 2505.43/s) LR: 3.577e-02 Data: 0.027 (0.034)
+Train: 121 [ 250/312 ( 80%)] Loss: 4.85 (4.70) Time: 0.407s, 2514.33/s (0.408s, 2508.42/s) LR: 3.577e-02 Data: 0.028 (0.032)
+Train: 121 [ 300/312 ( 96%)] Loss: 4.84 (4.71) Time: 0.406s, 2523.90/s (0.408s, 2510.09/s) LR: 3.577e-02 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.405 (1.405) Loss: 6.925 ( 6.925) Acc@1: 5.957 ( 5.957) Acc@5: 14.258 ( 14.258)
+Test: [ 48/48] Time: 0.090 (0.320) Loss: 6.610 ( 6.868) Acc@1: 7.783 ( 6.570) Acc@5: 17.335 ( 14.876)
+Train: 122 [ 0/312 ( 0%)] Loss: 4.68 (4.68) Time: 1.753s, 584.07/s (1.753s, 584.07/s) LR: 3.342e-02 Data: 1.379 (1.379)
+Train: 122 [ 50/312 ( 16%)] Loss: 4.60 (4.63) Time: 0.408s, 2507.97/s (0.433s, 2366.15/s) LR: 3.342e-02 Data: 0.027 (0.054)
+Train: 122 [ 100/312 ( 32%)] Loss: 4.76 (4.63) Time: 0.407s, 2516.24/s (0.421s, 2434.41/s) LR: 3.342e-02 Data: 0.028 (0.041)
+Train: 122 [ 150/312 ( 48%)] Loss: 4.67 (4.65) Time: 0.402s, 2546.35/s (0.415s, 2466.06/s) LR: 3.342e-02 Data: 0.027 (0.037)
+Train: 122 [ 200/312 ( 64%)] Loss: 4.71 (4.66) Time: 0.402s, 2546.23/s (0.412s, 2482.93/s) LR: 3.342e-02 Data: 0.027 (0.034)
+Train: 122 [ 250/312 ( 80%)] Loss: 4.66 (4.67) Time: 0.406s, 2524.17/s (0.411s, 2492.01/s) LR: 3.342e-02 Data: 0.027 (0.033)
+Train: 122 [ 300/312 ( 96%)] Loss: 4.90 (4.69) Time: 0.406s, 2520.21/s (0.410s, 2495.71/s) LR: 3.342e-02 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.399 (1.399) Loss: 6.933 ( 6.933) Acc@1: 6.055 ( 6.055) Acc@5: 14.551 ( 14.551)
+Test: [ 48/48] Time: 0.089 (0.322) Loss: 6.694 ( 6.895) Acc@1: 6.958 ( 6.508) Acc@5: 16.745 ( 14.806)
+Train: 123 [ 0/312 ( 0%)] Loss: 4.61 (4.61) Time: 1.589s, 644.37/s (1.589s, 644.37/s) LR: 3.113e-02 Data: 1.145 (1.145)
+Train: 123 [ 50/312 ( 16%)] Loss: 4.61 (4.61) Time: 0.402s, 2544.32/s (0.426s, 2403.81/s) LR: 3.113e-02 Data: 0.027 (0.049)
+Train: 123 [ 100/312 ( 32%)] Loss: 4.53 (4.61) Time: 0.403s, 2541.18/s (0.414s, 2470.79/s) LR: 3.113e-02 Data: 0.026 (0.038)
+Train: 123 [ 150/312 ( 48%)] Loss: 4.76 (4.63) Time: 0.408s, 2511.56/s (0.411s, 2490.09/s) LR: 3.113e-02 Data: 0.028 (0.035)
+Train: 123 [ 200/312 ( 64%)] Loss: 4.79 (4.64) Time: 0.406s, 2521.08/s (0.410s, 2497.63/s) LR: 3.113e-02 Data: 0.028 (0.033)
+Train: 123 [ 250/312 ( 80%)] Loss: 4.73 (4.65) Time: 0.404s, 2531.92/s (0.409s, 2502.91/s) LR: 3.113e-02 Data: 0.028 (0.032)
+Train: 123 [ 300/312 ( 96%)] Loss: 4.67 (4.66) Time: 0.407s, 2517.95/s (0.409s, 2505.99/s) LR: 3.113e-02 Data: 0.028 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.471 (1.471) Loss: 7.014 ( 7.014) Acc@1: 6.152 ( 6.152) Acc@5: 13.965 ( 13.965)
+Test: [ 48/48] Time: 0.090 (0.321) Loss: 6.764 ( 6.958) Acc@1: 8.255 ( 6.466) Acc@5: 18.278 ( 14.708)
+Train: 124 [ 0/312 ( 0%)] Loss: 4.49 (4.49) Time: 1.555s, 658.40/s (1.555s, 658.40/s) LR: 2.893e-02 Data: 1.120 (1.120)
+Train: 124 [ 50/312 ( 16%)] Loss: 4.62 (4.56) Time: 0.407s, 2517.66/s (0.430s, 2382.20/s) LR: 2.893e-02 Data: 0.027 (0.049)
+Train: 124 [ 100/312 ( 32%)] Loss: 4.59 (4.59) Time: 0.408s, 2511.10/s (0.419s, 2443.71/s) LR: 2.893e-02 Data: 0.028 (0.039)
+Train: 124 [ 150/312 ( 48%)] Loss: 4.61 (4.60) Time: 0.404s, 2532.52/s (0.414s, 2472.20/s) LR: 2.893e-02 Data: 0.027 (0.035)
+Train: 124 [ 200/312 ( 64%)] Loss: 4.56 (4.61) Time: 0.407s, 2517.56/s (0.412s, 2488.34/s) LR: 2.893e-02 Data: 0.033 (0.033)
+Train: 124 [ 250/312 ( 80%)] Loss: 4.71 (4.62) Time: 0.404s, 2537.20/s (0.410s, 2498.31/s) LR: 2.893e-02 Data: 0.028 (0.032)
+Train: 124 [ 300/312 ( 96%)] Loss: 4.65 (4.63) Time: 0.406s, 2520.74/s (0.409s, 2503.36/s) LR: 2.893e-02 Data: 0.028 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.442 (1.442) Loss: 7.054 ( 7.054) Acc@1: 5.273 ( 5.273) Acc@5: 13.281 ( 13.281)
+Test: [ 48/48] Time: 0.090 (0.322) Loss: 6.789 ( 7.002) Acc@1: 7.311 ( 6.112) Acc@5: 17.335 ( 14.236)
+Train: 125 [ 0/312 ( 0%)] Loss: 4.59 (4.59) Time: 1.763s, 580.83/s (1.763s, 580.83/s) LR: 2.679e-02 Data: 1.388 (1.388)
+Train: 125 [ 50/312 ( 16%)] Loss: 4.65 (4.54) Time: 0.407s, 2515.25/s (0.434s, 2361.85/s) LR: 2.679e-02 Data: 0.027 (0.054)
+Train: 125 [ 100/312 ( 32%)] Loss: 4.52 (4.57) Time: 0.409s, 2506.27/s (0.420s, 2440.44/s) LR: 2.679e-02 Data: 0.033 (0.041)
+Train: 125 [ 150/312 ( 48%)] Loss: 4.56 (4.58) Time: 0.402s, 2547.30/s (0.414s, 2471.79/s) LR: 2.679e-02 Data: 0.028 (0.037)
+Train: 125 [ 200/312 ( 64%)] Loss: 4.63 (4.59) Time: 0.404s, 2535.90/s (0.412s, 2487.72/s) LR: 2.679e-02 Data: 0.028 (0.035)
+Train: 125 [ 250/312 ( 80%)] Loss: 4.74 (4.60) Time: 0.406s, 2523.45/s (0.410s, 2495.36/s) LR: 2.679e-02 Data: 0.027 (0.033)
+Train: 125 [ 300/312 ( 96%)] Loss: 4.69 (4.61) Time: 0.408s, 2512.07/s (0.410s, 2498.94/s) LR: 2.679e-02 Data: 0.029 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.416 (1.416) Loss: 7.034 ( 7.034) Acc@1: 5.664 ( 5.664) Acc@5: 12.305 ( 12.305)
+Test: [ 48/48] Time: 0.090 (0.320) Loss: 6.753 ( 6.975) Acc@1: 7.311 ( 6.326) Acc@5: 17.689 ( 14.518)
+Train: 126 [ 0/312 ( 0%)] Loss: 4.58 (4.58) Time: 1.491s, 686.65/s (1.491s, 686.65/s) LR: 2.474e-02 Data: 1.071 (1.071)
+Train: 126 [ 50/312 ( 16%)] Loss: 4.62 (4.51) Time: 0.406s, 2521.99/s (0.429s, 2389.68/s) LR: 2.474e-02 Data: 0.027 (0.048)
+Train: 126 [ 100/312 ( 32%)] Loss: 4.54 (4.53) Time: 0.410s, 2499.23/s (0.417s, 2453.98/s) LR: 2.474e-02 Data: 0.033 (0.038)
+Train: 126 [ 150/312 ( 48%)] Loss: 4.59 (4.55) Time: 0.405s, 2525.39/s (0.413s, 2476.79/s) LR: 2.474e-02 Data: 0.028 (0.035)
+Train: 126 [ 200/312 ( 64%)] Loss: 4.63 (4.56) Time: 0.407s, 2516.74/s (0.412s, 2486.45/s) LR: 2.474e-02 Data: 0.028 (0.033)
+Train: 126 [ 250/312 ( 80%)] Loss: 4.55 (4.57) Time: 0.406s, 2524.48/s (0.411s, 2492.30/s) LR: 2.474e-02 Data: 0.027 (0.032)
+Train: 126 [ 300/312 ( 96%)] Loss: 4.71 (4.58) Time: 0.411s, 2492.01/s (0.410s, 2496.22/s) LR: 2.474e-02 Data: 0.032 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.415 (1.415) Loss: 6.890 ( 6.890) Acc@1: 5.469 ( 5.469) Acc@5: 13.184 ( 13.184)
+Test: [ 48/48] Time: 0.089 (0.317) Loss: 6.664 ( 6.877) Acc@1: 7.429 ( 6.302) Acc@5: 16.981 ( 14.564)
+Train: 127 [ 0/312 ( 0%)] Loss: 4.45 (4.45) Time: 1.713s, 597.65/s (1.713s, 597.65/s) LR: 2.276e-02 Data: 1.344 (1.344)
+Train: 127 [ 50/312 ( 16%)] Loss: 4.47 (4.51) Time: 0.401s, 2554.51/s (0.428s, 2394.10/s) LR: 2.276e-02 Data: 0.027 (0.054)
+Train: 127 [ 100/312 ( 32%)] Loss: 4.59 (4.52) Time: 0.403s, 2543.96/s (0.415s, 2466.49/s) LR: 2.276e-02 Data: 0.027 (0.041)
+Train: 127 [ 150/312 ( 48%)] Loss: 4.54 (4.53) Time: 0.404s, 2536.27/s (0.411s, 2488.51/s) LR: 2.276e-02 Data: 0.028 (0.037)
+Train: 127 [ 200/312 ( 64%)] Loss: 4.57 (4.54) Time: 0.406s, 2525.00/s (0.410s, 2496.27/s) LR: 2.276e-02 Data: 0.027 (0.035)
+Train: 127 [ 250/312 ( 80%)] Loss: 4.61 (4.55) Time: 0.405s, 2528.55/s (0.409s, 2500.84/s) LR: 2.276e-02 Data: 0.027 (0.033)
+Train: 127 [ 300/312 ( 96%)] Loss: 4.62 (4.56) Time: 0.404s, 2533.33/s (0.409s, 2504.73/s) LR: 2.276e-02 Data: 0.026 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.416 (1.416) Loss: 7.065 ( 7.065) Acc@1: 5.664 ( 5.664) Acc@5: 12.793 ( 12.793)
+Test: [ 48/48] Time: 0.090 (0.319) Loss: 6.812 ( 7.040) Acc@1: 7.547 ( 6.218) Acc@5: 16.156 ( 14.186)
+Train: 128 [ 0/312 ( 0%)] Loss: 4.52 (4.52) Time: 1.783s, 574.27/s (1.783s, 574.27/s) LR: 2.086e-02 Data: 1.277 (1.277)
+Train: 128 [ 50/312 ( 16%)] Loss: 4.50 (4.49) Time: 0.404s, 2532.51/s (0.433s, 2364.38/s) LR: 2.086e-02 Data: 0.026 (0.052)
+Train: 128 [ 100/312 ( 32%)] Loss: 4.50 (4.50) Time: 0.403s, 2540.92/s (0.419s, 2446.60/s) LR: 2.086e-02 Data: 0.028 (0.040)
+Train: 128 [ 150/312 ( 48%)] Loss: 4.50 (4.51) Time: 0.404s, 2536.72/s (0.414s, 2475.56/s) LR: 2.086e-02 Data: 0.028 (0.036)
+Train: 128 [ 200/312 ( 64%)] Loss: 4.48 (4.52) Time: 0.405s, 2526.72/s (0.411s, 2488.62/s) LR: 2.086e-02 Data: 0.028 (0.034)
+Train: 128 [ 250/312 ( 80%)] Loss: 4.57 (4.52) Time: 0.405s, 2526.20/s (0.411s, 2494.33/s) LR: 2.086e-02 Data: 0.026 (0.033)
+Train: 128 [ 300/312 ( 96%)] Loss: 4.57 (4.53) Time: 0.406s, 2524.11/s (0.410s, 2497.54/s) LR: 2.086e-02 Data: 0.026 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.436 (1.436) Loss: 6.923 ( 6.923) Acc@1: 4.980 ( 4.980) Acc@5: 15.137 ( 15.137)
+Test: [ 48/48] Time: 0.090 (0.322) Loss: 6.679 ( 6.900) Acc@1: 6.840 ( 6.566) Acc@5: 16.627 ( 14.732)
+Train: 129 [ 0/312 ( 0%)] Loss: 4.48 (4.48) Time: 1.840s, 556.62/s (1.840s, 556.62/s) LR: 1.903e-02 Data: 1.426 (1.426)
+Train: 129 [ 50/312 ( 16%)] Loss: 4.51 (4.46) Time: 0.405s, 2528.57/s (0.435s, 2352.07/s) LR: 1.903e-02 Data: 0.028 (0.056)
+Train: 129 [ 100/312 ( 32%)] Loss: 4.53 (4.47) Time: 0.405s, 2530.87/s (0.420s, 2439.11/s) LR: 1.903e-02 Data: 0.028 (0.042)
+Train: 129 [ 150/312 ( 48%)] Loss: 4.52 (4.49) Time: 0.401s, 2556.39/s (0.414s, 2473.17/s) LR: 1.903e-02 Data: 0.028 (0.037)
+Train: 129 [ 200/312 ( 64%)] Loss: 4.46 (4.50) Time: 0.403s, 2541.70/s (0.411s, 2490.53/s) LR: 1.903e-02 Data: 0.028 (0.035)
+Train: 129 [ 250/312 ( 80%)] Loss: 4.51 (4.50) Time: 0.406s, 2523.28/s (0.410s, 2499.39/s) LR: 1.903e-02 Data: 0.028 (0.034)
+Train: 129 [ 300/312 ( 96%)] Loss: 4.50 (4.51) Time: 0.407s, 2518.21/s (0.409s, 2503.15/s) LR: 1.903e-02 Data: 0.028 (0.033)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.415 (1.415) Loss: 6.956 ( 6.956) Acc@1: 5.957 ( 5.957) Acc@5: 12.695 ( 12.695)
+Test: [ 48/48] Time: 0.089 (0.324) Loss: 6.695 ( 6.940) Acc@1: 7.665 ( 6.562) Acc@5: 16.745 ( 14.770)
+Train: 130 [ 0/312 ( 0%)] Loss: 4.38 (4.38) Time: 1.715s, 597.14/s (1.715s, 597.14/s) LR: 1.729e-02 Data: 1.345 (1.345)
+Train: 130 [ 50/312 ( 16%)] Loss: 4.50 (4.44) Time: 0.402s, 2548.24/s (0.427s, 2397.06/s) LR: 1.729e-02 Data: 0.029 (0.054)
+Train: 130 [ 100/312 ( 32%)] Loss: 4.57 (4.45) Time: 0.403s, 2540.38/s (0.415s, 2468.04/s) LR: 1.729e-02 Data: 0.028 (0.041)
+Train: 130 [ 150/312 ( 48%)] Loss: 4.47 (4.46) Time: 0.405s, 2529.04/s (0.411s, 2489.44/s) LR: 1.729e-02 Data: 0.027 (0.037)
+Train: 130 [ 200/312 ( 64%)] Loss: 4.52 (4.48) Time: 0.406s, 2521.61/s (0.410s, 2497.18/s) LR: 1.729e-02 Data: 0.028 (0.035)
+Train: 130 [ 250/312 ( 80%)] Loss: 4.49 (4.48) Time: 0.404s, 2536.26/s (0.409s, 2503.55/s) LR: 1.729e-02 Data: 0.028 (0.033)
+Train: 130 [ 300/312 ( 96%)] Loss: 4.49 (4.49) Time: 0.403s, 2539.50/s (0.408s, 2508.86/s) LR: 1.729e-02 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.390 (1.390) Loss: 7.123 ( 7.123) Acc@1: 5.371 ( 5.371) Acc@5: 12.305 ( 12.305)
+Test: [ 48/48] Time: 0.089 (0.318) Loss: 6.845 ( 7.083) Acc@1: 7.665 ( 6.044) Acc@5: 16.274 ( 13.818)
+Train: 131 [ 0/312 ( 0%)] Loss: 4.40 (4.40) Time: 1.703s, 601.34/s (1.703s, 601.34/s) LR: 1.563e-02 Data: 1.171 (1.171)
+Train: 131 [ 50/312 ( 16%)] Loss: 4.49 (4.43) Time: 0.404s, 2536.19/s (0.430s, 2383.33/s) LR: 1.563e-02 Data: 0.028 (0.049)
+Train: 131 [ 100/312 ( 32%)] Loss: 4.51 (4.43) Time: 0.407s, 2519.06/s (0.418s, 2452.51/s) LR: 1.563e-02 Data: 0.029 (0.039)
+Train: 131 [ 150/312 ( 48%)] Loss: 4.56 (4.44) Time: 0.405s, 2529.37/s (0.414s, 2473.41/s) LR: 1.563e-02 Data: 0.028 (0.035)
+Train: 131 [ 200/312 ( 64%)] Loss: 4.41 (4.45) Time: 0.405s, 2527.08/s (0.412s, 2483.21/s) LR: 1.563e-02 Data: 0.028 (0.033)
+Train: 131 [ 250/312 ( 80%)] Loss: 4.42 (4.46) Time: 0.408s, 2507.50/s (0.411s, 2489.24/s) LR: 1.563e-02 Data: 0.027 (0.032)
+Train: 131 [ 300/312 ( 96%)] Loss: 4.72 (4.47) Time: 0.404s, 2536.95/s (0.410s, 2496.68/s) LR: 1.563e-02 Data: 0.029 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.453 (1.453) Loss: 7.054 ( 7.054) Acc@1: 5.078 ( 5.078) Acc@5: 12.500 ( 12.500)
+Test: [ 48/48] Time: 0.089 (0.319) Loss: 6.874 ( 7.068) Acc@1: 6.604 ( 6.068) Acc@5: 16.392 ( 13.798)
+Train: 132 [ 0/312 ( 0%)] Loss: 4.40 (4.40) Time: 1.585s, 646.11/s (1.585s, 646.11/s) LR: 1.404e-02 Data: 1.215 (1.215)
+Train: 132 [ 50/312 ( 16%)] Loss: 4.26 (4.41) Time: 0.403s, 2542.91/s (0.426s, 2404.69/s) LR: 1.404e-02 Data: 0.026 (0.051)
+Train: 132 [ 100/312 ( 32%)] Loss: 4.48 (4.41) Time: 0.406s, 2522.73/s (0.415s, 2465.55/s) LR: 1.404e-02 Data: 0.028 (0.039)
+Train: 132 [ 150/312 ( 48%)] Loss: 4.51 (4.42) Time: 0.409s, 2502.30/s (0.412s, 2482.51/s) LR: 1.404e-02 Data: 0.028 (0.036)
+Train: 132 [ 200/312 ( 64%)] Loss: 4.41 (4.43) Time: 0.406s, 2520.78/s (0.411s, 2493.67/s) LR: 1.404e-02 Data: 0.029 (0.034)
+Train: 132 [ 250/312 ( 80%)] Loss: 4.48 (4.44) Time: 0.404s, 2532.95/s (0.409s, 2502.06/s) LR: 1.404e-02 Data: 0.028 (0.032)
+Train: 132 [ 300/312 ( 96%)] Loss: 4.44 (4.45) Time: 0.405s, 2528.82/s (0.409s, 2506.27/s) LR: 1.404e-02 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.400 (1.400) Loss: 6.958 ( 6.958) Acc@1: 5.664 ( 5.664) Acc@5: 13.477 ( 13.477)
+Test: [ 48/48] Time: 0.090 (0.322) Loss: 6.743 ( 6.964) Acc@1: 8.137 ( 6.456) Acc@5: 17.099 ( 14.550)
+Train: 133 [ 0/312 ( 0%)] Loss: 4.41 (4.41) Time: 1.591s, 643.80/s (1.591s, 643.80/s) LR: 1.254e-02 Data: 1.216 (1.216)
+Train: 133 [ 50/312 ( 16%)] Loss: 4.48 (4.41) Time: 0.407s, 2518.57/s (0.430s, 2381.10/s) LR: 1.254e-02 Data: 0.027 (0.051)
+Train: 133 [ 100/312 ( 32%)] Loss: 4.46 (4.41) Time: 0.405s, 2526.89/s (0.418s, 2449.81/s) LR: 1.254e-02 Data: 0.027 (0.040)
+Train: 133 [ 150/312 ( 48%)] Loss: 4.42 (4.42) Time: 0.406s, 2521.66/s (0.414s, 2471.99/s) LR: 1.254e-02 Data: 0.028 (0.036)
+Train: 133 [ 200/312 ( 64%)] Loss: 4.53 (4.42) Time: 0.405s, 2526.91/s (0.412s, 2484.64/s) LR: 1.254e-02 Data: 0.027 (0.034)
+Train: 133 [ 250/312 ( 80%)] Loss: 4.51 (4.43) Time: 0.410s, 2496.68/s (0.411s, 2490.36/s) LR: 1.254e-02 Data: 0.031 (0.033)
+Train: 133 [ 300/312 ( 96%)] Loss: 4.39 (4.43) Time: 0.405s, 2530.93/s (0.410s, 2495.76/s) LR: 1.254e-02 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.433 (1.433) Loss: 7.011 ( 7.011) Acc@1: 5.273 ( 5.273) Acc@5: 13.770 ( 13.770)
+Test: [ 48/48] Time: 0.089 (0.317) Loss: 6.799 ( 7.001) Acc@1: 7.429 ( 6.388) Acc@5: 16.863 ( 14.492)
+Train: 134 [ 0/312 ( 0%)] Loss: 4.30 (4.30) Time: 1.675s, 611.31/s (1.675s, 611.31/s) LR: 1.112e-02 Data: 1.092 (1.092)
+Train: 134 [ 50/312 ( 16%)] Loss: 4.46 (4.39) Time: 0.403s, 2540.03/s (0.428s, 2394.04/s) LR: 1.112e-02 Data: 0.028 (0.049)
+Train: 134 [ 100/312 ( 32%)] Loss: 4.39 (4.39) Time: 0.405s, 2530.94/s (0.416s, 2459.41/s) LR: 1.112e-02 Data: 0.028 (0.038)
+Train: 134 [ 150/312 ( 48%)] Loss: 4.41 (4.39) Time: 0.406s, 2521.97/s (0.413s, 2477.34/s) LR: 1.112e-02 Data: 0.028 (0.035)
+Train: 134 [ 200/312 ( 64%)] Loss: 4.45 (4.40) Time: 0.411s, 2492.91/s (0.412s, 2485.79/s) LR: 1.112e-02 Data: 0.033 (0.033)
+Train: 134 [ 250/312 ( 80%)] Loss: 4.45 (4.40) Time: 0.404s, 2535.78/s (0.411s, 2493.30/s) LR: 1.112e-02 Data: 0.027 (0.032)
+Train: 134 [ 300/312 ( 96%)] Loss: 4.43 (4.41) Time: 0.403s, 2540.44/s (0.410s, 2500.00/s) LR: 1.112e-02 Data: 0.027 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.393 (1.393) Loss: 7.077 ( 7.077) Acc@1: 4.980 ( 4.980) Acc@5: 12.891 ( 12.891)
+Test: [ 48/48] Time: 0.089 (0.319) Loss: 6.817 ( 7.039) Acc@1: 7.901 ( 6.228) Acc@5: 16.981 ( 14.256)
+Train: 135 [ 0/312 ( 0%)] Loss: 4.36 (4.36) Time: 2.301s, 445.07/s (2.301s, 445.07/s) LR: 9.789e-03 Data: 1.928 (1.928)
+Train: 135 [ 50/312 ( 16%)] Loss: 4.41 (4.36) Time: 0.409s, 2505.05/s (0.442s, 2318.17/s) LR: 9.789e-03 Data: 0.034 (0.065)
+Train: 135 [ 100/312 ( 32%)] Loss: 4.41 (4.37) Time: 0.403s, 2538.36/s (0.424s, 2417.57/s) LR: 9.789e-03 Data: 0.025 (0.047)
+Train: 135 [ 150/312 ( 48%)] Loss: 4.42 (4.38) Time: 0.407s, 2518.33/s (0.418s, 2450.11/s) LR: 9.789e-03 Data: 0.028 (0.040)
+Train: 135 [ 200/312 ( 64%)] Loss: 4.42 (4.38) Time: 0.409s, 2501.53/s (0.415s, 2466.65/s) LR: 9.789e-03 Data: 0.032 (0.037)
+Train: 135 [ 250/312 ( 80%)] Loss: 4.29 (4.39) Time: 0.404s, 2537.66/s (0.413s, 2479.02/s) LR: 9.789e-03 Data: 0.027 (0.035)
+Train: 135 [ 300/312 ( 96%)] Loss: 4.60 (4.39) Time: 0.411s, 2493.49/s (0.412s, 2487.03/s) LR: 9.789e-03 Data: 0.029 (0.034)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.404 (1.404) Loss: 7.070 ( 7.070) Acc@1: 5.176 ( 5.176) Acc@5: 12.695 ( 12.695)
+Test: [ 48/48] Time: 0.090 (0.320) Loss: 6.821 ( 7.031) Acc@1: 7.547 ( 6.048) Acc@5: 16.863 ( 13.960)
+Train: 136 [ 0/312 ( 0%)] Loss: 4.44 (4.44) Time: 1.677s, 610.69/s (1.677s, 610.69/s) LR: 8.536e-03 Data: 1.135 (1.135)
+Train: 136 [ 50/312 ( 16%)] Loss: 4.36 (4.34) Time: 0.403s, 2543.09/s (0.431s, 2378.39/s) LR: 8.536e-03 Data: 0.027 (0.049)
+Train: 136 [ 100/312 ( 32%)] Loss: 4.34 (4.35) Time: 0.401s, 2554.14/s (0.417s, 2457.61/s) LR: 8.536e-03 Data: 0.028 (0.039)
+Train: 136 [ 150/312 ( 48%)] Loss: 4.49 (4.37) Time: 0.400s, 2560.75/s (0.412s, 2487.02/s) LR: 8.536e-03 Data: 0.028 (0.035)
+Train: 136 [ 200/312 ( 64%)] Loss: 4.41 (4.37) Time: 0.402s, 2547.68/s (0.409s, 2502.12/s) LR: 8.536e-03 Data: 0.027 (0.033)
+Train: 136 [ 250/312 ( 80%)] Loss: 4.44 (4.37) Time: 0.404s, 2534.83/s (0.408s, 2509.95/s) LR: 8.536e-03 Data: 0.027 (0.032)
+Train: 136 [ 300/312 ( 96%)] Loss: 4.46 (4.38) Time: 0.408s, 2510.62/s (0.408s, 2512.56/s) LR: 8.536e-03 Data: 0.028 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.395 (1.395) Loss: 7.085 ( 7.085) Acc@1: 5.566 ( 5.566) Acc@5: 12.012 ( 12.012)
+Test: [ 48/48] Time: 0.091 (0.319) Loss: 6.833 ( 7.053) Acc@1: 7.075 ( 6.200) Acc@5: 17.453 ( 14.104)
+Train: 137 [ 0/312 ( 0%)] Loss: 4.34 (4.34) Time: 1.861s, 550.38/s (1.861s, 550.38/s) LR: 7.367e-03 Data: 1.486 (1.486)
+Train: 137 [ 50/312 ( 16%)] Loss: 4.25 (4.35) Time: 0.405s, 2529.66/s (0.435s, 2352.40/s) LR: 7.367e-03 Data: 0.024 (0.056)
+Train: 137 [ 100/312 ( 32%)] Loss: 4.39 (4.35) Time: 0.410s, 2496.72/s (0.421s, 2430.25/s) LR: 7.367e-03 Data: 0.027 (0.041)
+Train: 137 [ 150/312 ( 48%)] Loss: 4.38 (4.35) Time: 0.407s, 2516.89/s (0.416s, 2459.32/s) LR: 7.367e-03 Data: 0.026 (0.037)
+Train: 137 [ 200/312 ( 64%)] Loss: 4.49 (4.36) Time: 0.406s, 2524.99/s (0.414s, 2473.40/s) LR: 7.367e-03 Data: 0.027 (0.034)
+Train: 137 [ 250/312 ( 80%)] Loss: 4.44 (4.36) Time: 0.408s, 2510.74/s (0.412s, 2484.21/s) LR: 7.367e-03 Data: 0.028 (0.033)
+Train: 137 [ 300/312 ( 96%)] Loss: 4.44 (4.36) Time: 0.405s, 2531.47/s (0.411s, 2491.94/s) LR: 7.367e-03 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.411 (1.411) Loss: 7.079 ( 7.079) Acc@1: 5.469 ( 5.469) Acc@5: 12.891 ( 12.891)
+Test: [ 48/48] Time: 0.090 (0.318) Loss: 6.841 ( 7.056) Acc@1: 7.429 ( 6.052) Acc@5: 16.981 ( 14.002)
+Train: 138 [ 0/312 ( 0%)] Loss: 4.28 (4.28) Time: 1.817s, 563.58/s (1.817s, 563.58/s) LR: 6.283e-03 Data: 1.352 (1.352)
+Train: 138 [ 50/312 ( 16%)] Loss: 4.23 (4.32) Time: 0.404s, 2537.72/s (0.434s, 2358.12/s) LR: 6.283e-03 Data: 0.026 (0.053)
+Train: 138 [ 100/312 ( 32%)] Loss: 4.31 (4.33) Time: 0.401s, 2555.05/s (0.419s, 2444.47/s) LR: 6.283e-03 Data: 0.027 (0.041)
+Train: 138 [ 150/312 ( 48%)] Loss: 4.28 (4.33) Time: 0.404s, 2533.87/s (0.414s, 2476.04/s) LR: 6.283e-03 Data: 0.027 (0.036)
+Train: 138 [ 200/312 ( 64%)] Loss: 4.29 (4.34) Time: 0.412s, 2482.87/s (0.411s, 2489.60/s) LR: 6.283e-03 Data: 0.036 (0.034)
+Train: 138 [ 250/312 ( 80%)] Loss: 4.33 (4.34) Time: 0.406s, 2520.18/s (0.410s, 2495.28/s) LR: 6.283e-03 Data: 0.028 (0.033)
+Train: 138 [ 300/312 ( 96%)] Loss: 4.37 (4.35) Time: 0.413s, 2480.78/s (0.410s, 2497.68/s) LR: 6.283e-03 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.406 (1.406) Loss: 7.057 ( 7.057) Acc@1: 5.664 ( 5.664) Acc@5: 13.086 ( 13.086)
+Test: [ 48/48] Time: 0.090 (0.319) Loss: 6.844 ( 7.042) Acc@1: 7.901 ( 6.174) Acc@5: 17.335 ( 14.292)
+Train: 139 [ 0/312 ( 0%)] Loss: 4.37 (4.37) Time: 1.862s, 549.93/s (1.862s, 549.93/s) LR: 5.284e-03 Data: 1.488 (1.488)
+Train: 139 [ 50/312 ( 16%)] Loss: 4.32 (4.31) Time: 0.407s, 2515.60/s (0.436s, 2350.23/s) LR: 5.284e-03 Data: 0.028 (0.057)
+Train: 139 [ 100/312 ( 32%)] Loss: 4.35 (4.32) Time: 0.406s, 2521.16/s (0.422s, 2429.20/s) LR: 5.284e-03 Data: 0.027 (0.042)
+Train: 139 [ 150/312 ( 48%)] Loss: 4.28 (4.33) Time: 0.408s, 2511.92/s (0.417s, 2456.63/s) LR: 5.284e-03 Data: 0.027 (0.037)
+Train: 139 [ 200/312 ( 64%)] Loss: 4.31 (4.34) Time: 0.403s, 2539.58/s (0.414s, 2472.17/s) LR: 5.284e-03 Data: 0.026 (0.035)
+Train: 139 [ 250/312 ( 80%)] Loss: 4.36 (4.33) Time: 0.401s, 2555.14/s (0.412s, 2486.44/s) LR: 5.284e-03 Data: 0.028 (0.033)
+Train: 139 [ 300/312 ( 96%)] Loss: 4.36 (4.33) Time: 0.400s, 2558.80/s (0.410s, 2497.45/s) LR: 5.284e-03 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.406 (1.406) Loss: 7.070 ( 7.070) Acc@1: 5.176 ( 5.176) Acc@5: 12.402 ( 12.402)
+Test: [ 48/48] Time: 0.088 (0.321) Loss: 6.839 ( 7.054) Acc@1: 7.311 ( 6.084) Acc@5: 16.627 ( 14.140)
+Train: 140 [ 0/312 ( 0%)] Loss: 4.26 (4.26) Time: 1.518s, 674.78/s (1.518s, 674.78/s) LR: 4.370e-03 Data: 1.097 (1.097)
+Train: 140 [ 50/312 ( 16%)] Loss: 4.39 (4.31) Time: 0.403s, 2539.79/s (0.423s, 2418.68/s) LR: 4.370e-03 Data: 0.028 (0.049)
+Train: 140 [ 100/312 ( 32%)] Loss: 4.27 (4.31) Time: 0.405s, 2529.69/s (0.413s, 2477.22/s) LR: 4.370e-03 Data: 0.028 (0.038)
+Train: 140 [ 150/312 ( 48%)] Loss: 4.35 (4.31) Time: 0.405s, 2527.81/s (0.411s, 2494.32/s) LR: 4.370e-03 Data: 0.029 (0.035)
+Train: 140 [ 200/312 ( 64%)] Loss: 4.27 (4.31) Time: 0.407s, 2518.53/s (0.410s, 2499.04/s) LR: 4.370e-03 Data: 0.027 (0.033)
+Train: 140 [ 250/312 ( 80%)] Loss: 4.37 (4.32) Time: 0.406s, 2522.48/s (0.409s, 2504.40/s) LR: 4.370e-03 Data: 0.030 (0.032)
+Train: 140 [ 300/312 ( 96%)] Loss: 4.42 (4.32) Time: 0.405s, 2528.38/s (0.408s, 2508.60/s) LR: 4.370e-03 Data: 0.028 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.412 (1.412) Loss: 7.073 ( 7.073) Acc@1: 5.273 ( 5.273) Acc@5: 13.379 ( 13.379)
+Test: [ 48/48] Time: 0.089 (0.318) Loss: 6.816 ( 7.049) Acc@1: 7.193 ( 6.116) Acc@5: 17.453 ( 14.162)
+Train: 141 [ 0/312 ( 0%)] Loss: 4.27 (4.27) Time: 1.684s, 608.05/s (1.684s, 608.05/s) LR: 3.543e-03 Data: 1.066 (1.066)
+Train: 141 [ 50/312 ( 16%)] Loss: 4.26 (4.29) Time: 0.405s, 2526.94/s (0.430s, 2379.08/s) LR: 3.543e-03 Data: 0.028 (0.048)
+Train: 141 [ 100/312 ( 32%)] Loss: 4.27 (4.30) Time: 0.412s, 2488.09/s (0.419s, 2445.09/s) LR: 3.543e-03 Data: 0.033 (0.038)
+Train: 141 [ 150/312 ( 48%)] Loss: 4.32 (4.30) Time: 0.403s, 2538.20/s (0.415s, 2469.82/s) LR: 3.543e-03 Data: 0.028 (0.035)
+Train: 141 [ 200/312 ( 64%)] Loss: 4.20 (4.31) Time: 0.402s, 2549.92/s (0.412s, 2486.21/s) LR: 3.543e-03 Data: 0.028 (0.033)
+Train: 141 [ 250/312 ( 80%)] Loss: 4.25 (4.31) Time: 0.403s, 2540.56/s (0.410s, 2496.51/s) LR: 3.543e-03 Data: 0.027 (0.032)
+Train: 141 [ 300/312 ( 96%)] Loss: 4.29 (4.31) Time: 0.404s, 2535.19/s (0.409s, 2501.85/s) LR: 3.543e-03 Data: 0.027 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.422 (1.422) Loss: 7.074 ( 7.074) Acc@1: 5.273 ( 5.273) Acc@5: 12.598 ( 12.598)
+Test: [ 48/48] Time: 0.091 (0.319) Loss: 6.815 ( 7.049) Acc@1: 7.547 ( 6.134) Acc@5: 16.863 ( 14.130)
+Train: 142 [ 0/312 ( 0%)] Loss: 4.34 (4.34) Time: 1.750s, 585.17/s (1.750s, 585.17/s) LR: 2.801e-03 Data: 1.374 (1.374)
+Train: 142 [ 50/312 ( 16%)] Loss: 4.33 (4.30) Time: 0.405s, 2529.11/s (0.443s, 2311.46/s) LR: 2.801e-03 Data: 0.028 (0.064)
+Train: 142 [ 100/312 ( 32%)] Loss: 4.31 (4.30) Time: 0.402s, 2544.89/s (0.424s, 2415.14/s) LR: 2.801e-03 Data: 0.028 (0.046)
+Train: 142 [ 150/312 ( 48%)] Loss: 4.31 (4.30) Time: 0.404s, 2532.76/s (0.417s, 2453.69/s) LR: 2.801e-03 Data: 0.028 (0.040)
+Train: 142 [ 200/312 ( 64%)] Loss: 4.25 (4.30) Time: 0.407s, 2516.01/s (0.414s, 2471.33/s) LR: 2.801e-03 Data: 0.027 (0.037)
+Train: 142 [ 250/312 ( 80%)] Loss: 4.29 (4.30) Time: 0.405s, 2526.62/s (0.413s, 2480.21/s) LR: 2.801e-03 Data: 0.027 (0.035)
+Train: 142 [ 300/312 ( 96%)] Loss: 4.34 (4.30) Time: 0.406s, 2522.57/s (0.412s, 2486.48/s) LR: 2.801e-03 Data: 0.027 (0.034)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.418 (1.418) Loss: 7.100 ( 7.100) Acc@1: 5.371 ( 5.371) Acc@5: 12.891 ( 12.891)
+Test: [ 48/48] Time: 0.090 (0.322) Loss: 6.850 ( 7.081) Acc@1: 7.075 ( 6.056) Acc@5: 17.217 ( 13.952)
+Train: 143 [ 0/312 ( 0%)] Loss: 4.29 (4.29) Time: 2.239s, 457.38/s (2.239s, 457.38/s) LR: 2.146e-03 Data: 1.863 (1.863)
+Train: 143 [ 50/312 ( 16%)] Loss: 4.29 (4.29) Time: 0.409s, 2500.86/s (0.443s, 2313.27/s) LR: 2.146e-03 Data: 0.032 (0.063)
+Train: 143 [ 100/312 ( 32%)] Loss: 4.30 (4.28) Time: 0.403s, 2538.21/s (0.424s, 2413.39/s) LR: 2.146e-03 Data: 0.027 (0.046)
+Train: 143 [ 150/312 ( 48%)] Loss: 4.28 (4.29) Time: 0.408s, 2508.24/s (0.418s, 2447.66/s) LR: 2.146e-03 Data: 0.028 (0.040)
+Train: 143 [ 200/312 ( 64%)] Loss: 4.35 (4.29) Time: 0.409s, 2503.03/s (0.416s, 2464.04/s) LR: 2.146e-03 Data: 0.028 (0.037)
+Train: 143 [ 250/312 ( 80%)] Loss: 4.36 (4.29) Time: 0.409s, 2505.53/s (0.414s, 2474.13/s) LR: 2.146e-03 Data: 0.028 (0.035)
+Train: 143 [ 300/312 ( 96%)] Loss: 4.37 (4.29) Time: 0.407s, 2514.93/s (0.413s, 2481.43/s) LR: 2.146e-03 Data: 0.027 (0.034)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.401 (1.401) Loss: 7.076 ( 7.076) Acc@1: 5.273 ( 5.273) Acc@5: 12.793 ( 12.793)
+Test: [ 48/48] Time: 0.090 (0.320) Loss: 6.831 ( 7.054) Acc@1: 6.958 ( 6.028) Acc@5: 16.863 ( 13.950)
+Train: 144 [ 0/312 ( 0%)] Loss: 4.27 (4.27) Time: 1.523s, 672.46/s (1.523s, 672.46/s) LR: 1.577e-03 Data: 1.113 (1.113)
+Train: 144 [ 50/312 ( 16%)] Loss: 4.35 (4.27) Time: 0.405s, 2531.51/s (0.429s, 2388.85/s) LR: 1.577e-03 Data: 0.027 (0.049)
+Train: 144 [ 100/312 ( 32%)] Loss: 4.33 (4.28) Time: 0.407s, 2517.51/s (0.417s, 2453.82/s) LR: 1.577e-03 Data: 0.028 (0.039)
+Train: 144 [ 150/312 ( 48%)] Loss: 4.26 (4.28) Time: 0.404s, 2533.96/s (0.414s, 2475.26/s) LR: 1.577e-03 Data: 0.027 (0.035)
+Train: 144 [ 200/312 ( 64%)] Loss: 4.34 (4.28) Time: 0.407s, 2518.80/s (0.412s, 2487.40/s) LR: 1.577e-03 Data: 0.028 (0.033)
+Train: 144 [ 250/312 ( 80%)] Loss: 4.34 (4.28) Time: 0.406s, 2525.13/s (0.411s, 2494.05/s) LR: 1.577e-03 Data: 0.028 (0.032)
+Train: 144 [ 300/312 ( 96%)] Loss: 4.29 (4.28) Time: 0.405s, 2529.47/s (0.410s, 2498.11/s) LR: 1.577e-03 Data: 0.028 (0.031)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.417 (1.417) Loss: 7.066 ( 7.066) Acc@1: 5.078 ( 5.078) Acc@5: 12.207 ( 12.207)
+Test: [ 48/48] Time: 0.089 (0.320) Loss: 6.823 ( 7.052) Acc@1: 6.840 ( 6.034) Acc@5: 17.335 ( 14.124)
+Train: 145 [ 0/312 ( 0%)] Loss: 4.18 (4.18) Time: 1.535s, 667.19/s (1.535s, 667.19/s) LR: 1.096e-03 Data: 1.163 (1.163)
+Train: 145 [ 50/312 ( 16%)] Loss: 4.25 (4.27) Time: 0.406s, 2521.05/s (0.427s, 2396.83/s) LR: 1.096e-03 Data: 0.027 (0.050)
+Train: 145 [ 100/312 ( 32%)] Loss: 4.26 (4.27) Time: 0.405s, 2528.55/s (0.417s, 2457.35/s) LR: 1.096e-03 Data: 0.029 (0.039)
+Train: 145 [ 150/312 ( 48%)] Loss: 4.33 (4.28) Time: 0.405s, 2528.72/s (0.413s, 2480.94/s) LR: 1.096e-03 Data: 0.028 (0.035)
+Train: 145 [ 200/312 ( 64%)] Loss: 4.24 (4.28) Time: 0.405s, 2527.12/s (0.411s, 2492.18/s) LR: 1.096e-03 Data: 0.027 (0.033)
+Train: 145 [ 250/312 ( 80%)] Loss: 4.34 (4.28) Time: 0.406s, 2523.44/s (0.410s, 2496.44/s) LR: 1.096e-03 Data: 0.027 (0.032)
+Train: 145 [ 300/312 ( 96%)] Loss: 4.41 (4.28) Time: 0.404s, 2533.85/s (0.409s, 2501.00/s) LR: 1.096e-03 Data: 0.029 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.403 (1.403) Loss: 7.064 ( 7.064) Acc@1: 5.176 ( 5.176) Acc@5: 12.793 ( 12.793)
+Test: [ 48/48] Time: 0.089 (0.322) Loss: 6.821 ( 7.055) Acc@1: 7.311 ( 6.092) Acc@5: 17.217 ( 14.094)
+Train: 146 [ 0/312 ( 0%)] Loss: 4.23 (4.23) Time: 1.674s, 611.75/s (1.674s, 611.75/s) LR: 7.014e-04 Data: 1.274 (1.274)
+Train: 146 [ 50/312 ( 16%)] Loss: 4.26 (4.27) Time: 0.403s, 2538.16/s (0.430s, 2382.02/s) LR: 7.014e-04 Data: 0.028 (0.053)
+Train: 146 [ 100/312 ( 32%)] Loss: 4.25 (4.28) Time: 0.406s, 2524.29/s (0.417s, 2455.59/s) LR: 7.014e-04 Data: 0.028 (0.040)
+Train: 146 [ 150/312 ( 48%)] Loss: 4.22 (4.28) Time: 0.407s, 2514.37/s (0.413s, 2477.07/s) LR: 7.014e-04 Data: 0.028 (0.036)
+Train: 146 [ 200/312 ( 64%)] Loss: 4.16 (4.28) Time: 0.409s, 2502.10/s (0.412s, 2486.52/s) LR: 7.014e-04 Data: 0.029 (0.034)
+Train: 146 [ 250/312 ( 80%)] Loss: 4.33 (4.28) Time: 0.407s, 2518.00/s (0.411s, 2492.47/s) LR: 7.014e-04 Data: 0.028 (0.033)
+Train: 146 [ 300/312 ( 96%)] Loss: 4.31 (4.28) Time: 0.406s, 2520.04/s (0.410s, 2496.71/s) LR: 7.014e-04 Data: 0.028 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.423 (1.423) Loss: 7.084 ( 7.084) Acc@1: 4.883 ( 4.883) Acc@5: 12.988 ( 12.988)
+Test: [ 48/48] Time: 0.090 (0.321) Loss: 6.838 ( 7.063) Acc@1: 7.547 ( 6.132) Acc@5: 16.981 ( 14.078)
+Train: 147 [ 0/312 ( 0%)] Loss: 4.29 (4.29) Time: 1.562s, 655.50/s (1.562s, 655.50/s) LR: 3.947e-04 Data: 1.187 (1.187)
+Train: 147 [ 50/312 ( 16%)] Loss: 4.22 (4.27) Time: 0.412s, 2488.14/s (0.430s, 2383.45/s) LR: 3.947e-04 Data: 0.033 (0.050)
+Train: 147 [ 100/312 ( 32%)] Loss: 4.28 (4.28) Time: 0.405s, 2525.67/s (0.418s, 2449.34/s) LR: 3.947e-04 Data: 0.027 (0.039)
+Train: 147 [ 150/312 ( 48%)] Loss: 4.24 (4.28) Time: 0.407s, 2515.23/s (0.414s, 2471.43/s) LR: 3.947e-04 Data: 0.028 (0.035)
+Train: 147 [ 200/312 ( 64%)] Loss: 4.31 (4.28) Time: 0.405s, 2526.64/s (0.412s, 2483.46/s) LR: 3.947e-04 Data: 0.029 (0.033)
+Train: 147 [ 250/312 ( 80%)] Loss: 4.34 (4.27) Time: 0.403s, 2539.76/s (0.411s, 2493.35/s) LR: 3.947e-04 Data: 0.027 (0.032)
+Train: 147 [ 300/312 ( 96%)] Loss: 4.21 (4.27) Time: 0.402s, 2547.70/s (0.410s, 2500.09/s) LR: 3.947e-04 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.437 (1.437) Loss: 7.089 ( 7.089) Acc@1: 5.078 ( 5.078) Acc@5: 13.379 ( 13.379)
+Test: [ 48/48] Time: 0.090 (0.321) Loss: 6.835 ( 7.070) Acc@1: 7.665 ( 6.088) Acc@5: 16.745 ( 13.952)
+Train: 148 [ 0/312 ( 0%)] Loss: 4.29 (4.29) Time: 1.573s, 650.97/s (1.573s, 650.97/s) LR: 1.754e-04 Data: 1.111 (1.111)
+Train: 148 [ 50/312 ( 16%)] Loss: 4.23 (4.29) Time: 0.408s, 2512.36/s (0.434s, 2357.09/s) LR: 1.754e-04 Data: 0.028 (0.054)
+Train: 148 [ 100/312 ( 32%)] Loss: 4.24 (4.28) Time: 0.407s, 2517.39/s (0.421s, 2430.03/s) LR: 1.754e-04 Data: 0.027 (0.041)
+Train: 148 [ 150/312 ( 48%)] Loss: 4.35 (4.28) Time: 0.406s, 2520.45/s (0.417s, 2457.47/s) LR: 1.754e-04 Data: 0.027 (0.037)
+Train: 148 [ 200/312 ( 64%)] Loss: 4.26 (4.28) Time: 0.406s, 2525.22/s (0.414s, 2471.14/s) LR: 1.754e-04 Data: 0.029 (0.035)
+Train: 148 [ 250/312 ( 80%)] Loss: 4.31 (4.28) Time: 0.405s, 2527.96/s (0.413s, 2479.67/s) LR: 1.754e-04 Data: 0.026 (0.033)
+Train: 148 [ 300/312 ( 96%)] Loss: 4.26 (4.28) Time: 0.403s, 2543.06/s (0.412s, 2487.34/s) LR: 1.754e-04 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.414 (1.414) Loss: 7.062 ( 7.062) Acc@1: 4.980 ( 4.980) Acc@5: 12.891 ( 12.891)
+Test: [ 48/48] Time: 0.089 (0.318) Loss: 6.819 ( 7.046) Acc@1: 7.311 ( 6.108) Acc@5: 17.217 ( 14.020)
+Train: 149 [ 0/312 ( 0%)] Loss: 4.25 (4.25) Time: 1.567s, 653.66/s (1.567s, 653.66/s) LR: 4.386e-05 Data: 1.196 (1.196)
+Train: 149 [ 50/312 ( 16%)] Loss: 4.27 (4.26) Time: 0.409s, 2504.89/s (0.428s, 2394.28/s) LR: 4.386e-05 Data: 0.031 (0.051)
+Train: 149 [ 100/312 ( 32%)] Loss: 4.23 (4.26) Time: 0.406s, 2522.89/s (0.417s, 2452.75/s) LR: 4.386e-05 Data: 0.027 (0.040)
+Train: 149 [ 150/312 ( 48%)] Loss: 4.29 (4.27) Time: 0.406s, 2524.44/s (0.414s, 2476.01/s) LR: 4.386e-05 Data: 0.028 (0.036)
+Train: 149 [ 200/312 ( 64%)] Loss: 4.21 (4.27) Time: 0.410s, 2495.59/s (0.412s, 2486.04/s) LR: 4.386e-05 Data: 0.031 (0.034)
+Train: 149 [ 250/312 ( 80%)] Loss: 4.31 (4.27) Time: 0.402s, 2545.36/s (0.411s, 2492.76/s) LR: 4.386e-05 Data: 0.027 (0.033)
+Train: 149 [ 300/312 ( 96%)] Loss: 4.23 (4.27) Time: 0.401s, 2550.73/s (0.409s, 2500.70/s) LR: 4.386e-05 Data: 0.027 (0.032)
+Distributing BatchNorm running means and vars
+Test: [ 0/48] Time: 1.412 (1.412) Loss: 7.080 ( 7.080) Acc@1: 4.883 ( 4.883) Acc@5: 12.793 ( 12.793)
+Test: [ 48/48] Time: 0.089 (0.321) Loss: 6.832 ( 7.054) Acc@1: 7.429 ( 6.082) Acc@5: 17.099 ( 14.124)
+*** Best metric: 12.980000009460449 (epoch 58)
+--result
+[
+    {
+        "epoch": 67,
+        "train": {
+            "loss": 6.150979995727539
+        },
+        "validation": {
+            "loss": 5.668644493713379,
+            "top1": 12.175999997558593,
+            "top5": 24.590000033569336
+        }
+    },
+    {
+        "epoch": 62,
+        "train": {
+            "loss": 6.253303527832031
+        },
+        "validation": {
+            "loss": 5.642817199707031,
+            "top1": 12.296000000610352,
+            "top5": 25.55799999572754
+        }
+    },
+    {
+        "epoch": 53,
+        "train": {
+            "loss": 6.41491174697876
+        },
+        "validation": {
+            "loss": 5.617567540588379,
+            "top1": 12.301999997253418,
+            "top5": 26.39400001220703
+        }
+    },
+    {
+        "epoch": 64,
+        "train": {
+            "loss": 6.209104537963867
+        },
+        "validation": {
+            "loss": 5.588766520690918,
+            "top1": 12.566000014038085,
+            "top5": 26.24800000732422
+        }
+    },
+    {
+        "epoch": 55,
+        "train": {
+            "loss": 6.3817901611328125
+        },
+        "validation": {
+            "loss": 5.56478563659668,
+            "top1": 12.590000011596679,
+            "top5": 27.05000003112793
+        }
+    },
+    {
+        "epoch": 54,
+        "train": {
+            "loss": 6.397539138793945
+        },
+        "validation": {
+            "loss": 5.626161472015381,
+            "top1": 12.628000014038086,
+            "top5": 26.41600001464844
+        }
+    },
+    {
+        "epoch": 51,
+        "train": {
+            "loss": 6.446957111358643
+        },
+        "validation": {
+            "loss": 5.596342398986817,
+            "top1": 12.71800000579834,
+            "top5": 27.302000007324217
+        }
+    },
+    {
+        "epoch": 56,
+        "train": {
+            "loss": 6.365302562713623
+        },
+        "validation": {
+            "loss": 5.570098075714111,
+            "top1": 12.786000000610352,
+            "top5": 27.398000026245118
+        }
+    },
+    {
+        "epoch": 60,
+        "train": {
+            "loss": 6.292933940887451
+        },
+        "validation": {
+            "loss": 5.580106327819824,
+            "top1": 12.810000006103516,
+            "top5": 26.62999998840332
+        }
+    },
+    {
+        "epoch": 58,
+        "train": {
+            "loss": 6.326818466186523
+        },
+        "validation": {
+            "loss": 5.561600781555176,
+            "top1": 12.980000009460449,
+            "top5": 27.197999997558593
+        }
+    }
+]