poca-SoccerTwos / run_logs / timers.json
First Push (commit c212f31, verified)
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.767143726348877,
"min": 1.6312912702560425,
"max": 3.295707941055298,
"count": 551
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 35512.51953125,
"min": 16946.1484375,
"max": 113590.4375,
"count": 551
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 53.380434782608695,
"min": 39.08870967741935,
"max": 999.0,
"count": 551
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19644.0,
"min": 16052.0,
"max": 24328.0,
"count": 551
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1601.3617010516243,
"min": 1194.7443324197643,
"max": 1629.517499433453,
"count": 550
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 294650.55299349886,
"min": 2390.310295175923,
"max": 391780.55871929385,
"count": 550
},
"SoccerTwos.Step.mean": {
"value": 5509992.0,
"min": 9180.0,
"max": 5509992.0,
"count": 551
},
"SoccerTwos.Step.sum": {
"value": 5509992.0,
"min": 9180.0,
"max": 5509992.0,
"count": 551
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0696505755186081,
"min": -0.10691600292921066,
"max": 0.16526968777179718,
"count": 551
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -12.815706253051758,
"min": -19.24488067626953,
"max": 27.60003662109375,
"count": 551
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07309786975383759,
"min": -0.11037766933441162,
"max": 0.1634364277124405,
"count": 551
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -13.450007438659668,
"min": -19.86798095703125,
"max": 27.29388427734375,
"count": 551
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 551
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 551
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.1839304336387178,
"min": -0.5454545454545454,
"max": 0.583878160893232,
"count": 551
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -33.84319978952408,
"min": -64.55040001869202,
"max": 65.48399949073792,
"count": 551
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.1839304336387178,
"min": -0.5454545454545454,
"max": 0.583878160893232,
"count": 551
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -33.84319978952408,
"min": -64.55040001869202,
"max": 65.48399949073792,
"count": 551
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 551
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 551
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.0173188211202311,
"min": 0.010787460731808095,
"max": 0.02419288019494464,
"count": 265
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.0173188211202311,
"min": 0.010787460731808095,
"max": 0.02419288019494464,
"count": 265
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.1124846950173378,
"min": 0.0012137961050029844,
"max": 0.12875046456853548,
"count": 265
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.1124846950173378,
"min": 0.0012137961050029844,
"max": 0.12875046456853548,
"count": 265
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.1148222138484319,
"min": 0.0012359186133835466,
"max": 0.1316766545176506,
"count": 265
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.1148222138484319,
"min": 0.0012359186133835466,
"max": 0.1316766545176506,
"count": 265
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 265
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 265
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 265
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 265
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 265
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 265
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1742202553",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\kelvi\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1742226313"
},
"total": 23760.206119399983,
"count": 1,
"self": 1.432051899842918,
"children": {
"run_training.setup": {
"total": 0.16309370007365942,
"count": 1,
"self": 0.16309370007365942
},
"TrainerController.start_learning": {
"total": 23758.610973800067,
"count": 1,
"self": 17.049962282646447,
"children": {
"TrainerController._reset_env": {
"total": 16.863394799176604,
"count": 28,
"self": 16.863394799176604
},
"TrainerController.advance": {
"total": 23724.444342717994,
"count": 378846,
"self": 18.46539285313338,
"children": {
"env_step": {
"total": 12631.155832599383,
"count": 378846,
"self": 9920.716092824936,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2700.085324431304,
"count": 378846,
"self": 99.4496173276566,
"children": {
"TorchPolicy.evaluate": {
"total": 2600.6357071036473,
"count": 695272,
"self": 2600.6357071036473
}
}
},
"workers": {
"total": 10.354415343143046,
"count": 378846,
"self": 0.0,
"children": {
"worker_root": {
"total": 23698.835580498446,
"count": 378846,
"is_parallel": true,
"self": 15881.606932221912,
"children": {
"steps_from_proto": {
"total": 0.09918919950723648,
"count": 56,
"is_parallel": true,
"self": 0.019583395682275295,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.07960580382496119,
"count": 224,
"is_parallel": true,
"self": 0.07960580382496119
}
}
},
"UnityEnvironment.step": {
"total": 7817.129459077027,
"count": 378846,
"is_parallel": true,
"self": 425.8636191175319,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 383.44411973049864,
"count": 378846,
"is_parallel": true,
"self": 383.44411973049864
},
"communicator.exchange": {
"total": 5645.902554647066,
"count": 378846,
"is_parallel": true,
"self": 5645.902554647066
},
"steps_from_proto": {
"total": 1361.9191655819304,
"count": 757692,
"is_parallel": true,
"self": 283.5340147432871,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1078.3851508386433,
"count": 3030768,
"is_parallel": true,
"self": 1078.3851508386433
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 11074.823117265478,
"count": 378846,
"self": 93.72514483705163,
"children": {
"process_trajectory": {
"total": 2600.173202627804,
"count": 378846,
"self": 2597.4539079284295,
"children": {
"RLTrainer._checkpoint": {
"total": 2.7192946993745863,
"count": 11,
"self": 2.7192946993745863
}
}
},
"_update_policy": {
"total": 8380.924769800622,
"count": 266,
"self": 1360.3877046112902,
"children": {
"TorchPOCAOptimizer.update": {
"total": 7020.537065189332,
"count": 7977,
"self": 7020.537065189332
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.700129359960556e-06,
"count": 1,
"self": 1.700129359960556e-06
},
"TrainerController._save_models": {
"total": 0.2532723001204431,
"count": 1,
"self": 0.01821870030835271,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2350535998120904,
"count": 1,
"self": 0.2350535998120904
}
}
}
}
}
}
}
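
Note: the timer tree above is plain JSON, so it can be inspected with the standard library. The snippet below is a minimal sketch, assuming the file sits at run_logs/timers.json as in the path in the header; it reads the final self-play ELO gauge and the wall-clock time attributed to policy updates.

```python
# Minimal sketch (not part of the original run): read gauges and timer totals
# from run_logs/timers.json produced by mlagents-learn.
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the last value plus min/max/count over the run.
elo = timers["gauges"]["SoccerTwos.Self-play.ELO.mean"]
print(f"final ELO: {elo['value']:.1f} (min {elo['min']:.1f}, max {elo['max']:.1f})")

# Wall-clock seconds for the whole run and for POCA policy updates,
# following the nested "children" blocks shown above.
total = timers["total"]
update = timers["children"]["TrainerController.start_learning"]["children"][
    "TrainerController.advance"]["children"]["trainer_advance"]["children"][
    "_update_policy"]["total"]
print(f"total: {total:.0f} s, _update_policy: {update:.0f} s")
```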