{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.0083420276641846,
"min": 1.0083420276641846,
"max": 1.4302774667739868,
"count": 3
},
"Pyramids.Policy.Entropy.sum": {
"value": 30201.859375,
"min": 30201.859375,
"max": 43388.8984375,
"count": 3
},
"Pyramids.Step.mean": {
"value": 89973.0,
"min": 29952.0,
"max": 89973.0,
"count": 3
},
"Pyramids.Step.sum": {
"value": 89973.0,
"min": 29952.0,
"max": 89973.0,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07018574327230453,
"min": -0.12179777771234512,
"max": -0.07018574327230453,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -16.984949111938477,
"min": -29.35326385498047,
"max": -16.984949111938477,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.18007464706897736,
"min": 0.18007464706897736,
"max": 0.33111318945884705,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 43.57806396484375,
"min": 43.57806396484375,
"max": 79.79827880859375,
"count": 3
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06840852902052968,
"min": 0.06840852902052968,
"max": 0.07262600321856832,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.8893108772668858,
"min": 0.48539814353591343,
"max": 0.8893108772668858,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0019146189621139795,
"min": 0.0019146189621139795,
"max": 0.007645753753913804,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.024890046507481733,
"min": 0.021900026301918066,
"max": 0.053520276277396625,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.585484394584615e-05,
"min": 7.585484394584615e-05,
"max": 0.0002515063018788571,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.000986112971296,
"min": 0.000986112971296,
"max": 0.0017605441131519997,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1252849230769231,
"min": 0.1252849230769231,
"max": 0.1838354285714286,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.6287040000000002,
"min": 1.2868480000000002,
"max": 1.6287040000000002,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0025359638153846157,
"min": 0.0025359638153846157,
"max": 0.008385159314285713,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0329675296,
"min": 0.0329675296,
"max": 0.058696115199999996,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.12500633299350739,
"min": 0.12500633299350739,
"max": 0.5237194895744324,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.62508225440979,
"min": 1.62508225440979,
"max": 3.666036605834961,
"count": 3
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 952.2058823529412,
"min": 952.2058823529412,
"max": 999.0,
"count": 3
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32375.0,
"min": 15984.0,
"max": 32954.0,
"count": 3
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.7766000507947277,
"min": -1.0000000521540642,
"max": -0.7766000507947277,
"count": 3
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -26.40440172702074,
"min": -28.98540175706148,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.7766000507947277,
"min": -1.0000000521540642,
"max": -0.7766000507947277,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -26.40440172702074,
"min": -28.98540175706148,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.3942591010538095,
"min": 1.3942591010538095,
"max": 10.68658627383411,
"count": 3
},
"Pyramids.Policy.RndReward.sum": {
"value": 47.40480943582952,
"min": 47.40480943582952,
"max": 170.98538038134575,
"count": 3
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677608623",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677608825"
},
"total": 202.91046453199988,
"count": 1,
"self": 0.4258551559996704,
"children": {
"run_training.setup": {
"total": 0.11483413800010567,
"count": 1,
"self": 0.11483413800010567
},
"TrainerController.start_learning": {
"total": 202.3697752380001,
"count": 1,
"self": 0.1167557390060665,
"children": {
"TrainerController._reset_env": {
"total": 6.500280959999827,
"count": 1,
"self": 6.500280959999827
},
"TrainerController.advance": {
"total": 195.65607449099411,
"count": 6289,
"self": 0.13216598499798238,
"children": {
"env_step": {
"total": 125.48025085999984,
"count": 6289,
"self": 114.40631751500655,
"children": {
"SubprocessEnvManager._take_step": {
"total": 10.998993092988712,
"count": 6289,
"self": 0.44313804499188336,
"children": {
"TorchPolicy.evaluate": {
"total": 10.555855047996829,
"count": 6278,
"self": 3.5656529579969174,
"children": {
"TorchPolicy.sample_actions": {
"total": 6.9902020899999115,
"count": 6278,
"self": 6.9902020899999115
}
}
}
}
},
"workers": {
"total": 0.07494025200458054,
"count": 6289,
"self": 0.0,
"children": {
"worker_root": {
"total": 201.8265244829911,
"count": 6289,
"is_parallel": true,
"self": 98.24119292599244,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018671289999474538,
"count": 1,
"is_parallel": true,
"self": 0.0006774579999273556,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011896710000200983,
"count": 8,
"is_parallel": true,
"self": 0.0011896710000200983
}
}
},
"UnityEnvironment.step": {
"total": 0.10972523999998884,
"count": 1,
"is_parallel": true,
"self": 0.000557325000045239,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004595159998643794,
"count": 1,
"is_parallel": true,
"self": 0.0004595159998643794
},
"communicator.exchange": {
"total": 0.10702670299997408,
"count": 1,
"is_parallel": true,
"self": 0.10702670299997408
},
"steps_from_proto": {
"total": 0.0016816960001051484,
"count": 1,
"is_parallel": true,
"self": 0.0004144690003613505,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001267226999743798,
"count": 8,
"is_parallel": true,
"self": 0.001267226999743798
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 103.58533155699865,
"count": 6288,
"is_parallel": true,
"self": 3.0723196159704003,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.235619281002073,
"count": 6288,
"is_parallel": true,
"self": 2.235619281002073
},
"communicator.exchange": {
"total": 89.35091977301954,
"count": 6288,
"is_parallel": true,
"self": 89.35091977301954
},
"steps_from_proto": {
"total": 8.92647288700664,
"count": 6288,
"is_parallel": true,
"self": 2.0610056149921547,
"children": {
"_process_rank_one_or_two_observation": {
"total": 6.8654672720144845,
"count": 50304,
"is_parallel": true,
"self": 6.8654672720144845
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 70.04365764599629,
"count": 6289,
"self": 0.16077571099390298,
"children": {
"process_trajectory": {
"total": 15.77709871800198,
"count": 6289,
"self": 15.77709871800198
},
"_update_policy": {
"total": 54.10578321700041,
"count": 33,
"self": 20.686629184001276,
"children": {
"TorchPPOOptimizer.update": {
"total": 33.41915403299913,
"count": 2262,
"self": 33.41915403299913
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.460001481580548e-07,
"count": 1,
"self": 8.460001481580548e-07
},
"TrainerController._save_models": {
"total": 0.09666320199994516,
"count": 1,
"self": 0.0013415470000381902,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09532165499990697,
"count": 1,
"self": 0.09532165499990697
}
}
}
}
}
}
}