ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4035046100616455,
"min": 1.4035046100616455,
"max": 1.4313666820526123,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71136.6328125,
"min": 67548.984375,
"max": 75739.6875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 102.03703703703704,
"min": 96.27237354085604,
"max": 377.8496240601504,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49590.0,
"min": 49460.0,
"max": 50254.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999336.0,
"min": 49903.0,
"max": 1999336.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999336.0,
"min": 49903.0,
"max": 1999336.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.321157455444336,
"min": 0.09860001504421234,
"max": 2.4084529876708984,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1128.08251953125,
"min": 13.015201568603516,
"max": 1168.315673828125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.4953451615302162,
"min": 1.985199302209146,
"max": 3.7648092025067226,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1698.737748503685,
"min": 262.0463078916073,
"max": 1920.365609049797,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.4953451615302162,
"min": 1.985199302209146,
"max": 3.7648092025067226,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1698.737748503685,
"min": 262.0463078916073,
"max": 1920.365609049797,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.019926153119983307,
"min": 0.0145245876609503,
"max": 0.019926153119983307,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03985230623996661,
"min": 0.0290491753219006,
"max": 0.05850901313388022,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.049261174040536086,
"min": 0.021393186474839846,
"max": 0.053663690884908036,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09852234808107217,
"min": 0.04278637294967969,
"max": 0.15664421170949938,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.39232353592501e-06,
"min": 4.39232353592501e-06,
"max": 0.0002953708515430499,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.78464707185002e-06,
"min": 8.78464707185002e-06,
"max": 0.0008440785186405,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101464075,
"min": 0.101464075,
"max": 0.19845694999999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20292815,
"min": 0.20292815,
"max": 0.5813594999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.305734250000018e-05,
"min": 8.305734250000018e-05,
"max": 0.004923001805000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016611468500000036,
"min": 0.00016611468500000036,
"max": 0.014069839049999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670624233",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670626387"
},
"total": 2154.273322769,
"count": 1,
"self": 0.39145269300024665,
"children": {
"run_training.setup": {
"total": 0.11835161799990601,
"count": 1,
"self": 0.11835161799990601
},
"TrainerController.start_learning": {
"total": 2153.763518458,
"count": 1,
"self": 3.709432787962214,
"children": {
"TrainerController._reset_env": {
"total": 10.93814946100008,
"count": 1,
"self": 10.93814946100008
},
"TrainerController.advance": {
"total": 2138.9905523130374,
"count": 231190,
"self": 3.9490364579860397,
"children": {
"env_step": {
"total": 1683.8983058140443,
"count": 231190,
"self": 1408.0663707120193,
"children": {
"SubprocessEnvManager._take_step": {
"total": 273.3073101059449,
"count": 231190,
"self": 14.284054822919188,
"children": {
"TorchPolicy.evaluate": {
"total": 259.0232552830257,
"count": 222916,
"self": 64.76242196605119,
"children": {
"TorchPolicy.sample_actions": {
"total": 194.26083331697453,
"count": 222916,
"self": 194.26083331697453
}
}
}
}
},
"workers": {
"total": 2.5246249960799787,
"count": 231190,
"self": 0.0,
"children": {
"worker_root": {
"total": 2146.2525968799837,
"count": 231190,
"is_parallel": true,
"self": 993.0877181439594,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002253793000022597,
"count": 1,
"is_parallel": true,
"self": 0.00031262699997114396,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001941166000051453,
"count": 2,
"is_parallel": true,
"self": 0.001941166000051453
}
}
},
"UnityEnvironment.step": {
"total": 0.027536372999975356,
"count": 1,
"is_parallel": true,
"self": 0.0002810740000995793,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018348499997955514,
"count": 1,
"is_parallel": true,
"self": 0.00018348499997955514
},
"communicator.exchange": {
"total": 0.02640535499995167,
"count": 1,
"is_parallel": true,
"self": 0.02640535499995167
},
"steps_from_proto": {
"total": 0.0006664589999445525,
"count": 1,
"is_parallel": true,
"self": 0.00021712199986723135,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00044933700007732114,
"count": 2,
"is_parallel": true,
"self": 0.00044933700007732114
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1153.1648787360243,
"count": 231189,
"is_parallel": true,
"self": 33.9997877760577,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.37747400907108,
"count": 231189,
"is_parallel": true,
"self": 74.37747400907108
},
"communicator.exchange": {
"total": 955.6025972439624,
"count": 231189,
"is_parallel": true,
"self": 955.6025972439624
},
"steps_from_proto": {
"total": 89.18501970693308,
"count": 231189,
"is_parallel": true,
"self": 36.37197363995131,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.81304606698177,
"count": 462378,
"is_parallel": true,
"self": 52.81304606698177
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 451.143210041007,
"count": 231190,
"self": 5.925297012019314,
"children": {
"process_trajectory": {
"total": 138.93527564398676,
"count": 231190,
"self": 138.46838353798705,
"children": {
"RLTrainer._checkpoint": {
"total": 0.46689210599970465,
"count": 4,
"self": 0.46689210599970465
}
}
},
"_update_policy": {
"total": 306.28263738500095,
"count": 96,
"self": 253.34017950700354,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.94245787799741,
"count": 2880,
"self": 52.94245787799741
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.550000529794488e-07,
"count": 1,
"self": 8.550000529794488e-07
},
"TrainerController._save_models": {
"total": 0.12538304100007736,
"count": 1,
"self": 0.0019440570004007895,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12343898399967657,
"count": 1,
"self": 0.12343898399967657
}
}
}
}
}
}
}