{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.407869815826416,
"min": 1.407869815826416,
"max": 1.4265393018722534,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70753.90625,
"min": 69450.8828125,
"max": 76050.546875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 93.59848484848484,
"min": 77.78582677165355,
"max": 389.875,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49420.0,
"min": 49266.0,
"max": 50143.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999970.0,
"min": 49823.0,
"max": 1999970.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999970.0,
"min": 49823.0,
"max": 1999970.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.437657117843628,
"min": 0.04850928112864494,
"max": 2.47505784034729,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1287.0830078125,
"min": 6.160678863525391,
"max": 1529.36865234375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.722964313219894,
"min": 1.7902840810028586,
"max": 3.916819179058075,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1965.725157380104,
"min": 227.36607828736305,
"max": 2427.0636330246925,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.722964313219894,
"min": 1.7902840810028586,
"max": 3.916819179058075,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1965.725157380104,
"min": 227.36607828736305,
"max": 2427.0636330246925,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016721599680022334,
"min": 0.012714983720858677,
"max": 0.020466300013746754,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.050164799040067,
"min": 0.027761302406239943,
"max": 0.05846165645731768,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.055136829242110254,
"min": 0.02189748411377271,
"max": 0.05811110238234202,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16541048772633077,
"min": 0.04379496822754542,
"max": 0.17024877617756526,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.853248715616661e-06,
"min": 3.853248715616661e-06,
"max": 0.0002953709265430249,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1559746146849983e-05,
"min": 1.1559746146849983e-05,
"max": 0.0008442138185954,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10128438333333334,
"min": 0.10128438333333334,
"max": 0.19845697500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30385315,
"min": 0.20776549999999996,
"max": 0.5814046,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.409072833333322e-05,
"min": 7.409072833333322e-05,
"max": 0.004923003052500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022227218499999967,
"min": 0.00022227218499999967,
"max": 0.01407208954,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671708178",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671710510"
},
"total": 2332.014685921,
"count": 1,
"self": 0.3831058920000032,
"children": {
"run_training.setup": {
"total": 0.12697928000000047,
"count": 1,
"self": 0.12697928000000047
},
"TrainerController.start_learning": {
"total": 2331.504600749,
"count": 1,
"self": 4.1513003689783545,
"children": {
"TrainerController._reset_env": {
"total": 7.176792053999975,
"count": 1,
"self": 7.176792053999975
},
"TrainerController.advance": {
"total": 2320.0621033270218,
"count": 232849,
"self": 4.370364315036113,
"children": {
"env_step": {
"total": 1838.072958746035,
"count": 232849,
"self": 1546.0973987650482,
"children": {
"SubprocessEnvManager._take_step": {
"total": 289.19850539898744,
"count": 232849,
"self": 15.11978772186444,
"children": {
"TorchPolicy.evaluate": {
"total": 274.078717677123,
"count": 223035,
"self": 67.80950885506059,
"children": {
"TorchPolicy.sample_actions": {
"total": 206.2692088220624,
"count": 223035,
"self": 206.2692088220624
}
}
}
}
},
"workers": {
"total": 2.7770545819994368,
"count": 232849,
"self": 0.0,
"children": {
"worker_root": {
"total": 2323.4273593320745,
"count": 232849,
"is_parallel": true,
"self": 1051.0722172812075,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002116997999905834,
"count": 1,
"is_parallel": true,
"self": 0.0003279799999518218,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017890179999540123,
"count": 2,
"is_parallel": true,
"self": 0.0017890179999540123
}
}
},
"UnityEnvironment.step": {
"total": 0.028686829999969632,
"count": 1,
"is_parallel": true,
"self": 0.0003155329999344758,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002056120000588635,
"count": 1,
"is_parallel": true,
"self": 0.0002056120000588635
},
"communicator.exchange": {
"total": 0.027366318999952455,
"count": 1,
"is_parallel": true,
"self": 0.027366318999952455
},
"steps_from_proto": {
"total": 0.0007993660000238378,
"count": 1,
"is_parallel": true,
"self": 0.00028212500001245644,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005172410000113814,
"count": 2,
"is_parallel": true,
"self": 0.0005172410000113814
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1272.355142050867,
"count": 232848,
"is_parallel": true,
"self": 35.8458943797275,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.41339243016739,
"count": 232848,
"is_parallel": true,
"self": 82.41339243016739
},
"communicator.exchange": {
"total": 1055.3638407149533,
"count": 232848,
"is_parallel": true,
"self": 1055.3638407149533
},
"steps_from_proto": {
"total": 98.73201452601893,
"count": 232848,
"is_parallel": true,
"self": 43.30700306499398,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.42501146102495,
"count": 465696,
"is_parallel": true,
"self": 55.42501146102495
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 477.6187802659505,
"count": 232849,
"self": 6.46728056882273,
"children": {
"process_trajectory": {
"total": 156.55128344412776,
"count": 232849,
"self": 155.37826330512826,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1730201389995045,
"count": 10,
"self": 1.1730201389995045
}
}
},
"_update_policy": {
"total": 314.600216253,
"count": 97,
"self": 261.2806386340093,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.31957761899071,
"count": 2910,
"self": 53.31957761899071
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.279998837679159e-07,
"count": 1,
"self": 8.279998837679159e-07
},
"TrainerController._save_models": {
"total": 0.11440417099993283,
"count": 1,
"self": 0.002025120999860519,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11237905000007231,
"count": 1,
"self": 0.11237905000007231
}
}
}
}
}
}
}