{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7830100655555725,
"min": 0.7815656065940857,
"max": 2.816544771194458,
"count": 15
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 15951.48046875,
"min": 15535.1796875,
"max": 57874.36328125,
"count": 15
},
"SnowballTarget.Step.mean": {
"value": 299968.0,
"min": 19992.0,
"max": 299968.0,
"count": 15
},
"SnowballTarget.Step.sum": {
"value": 299968.0,
"min": 19992.0,
"max": 299968.0,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.170060157775879,
"min": 0.837290346622467,
"max": 13.170060157775879,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 5254.85400390625,
"min": 334.078857421875,
"max": 5324.095703125,
"count": 15
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 15
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 19701.0,
"min": 19701.0,
"max": 21890.0,
"count": 15
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06653407526333023,
"min": 0.06410942371220538,
"max": 0.07133008862710162,
"count": 15
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.5988066773699721,
"min": 0.5769848134098484,
"max": 0.6996921397912955,
"count": 15
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20027990637586524,
"min": 0.1598888468514714,
"max": 0.2642679975909832,
"count": 15
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.8025191573827872,
"min": 1.4389996216632426,
"max": 2.378411978318849,
"count": 15
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 9.688096770666668e-06,
"min": 9.688096770666668e-06,
"max": 0.0002890880036373332,
"count": 15
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 8.719287093600001e-05,
"min": 8.719287093600001e-05,
"max": 0.002601792032735999,
"count": 15
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10322933333333333,
"min": 0.10322933333333333,
"max": 0.19636266666666666,
"count": 15
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.9290639999999999,
"min": 0.9290639999999999,
"max": 1.767264,
"count": 15
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00017114373333333333,
"min": 0.00017114373333333333,
"max": 0.004818497066666667,
"count": 15
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0015402936,
"min": 0.0015402936,
"max": 0.043366473600000004,
"count": 15
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.818181818181817,
"min": 4.646464646464646,
"max": 26.050505050505052,
"count": 15
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 2556.0,
"min": 460.0,
"max": 2723.0,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.818181818181817,
"min": 4.646464646464646,
"max": 26.050505050505052,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 2556.0,
"min": 460.0,
"max": 2723.0,
"count": 15
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 15
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 15
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691493431",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691494152"
},
"total": 721.679895645,
"count": 1,
"self": 0.4998370689999092,
"children": {
"run_training.setup": {
"total": 0.041360203000067486,
"count": 1,
"self": 0.041360203000067486
},
"TrainerController.start_learning": {
"total": 721.138698373,
"count": 1,
"self": 0.8927334799814162,
"children": {
"TrainerController._reset_env": {
"total": 5.411870992000104,
"count": 1,
"self": 5.411870992000104
},
"TrainerController.advance": {
"total": 714.5967126690185,
"count": 27352,
"self": 0.44367263401693435,
"children": {
"env_step": {
"total": 714.1530400350016,
"count": 27352,
"self": 520.2403875770093,
"children": {
"SubprocessEnvManager._take_step": {
"total": 193.4659061219793,
"count": 27352,
"self": 2.8844986479780346,
"children": {
"TorchPolicy.evaluate": {
"total": 190.58140747400125,
"count": 27352,
"self": 190.58140747400125
}
}
},
"workers": {
"total": 0.44674633601300684,
"count": 27352,
"self": 0.0,
"children": {
"worker_root": {
"total": 718.495603443009,
"count": 27352,
"is_parallel": true,
"self": 336.14692276500875,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00647998900001312,
"count": 1,
"is_parallel": true,
"self": 0.005078616999867336,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014013720001457841,
"count": 10,
"is_parallel": true,
"self": 0.0014013720001457841
}
}
},
"UnityEnvironment.step": {
"total": 0.041327096000031815,
"count": 1,
"is_parallel": true,
"self": 0.000669295999955466,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003234329999486363,
"count": 1,
"is_parallel": true,
"self": 0.0003234329999486363
},
"communicator.exchange": {
"total": 0.038051583000083156,
"count": 1,
"is_parallel": true,
"self": 0.038051583000083156
},
"steps_from_proto": {
"total": 0.0022827840000445576,
"count": 1,
"is_parallel": true,
"self": 0.0005188630001384809,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017639209999060768,
"count": 10,
"is_parallel": true,
"self": 0.0017639209999060768
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 382.3486806780003,
"count": 27351,
"is_parallel": true,
"self": 15.756078483041506,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.193371961976936,
"count": 27351,
"is_parallel": true,
"self": 8.193371961976936
},
"communicator.exchange": {
"total": 304.04878324899266,
"count": 27351,
"is_parallel": true,
"self": 304.04878324899266
},
"steps_from_proto": {
"total": 54.35044698398917,
"count": 27351,
"is_parallel": true,
"self": 10.09255468792594,
"children": {
"_process_rank_one_or_two_observation": {
"total": 44.25789229606323,
"count": 273510,
"is_parallel": true,
"self": 44.25789229606323
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.883100005936285e-05,
"count": 1,
"self": 9.883100005936285e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 709.412736343983,
"count": 661601,
"is_parallel": true,
"self": 14.612093222056615,
"children": {
"process_trajectory": {
"total": 386.69589968992636,
"count": 661601,
"is_parallel": true,
"self": 384.5368852719264,
"children": {
"RLTrainer._checkpoint": {
"total": 2.1590144179999697,
"count": 6,
"is_parallel": true,
"self": 2.1590144179999697
}
}
},
"_update_policy": {
"total": 308.1047434320001,
"count": 136,
"is_parallel": true,
"self": 121.7142510920055,
"children": {
"TorchPPOOptimizer.update": {
"total": 186.39049233999458,
"count": 6930,
"is_parallel": true,
"self": 186.39049233999458
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.23728240099990217,
"count": 1,
"self": 0.0012752610000461573,
"children": {
"RLTrainer._checkpoint": {
"total": 0.236007139999856,
"count": 1,
"self": 0.236007139999856
}
}
}
}
}
}
}