{
    "name": "root",
    "gauges": {
        "SnowballTarget.Policy.Entropy.mean": {
            "value": 0.7317606210708618,
            "min": 0.7317606210708618,
            "max": 2.866332530975342,
            "count": 20
        },
        "SnowballTarget.Policy.Entropy.sum": {
            "value": 6954.65283203125,
            "min": 6954.65283203125,
            "max": 29259.521484375,
            "count": 20
        },
        "SnowballTarget.Step.mean": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Step.sum": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
            "value": 13.209382057189941,
            "min": 0.4157317578792572,
            "max": 13.209382057189941,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
            "value": 2575.82958984375,
            "min": 80.65196228027344,
            "max": 2687.591064453125,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.mean": {
            "value": 0.0766175562249355,
            "min": 0.0625236720494999,
            "max": 0.0766175562249355,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.sum": {
            "value": 0.306470224899742,
            "min": 0.2591419503830063,
            "max": 0.37541138869556906,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.mean": {
            "value": 0.17844664144749736,
            "min": 0.13906917640683697,
            "max": 0.27258884310722353,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.sum": {
            "value": 0.7137865657899894,
            "min": 0.5562767056273479,
            "max": 1.3629442155361176,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.mean": {
            "value": 8.082097306000005e-06,
            "min": 8.082097306000005e-06,
            "max": 0.000291882002706,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.sum": {
            "value": 3.232838922400002e-05,
            "min": 3.232838922400002e-05,
            "max": 0.00138516003828,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.mean": {
            "value": 0.10269400000000001,
            "min": 0.10269400000000001,
            "max": 0.19729400000000002,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.sum": {
            "value": 0.41077600000000003,
            "min": 0.41077600000000003,
            "max": 0.96172,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.mean": {
            "value": 0.0001444306000000001,
            "min": 0.0001444306000000001,
            "max": 0.0048649706,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.sum": {
            "value": 0.0005777224000000004,
            "min": 0.0005777224000000004,
            "max": 0.023089828,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.mean": {
            "value": 199.0,
            "min": 199.0,
            "max": 199.0,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.sum": {
            "value": 8756.0,
            "min": 8756.0,
            "max": 10945.0,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.mean": {
            "value": 25.954545454545453,
            "min": 3.6818181818181817,
            "max": 26.0,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.sum": {
            "value": 1142.0,
            "min": 162.0,
            "max": 1427.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.mean": {
            "value": 25.954545454545453,
            "min": 3.6818181818181817,
            "max": 26.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.sum": {
            "value": 1142.0,
            "min": 162.0,
            "max": 1427.0,
            "count": 20
        },
        "SnowballTarget.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        },
        "SnowballTarget.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1742829063",
        "python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
        "mlagents_version": "1.2.0.dev0",
        "mlagents_envs_version": "1.2.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "2.6.0+cu124",
        "numpy_version": "1.23.5",
        "end_time_seconds": "1742829632"
    },
    "total": 569.1750338270001,
    "count": 1,
    "self": 0.5909373079999796,
    "children": {
        "run_training.setup": {
            "total": 0.0390871590000188,
            "count": 1,
            "self": 0.0390871590000188
        },
        "TrainerController.start_learning": {
            "total": 568.5450093600001,
            "count": 1,
            "self": 0.6154565259994342,
            "children": {
                "TrainerController._reset_env": {
                    "total": 4.076307128999929,
                    "count": 1,
                    "self": 4.076307128999929
                },
                "TrainerController.advance": {
                    "total": 563.7633558000009,
                    "count": 18192,
                    "self": 0.7090669550007078,
                    "children": {
                        "env_step": {
                            "total": 393.15322856100124,
                            "count": 18192,
                            "self": 334.96020108200344,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 57.81017542100153,
                                    "count": 18192,
                                    "self": 2.1692854700000908,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 55.64088995100144,
                                            "count": 18192,
                                            "self": 55.64088995100144
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.3828520579962742,
                                    "count": 18192,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 566.4243109680016,
                                            "count": 18192,
                                            "is_parallel": true,
                                            "self": 277.8773330459944,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.00814789399998972,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.005368908000036754,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.0027789859999529654,
                                                                    "count": 10,
                                                                    "is_parallel": true,
                                                                    "self": 0.0027789859999529654
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.04895975399995223,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0007184189997815338,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.00046647400006349926,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.00046647400006349926
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.045343809000087276,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.045343809000087276
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0024310520000199176,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.00047247399959360337,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0019585780004263142,
                                                                            "count": 10,
                                                                            "is_parallel": true,
                                                                            "self": 0.0019585780004263142
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 288.54697792200716,
                                                    "count": 18191,
                                                    "is_parallel": true,
                                                    "self": 14.381089579983495,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 7.975835055008588,
                                                            "count": 18191,
                                                            "is_parallel": true,
                                                            "self": 7.975835055008588
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 224.1974798729998,
                                                            "count": 18191,
                                                            "is_parallel": true,
                                                            "self": 224.1974798729998
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 41.99257341401528,
                                                            "count": 18191,
                                                            "is_parallel": true,
                                                            "self": 8.059748634032417,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 33.932824779982866,
                                                                    "count": 181910,
                                                                    "is_parallel": true,
                                                                    "self": 33.932824779982866
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        },
                        "trainer_advance": {
                            "total": 169.90106028399896,
                            "count": 18192,
                            "self": 0.8360467400152629,
                            "children": {
                                "process_trajectory": {
                                    "total": 32.409605009983466,
                                    "count": 18192,
                                    "self": 31.880500974983192,
                                    "children": {
                                        "RLTrainer._checkpoint": {
                                            "total": 0.5291040350002731,
                                            "count": 4,
                                            "self": 0.5291040350002731
                                        }
                                    }
                                },
                                "_update_policy": {
                                    "total": 136.65540853400023,
                                    "count": 90,
                                    "self": 50.975593196004866,
                                    "children": {
                                        "TorchPPOOptimizer.update": {
                                            "total": 85.67981533799536,
                                            "count": 4587,
                                            "self": 85.67981533799536
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 1.2250000054336851e-06,
                    "count": 1,
                    "self": 1.2250000054336851e-06
                },
                "TrainerController._save_models": {
                    "total": 0.08988867999983086,
                    "count": 1,
                    "self": 0.0012532539997209824,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.08863542600010987,
                            "count": 1,
                            "self": 0.08863542600010987
                        }
                    }
                }
            }
        }
    }
}