{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.2039570808410645,
"min": 1.1940728425979614,
"max": 2.829486846923828,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 11442.408203125,
"min": 11442.408203125,
"max": 28883.40234375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.770524978637695,
"min": 0.2557782530784607,
"max": 11.770524978637695,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2295.25244140625,
"min": 49.6209831237793,
"max": 2316.1552734375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06633045652722334,
"min": 0.059927960710517834,
"max": 0.0735167631977603,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2653218261088934,
"min": 0.23984810241866703,
"max": 0.35594934982809184,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20199138021060065,
"min": 0.11114127354070946,
"max": 0.2965753060345556,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8079655208424026,
"min": 0.44456509416283785,
"max": 1.4828765301727782,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.75,
"min": 3.227272727272727,
"max": 23.8,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1045.0,
"min": 142.0,
"max": 1309.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.75,
"min": 3.227272727272727,
"max": 23.8,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1045.0,
"min": 142.0,
"max": 1309.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1738558002",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1738558435"
},
"total": 433.1388834800001,
"count": 1,
"self": 0.7817245270001649,
"children": {
"run_training.setup": {
"total": 0.0236553400000048,
"count": 1,
"self": 0.0236553400000048
},
"TrainerController.start_learning": {
"total": 432.33350361299995,
"count": 1,
"self": 0.34334170700356026,
"children": {
"TrainerController._reset_env": {
"total": 3.283961299999987,
"count": 1,
"self": 3.283961299999987
},
"TrainerController.advance": {
"total": 428.61831320299643,
"count": 18192,
"self": 0.34479967600589134,
"children": {
"env_step": {
"total": 301.9074168909922,
"count": 18192,
"self": 229.89139947898002,
"children": {
"SubprocessEnvManager._take_step": {
"total": 71.81434084001023,
"count": 18192,
"self": 1.2589827060057814,
"children": {
"TorchPolicy.evaluate": {
"total": 70.55535813400445,
"count": 18192,
"self": 70.55535813400445
}
}
},
"workers": {
"total": 0.20167657200192934,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 430.8199215820073,
"count": 18192,
"is_parallel": true,
"self": 229.05596041400963,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006604494999919552,
"count": 1,
"is_parallel": true,
"self": 0.004877705000126298,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017267899997932545,
"count": 10,
"is_parallel": true,
"self": 0.0017267899997932545
}
}
},
"UnityEnvironment.step": {
"total": 0.03772479999997813,
"count": 1,
"is_parallel": true,
"self": 0.0006123319999460364,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042488499991577555,
"count": 1,
"is_parallel": true,
"self": 0.00042488499991577555
},
"communicator.exchange": {
"total": 0.034827874000029624,
"count": 1,
"is_parallel": true,
"self": 0.034827874000029624
},
"steps_from_proto": {
"total": 0.0018597090000866956,
"count": 1,
"is_parallel": true,
"self": 0.00036666400012563827,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014930449999610573,
"count": 10,
"is_parallel": true,
"self": 0.0014930449999610573
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 201.76396116799765,
"count": 18191,
"is_parallel": true,
"self": 9.812188242992761,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.363730858009262,
"count": 18191,
"is_parallel": true,
"self": 5.363730858009262
},
"communicator.exchange": {
"total": 155.17709658099693,
"count": 18191,
"is_parallel": true,
"self": 155.17709658099693
},
"steps_from_proto": {
"total": 31.410945485998695,
"count": 18191,
"is_parallel": true,
"self": 5.501424986994152,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.909520499004543,
"count": 181910,
"is_parallel": true,
"self": 25.909520499004543
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 126.36609663599836,
"count": 18192,
"self": 0.40598152899804063,
"children": {
"process_trajectory": {
"total": 27.5930933660012,
"count": 18192,
"self": 27.052099163001117,
"children": {
"RLTrainer._checkpoint": {
"total": 0.540994203000082,
"count": 4,
"self": 0.540994203000082
}
}
},
"_update_policy": {
"total": 98.36702174099912,
"count": 90,
"self": 39.35241674700512,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.014604993994,
"count": 4587,
"self": 59.014604993994
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0520000159885967e-06,
"count": 1,
"self": 1.0520000159885967e-06
},
"TrainerController._save_models": {
"total": 0.0878863509999519,
"count": 1,
"self": 0.0008765599998241669,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08700979100012773,
"count": 1,
"self": 0.08700979100012773
}
}
}
}
}
}
}
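
For reference, a minimal Python sketch for reading this file and printing the gauge summaries above. It only uses the standard-library json module; the results/SnowballTarget1/run_logs/timers.json path is an assumption based on the --run-id in the metadata and the usual ML-Agents output layout, not something recorded in the file itself.

# Sketch: load this timers.json and summarize its gauges.
# Path is an assumed ML-Agents layout: results/<run-id>/run_logs/timers.json
import json

with open("results/SnowballTarget1/run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the latest value plus min/max over `count` summary writes.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# The remainder of the file is a nested timer tree; the root node's total
# is the wall-clock duration of the run in seconds.
print("total seconds:", timers["total"])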