{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.349806547164917,
"min": 0.349806547164917,
"max": 1.4563781023025513,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10415.83984375,
"min": 10415.83984375,
"max": 44180.6875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989998.0,
"min": 29952.0,
"max": 989998.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989998.0,
"min": 29952.0,
"max": 989998.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5775057077407837,
"min": -0.10548482090234756,
"max": 0.5795166492462158,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 165.1666259765625,
"min": -25.316356658935547,
"max": 165.1666259765625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03202696144580841,
"min": 0.0017850997392088175,
"max": 0.29610657691955566,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.159710884094238,
"min": 0.47305142879486084,
"max": 71.65779113769531,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06918503941380463,
"min": 0.06610956231299436,
"max": 0.07518119917693201,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9685905517932649,
"min": 0.5117288717485552,
"max": 1.0710794845465843,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01679382115149305,
"min": 0.00021893618095739363,
"max": 0.01679382115149305,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2351134961209027,
"min": 0.0028461703524461173,
"max": 0.24513486924600633,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.319518988764285e-06,
"min": 7.319518988764285e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010247326584269999,
"min": 0.00010247326584269999,
"max": 0.0033760849746384,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243980714285716,
"min": 0.10243980714285716,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4341573000000003,
"min": 1.3886848,
"max": 2.4424642000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025373673357142866,
"min": 0.00025373673357142866,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003552314270000001,
"min": 0.003552314270000001,
"max": 0.11255362384,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014430887997150421,
"min": 0.014042859897017479,
"max": 0.4727877974510193,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2020324319601059,
"min": 0.1966000348329544,
"max": 3.3095145225524902,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 299.74528301886795,
"min": 299.74528301886795,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31773.0,
"min": 15984.0,
"max": 34348.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6625150751392797,
"min": -1.0000000521540642,
"max": 1.6625150751392797,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 176.22659796476364,
"min": -31.995201647281647,
"max": 176.22659796476364,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6625150751392797,
"min": -1.0000000521540642,
"max": 1.6625150751392797,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 176.22659796476364,
"min": -31.995201647281647,
"max": 176.22659796476364,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04493558130219891,
"min": 0.04493558130219891,
"max": 9.861752872355282,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.763171618033084,
"min": 4.3316115870838985,
"max": 157.78804595768452,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1731440446",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0",
"mlagents_envs_version": "1.1.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1731442736"
},
"total": 2289.620816955,
"count": 1,
"self": 0.8357121679996453,
"children": {
"run_training.setup": {
"total": 0.05608939199998986,
"count": 1,
"self": 0.05608939199998986
},
"TrainerController.start_learning": {
"total": 2288.7290153950003,
"count": 1,
"self": 1.4034999669752324,
"children": {
"TrainerController._reset_env": {
"total": 5.014188101000002,
"count": 1,
"self": 5.014188101000002
},
"TrainerController.advance": {
"total": 2282.1887097910253,
"count": 63894,
"self": 1.392570656017142,
"children": {
"env_step": {
"total": 1595.312173241022,
"count": 63894,
"self": 1446.3888284480468,
"children": {
"SubprocessEnvManager._take_step": {
"total": 148.10963221197733,
"count": 63894,
"self": 4.595729609972295,
"children": {
"TorchPolicy.evaluate": {
"total": 143.51390260200503,
"count": 62570,
"self": 143.51390260200503
}
}
},
"workers": {
"total": 0.8137125809980148,
"count": 63894,
"self": 0.0,
"children": {
"worker_root": {
"total": 2283.7533841229965,
"count": 63894,
"is_parallel": true,
"self": 954.0354314479973,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00204482399999506,
"count": 1,
"is_parallel": true,
"self": 0.000659320000011121,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013855039999839391,
"count": 8,
"is_parallel": true,
"self": 0.0013855039999839391
}
}
},
"UnityEnvironment.step": {
"total": 0.04801028399998586,
"count": 1,
"is_parallel": true,
"self": 0.0005988659999047741,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048103700004276106,
"count": 1,
"is_parallel": true,
"self": 0.00048103700004276106
},
"communicator.exchange": {
"total": 0.04531488300000319,
"count": 1,
"is_parallel": true,
"self": 0.04531488300000319
},
"steps_from_proto": {
"total": 0.0016154980000351316,
"count": 1,
"is_parallel": true,
"self": 0.000328175000106512,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012873229999286195,
"count": 8,
"is_parallel": true,
"self": 0.0012873229999286195
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1329.7179526749992,
"count": 63893,
"is_parallel": true,
"self": 32.85311128198305,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.96328993699899,
"count": 63893,
"is_parallel": true,
"self": 22.96328993699899
},
"communicator.exchange": {
"total": 1177.5388798530137,
"count": 63893,
"is_parallel": true,
"self": 1177.5388798530137
},
"steps_from_proto": {
"total": 96.36267160300349,
"count": 63893,
"is_parallel": true,
"self": 19.588670078894893,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.7740015241086,
"count": 511144,
"is_parallel": true,
"self": 76.7740015241086
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 685.4839658939864,
"count": 63894,
"self": 2.6168467409582945,
"children": {
"process_trajectory": {
"total": 136.51072315302883,
"count": 63894,
"self": 136.23353593302852,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2771872200003145,
"count": 2,
"self": 0.2771872200003145
}
}
},
"_update_policy": {
"total": 546.3563959999992,
"count": 451,
"self": 306.74996314398896,
"children": {
"TorchPPOOptimizer.update": {
"total": 239.60643285601026,
"count": 22809,
"self": 239.60643285601026
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3879998732591048e-06,
"count": 1,
"self": 1.3879998732591048e-06
},
"TrainerController._save_models": {
"total": 0.12261614799990639,
"count": 1,
"self": 0.0019690029998855607,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12064714500002083,
"count": 1,
"self": 0.12064714500002083
}
}
}
}
}
}