{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4190083146095276,
"min": 0.4066978394985199,
"max": 1.4659582376480103,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12550.13671875,
"min": 12124.8173828125,
"max": 44471.30859375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989966.0,
"min": 29952.0,
"max": 989966.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989966.0,
"min": 29952.0,
"max": 989966.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5479588508605957,
"min": -0.13157585263252258,
"max": 0.5654096007347107,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 151.2366485595703,
"min": -31.1834774017334,
"max": 160.57632446289062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.028659116476774216,
"min": 0.0009209717973135412,
"max": 0.31008246541023254,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.909916400909424,
"min": 0.24589946866035461,
"max": 74.72987365722656,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06705080495850693,
"min": 0.06577291301817173,
"max": 0.07177902248180804,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.938711269419097,
"min": 0.49933333559551857,
"max": 1.0685998194157054,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016395423431406795,
"min": 0.0005200350171332913,
"max": 0.01690677591908579,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22953592803969514,
"min": 0.004680315154199622,
"max": 0.23669486286720107,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.41466181419286e-06,
"min": 7.41466181419286e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010380526539870005,
"min": 0.00010380526539870005,
"max": 0.0035076767307745,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247152142857144,
"min": 0.10247152142857144,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346013000000002,
"min": 1.3886848,
"max": 2.5692255000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002569049907142858,
"min": 0.0002569049907142858,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035966698700000015,
"min": 0.0035966698700000015,
"max": 0.11694562745000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.017051640897989273,
"min": 0.016953514888882637,
"max": 0.5215116143226624,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.23872298002243042,
"min": 0.23734919726848602,
"max": 3.6505813598632812,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 324.3333333333333,
"min": 324.3333333333333,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29190.0,
"min": 15984.0,
"max": 33290.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5867488666541048,
"min": -1.0000000521540642,
"max": 1.6427666520906818,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 142.80739799886942,
"min": -31.99920167028904,
"max": 147.84899868816137,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5867488666541048,
"min": -1.0000000521540642,
"max": 1.6427666520906818,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 142.80739799886942,
"min": -31.99920167028904,
"max": 147.84899868816137,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05624282301739893,
"min": 0.05624282301739893,
"max": 10.46531671565026,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.061854071565904,
"min": 5.004755622416269,
"max": 167.44506745040417,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1656679201",
"python_version": "3.8.13 (default, Mar 28 2022, 11:38:47) \n[GCC 7.5.0]",
"command_line_arguments": "/home/explore/miniconda3/envs/ml-agents/bin/mlagents-learn config/ppo/PyramidsRND.yaml --env=trained-envs-executables/linux/Pyramids/Pyramids --run-id=First Training --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.18.5",
"end_time_seconds": "1656680650"
},
"total": 1448.9538334649988,
"count": 1,
"self": 0.31970918201841414,
"children": {
"run_training.setup": {
"total": 0.014047067990759388,
"count": 1,
"self": 0.014047067990759388
},
"TrainerController.start_learning": {
"total": 1448.6200772149896,
"count": 1,
"self": 1.235912938049296,
"children": {
"TrainerController._reset_env": {
"total": 3.8956014170107665,
"count": 1,
"self": 3.8956014170107665
},
"TrainerController.advance": {
"total": 1443.41229452494,
"count": 63797,
"self": 1.206386359393946,
"children": {
"env_step": {
"total": 778.4604620045429,
"count": 63797,
"self": 669.5955909211189,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.13324421567086,
"count": 63797,
"self": 3.3125427714403486,
"children": {
"TorchPolicy.evaluate": {
"total": 104.82070144423051,
"count": 62567,
"self": 40.13264421428903,
"children": {
"TorchPolicy.sample_actions": {
"total": 64.68805722994148,
"count": 62567,
"self": 64.68805722994148
}
}
}
}
},
"workers": {
"total": 0.7316268677532207,
"count": 63797,
"self": 0.0,
"children": {
"worker_root": {
"total": 1446.840786701403,
"count": 63797,
"is_parallel": true,
"self": 858.5721889043925,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006869019998703152,
"count": 1,
"is_parallel": true,
"self": 0.0014228719956008717,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00544614800310228,
"count": 8,
"is_parallel": true,
"self": 0.00544614800310228
}
}
},
"UnityEnvironment.step": {
"total": 0.02236071501101833,
"count": 1,
"is_parallel": true,
"self": 0.00023169901396613568,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018897799600381404,
"count": 1,
"is_parallel": true,
"self": 0.00018897799600381404
},
"communicator.exchange": {
"total": 0.020894086992484517,
"count": 1,
"is_parallel": true,
"self": 0.020894086992484517
},
"steps_from_proto": {
"total": 0.0010459510085638613,
"count": 1,
"is_parallel": true,
"self": 0.00020707602379843593,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008388749847654253,
"count": 8,
"is_parallel": true,
"self": 0.0008388749847654253
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 588.2685977970104,
"count": 63796,
"is_parallel": true,
"self": 15.852388013299787,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 10.99923784994462,
"count": 63796,
"is_parallel": true,
"self": 10.99923784994462
},
"communicator.exchange": {
"total": 493.1164156473242,
"count": 63796,
"is_parallel": true,
"self": 493.1164156473242
},
"steps_from_proto": {
"total": 68.30055628644186,
"count": 63796,
"is_parallel": true,
"self": 12.74069020722527,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.55986607921659,
"count": 510368,
"is_parallel": true,
"self": 55.55986607921659
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 663.7454461610032,
"count": 63797,
"self": 2.0888420185801806,
"children": {
"process_trajectory": {
"total": 137.03766261640703,
"count": 63797,
"self": 136.90352060840814,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1341420079988893,
"count": 2,
"self": 0.1341420079988893
}
}
},
"_update_policy": {
"total": 524.618941526016,
"count": 447,
"self": 213.52586585309473,
"children": {
"TorchPPOOptimizer.update": {
"total": 311.09307567292126,
"count": 22857,
"self": 311.09307567292126
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.489885997027159e-07,
"count": 1,
"self": 9.489885997027159e-07
},
"TrainerController._save_models": {
"total": 0.07626738600083627,
"count": 1,
"self": 0.0007153129990911111,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07555207300174516,
"count": 1,
"self": 0.07555207300174516
}
}
}
}
}
}
}