{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4992523491382599,
"min": 0.4992523491382599,
"max": 1.4368122816085815,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14977.5703125,
"min": 14977.5703125,
"max": 43587.13671875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989883.0,
"min": 29952.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989883.0,
"min": 29952.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4695327877998352,
"min": -0.10257785767316818,
"max": 0.4695327877998352,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 126.77384948730469,
"min": -24.721263885498047,
"max": 126.77384948730469,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.00361551227979362,
"min": -0.00361551227979362,
"max": 0.2599310576915741,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.9761883020401001,
"min": -0.9761883020401001,
"max": 62.38345718383789,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06565213015111272,
"min": 0.06459844327487406,
"max": 0.07325088515792441,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9191298221155781,
"min": 0.5001573439937717,
"max": 1.0789947755530356,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013295434062364766,
"min": 0.0002453176367023755,
"max": 0.013295434062364766,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18613607687310674,
"min": 0.003189129277130882,
"max": 0.18613607687310674,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.606061750392858e-06,
"min": 7.606061750392858e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010648486450550002,
"min": 0.00010648486450550002,
"max": 0.0032527097157634996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253532142857144,
"min": 0.10253532142857144,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4354945000000001,
"min": 1.3886848,
"max": 2.4436013999999995,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002632786107142858,
"min": 0.0002632786107142858,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036859005500000015,
"min": 0.0036859005500000015,
"max": 0.10844522635,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010151459835469723,
"min": 0.009942200966179371,
"max": 0.4211326241493225,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14212043583393097,
"min": 0.13919080793857574,
"max": 2.9479284286499023,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 391.97297297297297,
"min": 391.97297297297297,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29006.0,
"min": 15984.0,
"max": 33631.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5269270047948167,
"min": -1.0000000521540642,
"max": 1.5269270047948167,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 112.99259835481644,
"min": -31.999601677060127,
"max": 112.99259835481644,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5269270047948167,
"min": -1.0000000521540642,
"max": 1.5269270047948167,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 112.99259835481644,
"min": -31.999601677060127,
"max": 112.99259835481644,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04090656361753448,
"min": 0.04090656361753448,
"max": 8.10820494685322,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.0270857076975517,
"min": 2.854461886920035,
"max": 129.73127914965153,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693660006",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693662266"
},
"total": 2260.477208345,
"count": 1,
"self": 0.5375704119996954,
"children": {
"run_training.setup": {
"total": 0.04846043599997074,
"count": 1,
"self": 0.04846043599997074
},
"TrainerController.start_learning": {
"total": 2259.891177497,
"count": 1,
"self": 1.6183679250257228,
"children": {
"TrainerController._reset_env": {
"total": 4.312574903000041,
"count": 1,
"self": 4.312574903000041
},
"TrainerController.advance": {
"total": 2253.856821898975,
"count": 63540,
"self": 1.645623145972877,
"children": {
"env_step": {
"total": 1580.7124387809886,
"count": 63540,
"self": 1459.5707891489892,
"children": {
"SubprocessEnvManager._take_step": {
"total": 120.14064883898482,
"count": 63540,
"self": 5.051337129938929,
"children": {
"TorchPolicy.evaluate": {
"total": 115.08931170904589,
"count": 62561,
"self": 115.08931170904589
}
}
},
"workers": {
"total": 1.001000793014498,
"count": 63540,
"self": 0.0,
"children": {
"worker_root": {
"total": 2254.41834098898,
"count": 63540,
"is_parallel": true,
"self": 920.5158193690017,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0048293419999936305,
"count": 1,
"is_parallel": true,
"self": 0.003384422999886283,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014449190001073475,
"count": 8,
"is_parallel": true,
"self": 0.0014449190001073475
}
}
},
"UnityEnvironment.step": {
"total": 0.046218334000002415,
"count": 1,
"is_parallel": true,
"self": 0.0006432119999431052,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000472922000028575,
"count": 1,
"is_parallel": true,
"self": 0.000472922000028575
},
"communicator.exchange": {
"total": 0.043152355000017906,
"count": 1,
"is_parallel": true,
"self": 0.043152355000017906
},
"steps_from_proto": {
"total": 0.0019498450000128287,
"count": 1,
"is_parallel": true,
"self": 0.0003827550000323754,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015670899999804533,
"count": 8,
"is_parallel": true,
"self": 0.0015670899999804533
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1333.9025216199784,
"count": 63539,
"is_parallel": true,
"self": 35.86950667289943,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.846895921035525,
"count": 63539,
"is_parallel": true,
"self": 23.846895921035525
},
"communicator.exchange": {
"total": 1165.5948715730358,
"count": 63539,
"is_parallel": true,
"self": 1165.5948715730358
},
"steps_from_proto": {
"total": 108.59124745300761,
"count": 63539,
"is_parallel": true,
"self": 21.767169268011628,
"children": {
"_process_rank_one_or_two_observation": {
"total": 86.82407818499598,
"count": 508312,
"is_parallel": true,
"self": 86.82407818499598
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 671.4987599720133,
"count": 63540,
"self": 3.057386330019085,
"children": {
"process_trajectory": {
"total": 112.76556404499348,
"count": 63540,
"self": 112.5598744439938,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20568960099967626,
"count": 2,
"self": 0.20568960099967626
}
}
},
"_update_policy": {
"total": 555.6758095970008,
"count": 447,
"self": 363.09138487597755,
"children": {
"TorchPPOOptimizer.update": {
"total": 192.58442472102325,
"count": 22791,
"self": 192.58442472102325
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2579998838191386e-06,
"count": 1,
"self": 1.2579998838191386e-06
},
"TrainerController._save_models": {
"total": 0.1034115119996386,
"count": 1,
"self": 0.0014972879998822464,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10191422399975636,
"count": 1,
"self": 0.10191422399975636
}
}
}
}
}
}
}