poca-SoccerTwos / run_logs / timers.json
ronanlobo7's picture
First Push
e3fdc6a verified
raw
history blame
20.1 kB
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.101351261138916,
"min": 3.054426670074463,
"max": 3.295708417892456,
"count": 250
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 55080.0,
"min": 20167.59375,
"max": 150900.4375,
"count": 250
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 470.3333333333333,
"max": 999.0,
"count": 250
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 16040.0,
"max": 31968.0,
"count": 250
},
"SoccerTwos.Step.mean": {
"value": 2509756.0,
"min": 9000.0,
"max": 2509756.0,
"count": 251
},
"SoccerTwos.Step.sum": {
"value": 2509756.0,
"min": 9000.0,
"max": 2509756.0,
"count": 251
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 9.941160533344373e-05,
"min": -0.01774565503001213,
"max": 0.10760049521923065,
"count": 251
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.0009941160678863525,
"min": -0.23319688439369202,
"max": 1.722970962524414,
"count": 251
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.00017016730271279812,
"min": -0.017709609121084213,
"max": 0.10757717490196228,
"count": 251
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.0017016730271279812,
"min": -0.21772049367427826,
"max": 1.6970899105072021,
"count": 251
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 251
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 251
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.4666666666666667,
"max": 0.29162856936454773,
"count": 251
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -7.0,
"max": 4.082799971103668,
"count": 251
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.4666666666666667,
"max": 0.29162856936454773,
"count": 251
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -7.0,
"max": 4.082799971103668,
"count": 251
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 251
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 251
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.021670383154802644,
"min": 0.01110799467326918,
"max": 0.022893946474262825,
"count": 116
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.021670383154802644,
"min": 0.01110799467326918,
"max": 0.022893946474262825,
"count": 116
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 4.298182193451794e-06,
"min": 6.954269515802025e-08,
"max": 0.0046996509113038576,
"count": 116
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 4.298182193451794e-06,
"min": 6.954269515802025e-08,
"max": 0.0046996509113038576,
"count": 116
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 4.265978623152477e-06,
"min": 8.857286554568115e-08,
"max": 0.004718441826601823,
"count": 116
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 4.265978623152477e-06,
"min": 8.857286554568115e-08,
"max": 0.004718441826601823,
"count": 116
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 116
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 116
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 116
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 116
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 116
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 116
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1179.9717510387964,
"min": 1176.6649807854974,
"max": 1200.4971269630323,
"count": 127
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2359.9435020775927,
"min": 2353.329961570995,
"max": 16800.0,
"count": 127
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1726771424",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/loboronan/miniconda3/envs/unityrl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1726777445"
},
"total": 6021.641882481,
"count": 1,
"self": 0.04719205899982626,
"children": {
"run_training.setup": {
"total": 0.012762701000156085,
"count": 1,
"self": 0.012762701000156085
},
"TrainerController.start_learning": {
"total": 6021.581927720999,
"count": 1,
"self": 3.5443163798727255,
"children": {
"TrainerController._reset_env": {
"total": 3.5546105259995784,
"count": 13,
"self": 3.5546105259995784
},
"TrainerController.advance": {
"total": 6014.262929808127,
"count": 163635,
"self": 4.22816758198951,
"children": {
"env_step": {
"total": 4987.2101655220795,
"count": 163635,
"self": 3139.435143158827,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1845.3918190261047,
"count": 163635,
"self": 23.4835447971368,
"children": {
"TorchPolicy.evaluate": {
"total": 1821.908274228968,
"count": 325022,
"self": 1821.908274228968
}
}
},
"workers": {
"total": 2.3832033371477337,
"count": 163634,
"self": 0.0,
"children": {
"worker_root": {
"total": 6012.734764201937,
"count": 163634,
"is_parallel": true,
"self": 3303.017431428965,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002960332999464299,
"count": 2,
"is_parallel": true,
"self": 0.0011569969988158846,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018033360006484145,
"count": 8,
"is_parallel": true,
"self": 0.0018033360006484145
}
}
},
"UnityEnvironment.step": {
"total": 0.027680839999902673,
"count": 1,
"is_parallel": true,
"self": 0.0004136850002396386,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039316999982474954,
"count": 1,
"is_parallel": true,
"self": 0.00039316999982474954
},
"communicator.exchange": {
"total": 0.025436268000248674,
"count": 1,
"is_parallel": true,
"self": 0.025436268000248674
},
"steps_from_proto": {
"total": 0.001437716999589611,
"count": 2,
"is_parallel": true,
"self": 0.0003472859993962629,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010904310001933482,
"count": 8,
"is_parallel": true,
"self": 0.0010904310001933482
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2709.699229369971,
"count": 163633,
"is_parallel": true,
"self": 76.63292334552898,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 56.14572497215113,
"count": 163633,
"is_parallel": true,
"self": 56.14572497215113
},
"communicator.exchange": {
"total": 2316.4550215803497,
"count": 163633,
"is_parallel": true,
"self": 2316.4550215803497
},
"steps_from_proto": {
"total": 260.4655594719411,
"count": 327266,
"is_parallel": true,
"self": 58.7047048213708,
"children": {
"_process_rank_one_or_two_observation": {
"total": 201.7608546505703,
"count": 1309064,
"is_parallel": true,
"self": 201.7608546505703
}
}
}
}
},
"steps_from_proto": {
"total": 0.018103403000623075,
"count": 24,
"is_parallel": true,
"self": 0.004023700000743702,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.014079702999879373,
"count": 96,
"is_parallel": true,
"self": 0.014079702999879373
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1022.8245967040584,
"count": 163634,
"self": 28.723119664031856,
"children": {
"process_trajectory": {
"total": 397.0067258050244,
"count": 163634,
"self": 395.9022162810252,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1045095239992406,
"count": 5,
"self": 1.1045095239992406
}
}
},
"_update_policy": {
"total": 597.0947512350021,
"count": 116,
"self": 242.8269063930552,
"children": {
"TorchPOCAOptimizer.update": {
"total": 354.26784484194695,
"count": 3495,
"self": 354.26784484194695
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.540000635548495e-07,
"count": 1,
"self": 8.540000635548495e-07
},
"TrainerController._save_models": {
"total": 0.2200701530000515,
"count": 1,
"self": 0.002833712000210653,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21723644099984085,
"count": 1,
"self": 0.21723644099984085
}
}
}
}
}
}
}