{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.36985445022583,
"min": 1.3302485942840576,
"max": 3.295691967010498,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 27177.912109375,
"min": 6765.84716796875,
"max": 113482.421875,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 70.25714285714285,
"min": 42.94117647058823,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19672.0,
"min": 12224.0,
"max": 28348.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1491.0628039525184,
"min": 1182.548210703926,
"max": 1516.7776235279837,
"count": 4989
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 208748.79255335257,
"min": 2365.096421407852,
"max": 329488.51982636034,
"count": 4989
},
"SoccerTwos.Step.mean": {
"value": 49999978.0,
"min": 9192.0,
"max": 49999978.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999978.0,
"min": 9192.0,
"max": 49999978.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.008446477353572845,
"min": -0.1378321647644043,
"max": 0.2507602572441101,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.182506799697876,
"min": -22.672176361083984,
"max": 32.625816345214844,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.008503497578203678,
"min": -0.13763639330863953,
"max": 0.250055193901062,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.190489649772644,
"min": -22.192230224609375,
"max": 32.972434997558594,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.1979771431003298,
"min": -0.6875,
"max": 0.569142861025674,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -27.716800034046173,
"min": -63.01819998025894,
"max": 60.70080053806305,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.1979771431003298,
"min": -0.6875,
"max": 0.569142861025674,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -27.716800034046173,
"min": -63.01819998025894,
"max": 60.70080053806305,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01857423917778457,
"min": 0.009236962045542895,
"max": 0.02754863896795238,
"count": 2423
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01857423917778457,
"min": 0.009236962045542895,
"max": 0.02754863896795238,
"count": 2423
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08878144746025403,
"min": 0.00019966156493561965,
"max": 0.11898887902498245,
"count": 2423
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08878144746025403,
"min": 0.00019966156493561965,
"max": 0.11898887902498245,
"count": 2423
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0897236814101537,
"min": 0.0001982558096642606,
"max": 0.12116950526833534,
"count": 2423
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0897236814101537,
"min": 0.0001982558096642606,
"max": 0.12116950526833534,
"count": 2423
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2423
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2423
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2423
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2423
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2423
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2423
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1741515726",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/carbajal/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1741603594"
},
"total": 87867.567145246,
"count": 1,
"self": 0.2719908799917903,
"children": {
"run_training.setup": {
"total": 0.014040420006494969,
"count": 1,
"self": 0.014040420006494969
},
"TrainerController.start_learning": {
"total": 87867.281113946,
"count": 1,
"self": 70.07043651660206,
"children": {
"TrainerController._reset_env": {
"total": 6.217110827594297,
"count": 250,
"self": 6.217110827594297
},
"TrainerController.advance": {
"total": 87790.84123178979,
"count": 3428405,
"self": 63.52165870752651,
"children": {
"env_step": {
"total": 65463.16093376643,
"count": 3428405,
"self": 47247.172181124944,
"children": {
"SubprocessEnvManager._take_step": {
"total": 18172.926531222416,
"count": 3428405,
"self": 375.9177835499577,
"children": {
"TorchPolicy.evaluate": {
"total": 17797.00874767246,
"count": 6284006,
"self": 17797.00874767246
}
}
},
"workers": {
"total": 43.06222141906619,
"count": 3428405,
"self": 0.0,
"children": {
"worker_root": {
"total": 87778.782617174,
"count": 3428405,
"is_parallel": true,
"self": 48104.57271212834,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019256249943282455,
"count": 2,
"is_parallel": true,
"self": 0.00045586604392156005,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014697589504066855,
"count": 8,
"is_parallel": true,
"self": 0.0014697589504066855
}
}
},
"UnityEnvironment.step": {
"total": 0.03152862499700859,
"count": 1,
"is_parallel": true,
"self": 0.000490236998302862,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003318740054965019,
"count": 1,
"is_parallel": true,
"self": 0.0003318740054965019
},
"communicator.exchange": {
"total": 0.02909855797770433,
"count": 1,
"is_parallel": true,
"self": 0.02909855797770433
},
"steps_from_proto": {
"total": 0.0016079560155048966,
"count": 2,
"is_parallel": true,
"self": 0.00033470705966465175,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012732489558402449,
"count": 8,
"is_parallel": true,
"self": 0.0012732489558402449
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 39673.80391099339,
"count": 3428404,
"is_parallel": true,
"self": 1910.256119078549,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1132.760242002434,
"count": 3428404,
"is_parallel": true,
"self": 1132.760242002434
},
"communicator.exchange": {
"total": 31082.231930095644,
"count": 3428404,
"is_parallel": true,
"self": 31082.231930095644
},
"steps_from_proto": {
"total": 5548.5556198167615,
"count": 6856808,
"is_parallel": true,
"self": 1055.9510837357084,
"children": {
"_process_rank_one_or_two_observation": {
"total": 4492.604536081053,
"count": 27427232,
"is_parallel": true,
"self": 4492.604536081053
}
}
}
}
},
"steps_from_proto": {
"total": 0.40599405227112584,
"count": 498,
"is_parallel": true,
"self": 0.07375247427262366,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.3322415779985022,
"count": 1992,
"is_parallel": true,
"self": 0.3322415779985022
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 22264.158639315836,
"count": 3428405,
"self": 447.3205651437456,
"children": {
"process_trajectory": {
"total": 9309.08619754741,
"count": 3428405,
"self": 9293.06106790845,
"children": {
"RLTrainer._checkpoint": {
"total": 16.025129638961516,
"count": 100,
"self": 16.025129638961516
}
}
},
"_update_policy": {
"total": 12507.75187662468,
"count": 2423,
"self": 5562.759891700873,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6944.991984923807,
"count": 72690,
"self": 6944.991984923807
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.300106178969145e-07,
"count": 1,
"self": 6.300106178969145e-07
},
"TrainerController._save_models": {
"total": 0.15233418199932203,
"count": 1,
"self": 0.0016578139911871403,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1506763680081349,
"count": 1,
"self": 0.1506763680081349
}
}
}
}
}
}
}