{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.814974308013916,
"min": 1.7633943557739258,
"max": 3.29575252532959,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 36589.8828125,
"min": 13200.7138671875,
"max": 114470.890625,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 45.407407407407405,
"min": 35.6865671641791,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19616.0,
"min": 11588.0,
"max": 30096.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1491.125756455233,
"min": 1201.2421091626256,
"max": 1505.2723340072453,
"count": 491
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 322083.1633943303,
"min": 2403.301746740039,
"max": 396736.09886158083,
"count": 491
},
"SoccerTwos.Step.mean": {
"value": 4999848.0,
"min": 9066.0,
"max": 4999848.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999848.0,
"min": 9066.0,
"max": 4999848.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.045013878494501114,
"min": -0.13065846264362335,
"max": 0.15592828392982483,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -9.722997665405273,
"min": -27.96091079711914,
"max": 38.202430725097656,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.046934738755226135,
"min": -0.1341095268726349,
"max": 0.15659089386463165,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -10.137903213500977,
"min": -28.699440002441406,
"max": 37.71289825439453,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.039942592934325886,
"min": -0.5856400012969971,
"max": 0.5111698630737932,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -8.627600073814392,
"min": -82.45099991559982,
"max": 56.61560016870499,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.039942592934325886,
"min": -0.5856400012969971,
"max": 0.5111698630737932,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -8.627600073814392,
"min": -82.45099991559982,
"max": 56.61560016870499,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.012566837597599563,
"min": 0.010382519401415873,
"max": 0.025903802906395868,
"count": 240
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.012566837597599563,
"min": 0.010382519401415873,
"max": 0.025903802906395868,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.12692004963755607,
"min": 7.000466393947136e-05,
"max": 0.13487323969602585,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.12692004963755607,
"min": 7.000466393947136e-05,
"max": 0.13487323969602585,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.12931963553031287,
"min": 7.382410900997153e-05,
"max": 0.13686487426360447,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.12931963553031287,
"min": 7.382410900997153e-05,
"max": 0.13686487426360447,
"count": 240
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678764878",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Notes\\drl-course\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1678796603"
},
"total": 31724.854561800003,
"count": 1,
"self": 0.544815600002039,
"children": {
"run_training.setup": {
"total": 0.22925430000000002,
"count": 1,
"self": 0.22925430000000002
},
"TrainerController.start_learning": {
"total": 31724.0804919,
"count": 1,
"self": 15.183050999705301,
"children": {
"TrainerController._reset_env": {
"total": 5.926682000001983,
"count": 25,
"self": 5.926682000001983
},
"TrainerController.advance": {
"total": 31702.719917500297,
"count": 345516,
"self": 16.836411301741464,
"children": {
"env_step": {
"total": 12136.688460399371,
"count": 345516,
"self": 9618.453549300712,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2508.205003499238,
"count": 345516,
"self": 96.13952259947018,
"children": {
"TorchPolicy.evaluate": {
"total": 2412.065480899768,
"count": 629718,
"self": 2412.065480899768
}
}
},
"workers": {
"total": 10.029907599421822,
"count": 345516,
"self": 0.0,
"children": {
"worker_root": {
"total": 31694.525312700403,
"count": 345516,
"is_parallel": true,
"self": 23925.199966901106,
"children": {
"steps_from_proto": {
"total": 0.09313079999566298,
"count": 50,
"is_parallel": true,
"self": 0.020926200016525343,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.07220459997913764,
"count": 200,
"is_parallel": true,
"self": 0.07220459997913764
}
}
},
"UnityEnvironment.step": {
"total": 7769.232214999303,
"count": 345516,
"is_parallel": true,
"self": 438.3739221002161,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 384.2993512994746,
"count": 345516,
"is_parallel": true,
"self": 384.2993512994746
},
"communicator.exchange": {
"total": 5461.771801900997,
"count": 345516,
"is_parallel": true,
"self": 5461.771801900997
},
"steps_from_proto": {
"total": 1484.7871396986156,
"count": 691032,
"is_parallel": true,
"self": 290.16315419870944,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1194.6239854999062,
"count": 2764128,
"is_parallel": true,
"self": 1194.6239854999062
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 19549.195045799184,
"count": 345516,
"self": 92.11313789905034,
"children": {
"process_trajectory": {
"total": 3098.2860601001325,
"count": 345516,
"self": 3095.9418652001336,
"children": {
"RLTrainer._checkpoint": {
"total": 2.344194899998911,
"count": 10,
"self": 2.344194899998911
}
}
},
"_update_policy": {
"total": 16358.795847800002,
"count": 240,
"self": 1257.692684700005,
"children": {
"TorchPOCAOptimizer.update": {
"total": 15101.103163099997,
"count": 7200,
"self": 15101.103163099997
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.7999991541728377e-06,
"count": 1,
"self": 1.7999991541728377e-06
},
"TrainerController._save_models": {
"total": 0.2508395999975619,
"count": 1,
"self": 0.016176199995243223,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23466340000231867,
"count": 1,
"self": 0.23466340000231867
}
}
}
}
}
}
}