ppo-Pyramids / run_logs /timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6243898272514343,
"min": 0.5674708485603333,
"max": 1.454408049583435,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18651.7734375,
"min": 17024.125,
"max": 44120.921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989921.0,
"min": 29952.0,
"max": 989921.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989921.0,
"min": 29952.0,
"max": 989921.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.03518611937761307,
"min": -0.17778491973876953,
"max": 0.03518611937761307,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 8.585412979125977,
"min": -42.13502502441406,
"max": 8.585412979125977,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.012558316811919212,
"min": 0.01059811096638441,
"max": 0.29910263419151306,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.0642292499542236,
"min": 2.543546676635742,
"max": 72.08373260498047,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06582622735212292,
"min": 0.06286433482914706,
"max": 0.07375932751090221,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9215671829297208,
"min": 0.5001349717679022,
"max": 1.0164294768959128,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.004203066073105653,
"min": 5.0809603782794514e-05,
"max": 0.010968095950336019,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.05884292502347913,
"min": 0.0006605248491763287,
"max": 0.07840822366900586,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.263654721671429e-06,
"min": 7.263654721671429e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001016911661034,
"min": 0.0001016911661034,
"max": 0.0033307068897644,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242118571428571,
"min": 0.10242118571428571,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4338966,
"min": 1.3886848,
"max": 2.3169052,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002518764528571429,
"min": 0.0002518764528571429,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035262703400000006,
"min": 0.0035262703400000006,
"max": 0.11103253644000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01492735743522644,
"min": 0.01492735743522644,
"max": 0.4478795826435089,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20898300409317017,
"min": 0.20898300409317017,
"max": 3.1351571083068848,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 855.2222222222222,
"min": 852.2727272727273,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30788.0,
"min": 15984.0,
"max": 32408.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.133427820685837,
"min": -1.0000000521540642,
"max": -0.06475763135787213,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -4.803401544690132,
"min": -31.9980016797781,
"max": -2.13700183480978,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.133427820685837,
"min": -1.0000000521540642,
"max": -0.06475763135787213,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -4.803401544690132,
"min": -31.9980016797781,
"max": -2.13700183480978,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.1318850523417091,
"min": 0.1318850523417091,
"max": 8.805015672929585,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.747861884301528,
"min": 4.558996976353228,
"max": 140.88025076687336,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682325944",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682327981"
},
"total": 2037.066445472,
"count": 1,
"self": 0.4762949059997936,
"children": {
"run_training.setup": {
"total": 0.17502914700003203,
"count": 1,
"self": 0.17502914700003203
},
"TrainerController.start_learning": {
"total": 2036.4151214190001,
"count": 1,
"self": 1.4348967679134148,
"children": {
"TrainerController._reset_env": {
"total": 4.849771162999787,
"count": 1,
"self": 4.849771162999787
},
"TrainerController.advance": {
"total": 2030.0306145090872,
"count": 63055,
"self": 1.5257392391340545,
"children": {
"env_step": {
"total": 1427.2917346609993,
"count": 63055,
"self": 1317.669803294001,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.7762585750354,
"count": 63055,
"self": 4.908421222097331,
"children": {
"TorchPolicy.evaluate": {
"total": 103.86783735293807,
"count": 62547,
"self": 103.86783735293807
}
}
},
"workers": {
"total": 0.8456727919628975,
"count": 63055,
"self": 0.0,
"children": {
"worker_root": {
"total": 2031.5331836010184,
"count": 63055,
"is_parallel": true,
"self": 825.7903345240452,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0024519900000541384,
"count": 1,
"is_parallel": true,
"self": 0.0007024989997717057,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017494910002824327,
"count": 8,
"is_parallel": true,
"self": 0.0017494910002824327
}
}
},
"UnityEnvironment.step": {
"total": 0.04602847199998905,
"count": 1,
"is_parallel": true,
"self": 0.0005377929999212938,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005566740001086146,
"count": 1,
"is_parallel": true,
"self": 0.0005566740001086146
},
"communicator.exchange": {
"total": 0.04337915199994313,
"count": 1,
"is_parallel": true,
"self": 0.04337915199994313
},
"steps_from_proto": {
"total": 0.00155485300001601,
"count": 1,
"is_parallel": true,
"self": 0.00034332399991399143,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012115290001020185,
"count": 8,
"is_parallel": true,
"self": 0.0012115290001020185
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1205.7428490769732,
"count": 63054,
"is_parallel": true,
"self": 32.44478067513455,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.375522333941944,
"count": 63054,
"is_parallel": true,
"self": 24.375522333941944
},
"communicator.exchange": {
"total": 1051.5703677829727,
"count": 63054,
"is_parallel": true,
"self": 1051.5703677829727
},
"steps_from_proto": {
"total": 97.3521782849241,
"count": 63054,
"is_parallel": true,
"self": 20.93598629879375,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.41619198613034,
"count": 504432,
"is_parallel": true,
"self": 76.41619198613034
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 601.2131406089538,
"count": 63055,
"self": 2.2874602629233323,
"children": {
"process_trajectory": {
"total": 105.3291275310196,
"count": 63055,
"self": 105.1213864900194,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2077410410001903,
"count": 2,
"self": 0.2077410410001903
}
}
},
"_update_policy": {
"total": 493.5965528150109,
"count": 432,
"self": 312.4039116979902,
"children": {
"TorchPPOOptimizer.update": {
"total": 181.19264111702068,
"count": 22863,
"self": 181.19264111702068
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.109999155043624e-07,
"count": 1,
"self": 9.109999155043624e-07
},
"TrainerController._save_models": {
"total": 0.09983806799982631,
"count": 1,
"self": 0.0013638139994327503,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09847425400039356,
"count": 1,
"self": 0.09847425400039356
}
}
}
}
}
}
}
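
For reference, a minimal sketch of how the "gauges" block above can be loaded and summarized, using only the Python standard library. The local file name "timers.json" and the print format are assumptions for illustration, not part of the log itself.

import json

# Load the ML-Agents timers report; "timers.json" is an assumed local path
# (this file normally lives under the run's run_logs/ directory).
with open("timers.json") as f:
    report = json.load(f)

# Each gauge stores the most recent value plus the running min/max
# observed across `count` summary writes during training.
for name, gauge in report["gauges"].items():
    print(f"{name}: value={gauge['value']:.6g} "
          f"(min={gauge['min']:.6g}, max={gauge['max']:.6g}, n={gauge['count']})")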