{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5615555644035339,
"min": 0.5575332641601562,
"max": 1.4418917894363403,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16720.87890625,
"min": 16720.87890625,
"max": 43741.23046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989954.0,
"min": 29934.0,
"max": 989954.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989954.0,
"min": 29934.0,
"max": 989954.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4908810555934906,
"min": -0.2482738196849823,
"max": 0.4908810555934906,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 134.01052856445312,
"min": -58.84089660644531,
"max": 134.01052856445312,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.18048235774040222,
"min": -0.06717043370008469,
"max": 0.2645304203033447,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 49.27168273925781,
"min": -17.8001651763916,
"max": 63.487300872802734,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07065648124781936,
"min": 0.06458748826987269,
"max": 0.07310087637334864,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.989190737469471,
"min": 0.5548138253556618,
"max": 1.0711004963554396,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.024175926813595216,
"min": 0.0014057098657258827,
"max": 0.024175926813595216,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.33846297539033304,
"min": 0.01799983360799972,
"max": 0.33846297539033304,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.600040323828569e-06,
"min": 7.600040323828569e-06,
"max": 0.000294842026719325,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010640056453359997,
"min": 0.00010640056453359997,
"max": 0.0036088254970581997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025333142857143,
"min": 0.1025333142857143,
"max": 0.19828067500000002,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4354664000000001,
"min": 1.4354664000000001,
"max": 2.5694593,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002630780971428571,
"min": 0.0002630780971428571,
"max": 0.0098282394325,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003683093359999999,
"min": 0.003683093359999999,
"max": 0.12030388582000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009422557428479195,
"min": 0.008676164783537388,
"max": 0.4038880467414856,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13191580772399902,
"min": 0.12146630138158798,
"max": 3.2311043739318848,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 399.6707317073171,
"min": 399.6707317073171,
"max": 998.9375,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32773.0,
"min": 16477.0,
"max": 32773.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5258394833884121,
"min": -0.937368803890422,
"max": 1.5258394833884121,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 123.59299815446138,
"min": -29.995801724493504,
"max": 123.59299815446138,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5258394833884121,
"min": -0.937368803890422,
"max": 1.5258394833884121,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 123.59299815446138,
"min": -29.995801724493504,
"max": 123.59299815446138,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03880632835667419,
"min": 0.03880632835667419,
"max": 7.4559591123286415,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1433125968906097,
"min": 2.59506837913068,
"max": 126.7513049095869,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685541119",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685544324"
},
"total": 3205.251565461,
"count": 1,
"self": 0.5919918469999175,
"children": {
"run_training.setup": {
"total": 0.09185065999997732,
"count": 1,
"self": 0.09185065999997732
},
"TrainerController.start_learning": {
"total": 3204.567722954,
"count": 1,
"self": 2.373566274989116,
"children": {
"TrainerController._reset_env": {
"total": 1.4379054199999928,
"count": 1,
"self": 1.4379054199999928
},
"TrainerController.advance": {
"total": 3200.6391955030113,
"count": 63591,
"self": 2.454261158065947,
"children": {
"env_step": {
"total": 2057.724929087961,
"count": 63591,
"self": 1916.861226287026,
"children": {
"SubprocessEnvManager._take_step": {
"total": 139.33535961700238,
"count": 63591,
"self": 7.042721024984644,
"children": {
"TorchPolicy.evaluate": {
"total": 132.29263859201774,
"count": 62559,
"self": 132.29263859201774
}
}
},
"workers": {
"total": 1.5283431839324066,
"count": 63591,
"self": 0.0,
"children": {
"worker_root": {
"total": 3197.830950685959,
"count": 63591,
"is_parallel": true,
"self": 1459.7216970189334,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007620687999974507,
"count": 1,
"is_parallel": true,
"self": 0.005589987999996993,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020306999999775144,
"count": 8,
"is_parallel": true,
"self": 0.0020306999999775144
}
}
},
"UnityEnvironment.step": {
"total": 0.0618143860000373,
"count": 1,
"is_parallel": true,
"self": 0.0006967660001464537,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038816099993255193,
"count": 1,
"is_parallel": true,
"self": 0.00038816099993255193
},
"communicator.exchange": {
"total": 0.05852409900001021,
"count": 1,
"is_parallel": true,
"self": 0.05852409900001021
},
"steps_from_proto": {
"total": 0.0022053599999480866,
"count": 1,
"is_parallel": true,
"self": 0.00046366000003672525,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017416999999113614,
"count": 8,
"is_parallel": true,
"self": 0.0017416999999113614
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1738.1092536670258,
"count": 63590,
"is_parallel": true,
"self": 48.37754291311194,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.71554404390929,
"count": 63590,
"is_parallel": true,
"self": 26.71554404390929
},
"communicator.exchange": {
"total": 1525.1164294810135,
"count": 63590,
"is_parallel": true,
"self": 1525.1164294810135
},
"steps_from_proto": {
"total": 137.89973722899106,
"count": 63590,
"is_parallel": true,
"self": 29.46247766308545,
"children": {
"_process_rank_one_or_two_observation": {
"total": 108.4372595659056,
"count": 508720,
"is_parallel": true,
"self": 108.4372595659056
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1140.4600052569845,
"count": 63591,
"self": 4.885910436054019,
"children": {
"process_trajectory": {
"total": 146.3367421679302,
"count": 63591,
"self": 145.9549857109307,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3817564569994829,
"count": 2,
"self": 0.3817564569994829
}
}
},
"_update_policy": {
"total": 989.2373526530001,
"count": 456,
"self": 428.6808045020081,
"children": {
"TorchPPOOptimizer.update": {
"total": 560.5565481509921,
"count": 22797,
"self": 560.5565481509921
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4060001376492437e-06,
"count": 1,
"self": 1.4060001376492437e-06
},
"TrainerController._save_models": {
"total": 0.11705434999976205,
"count": 1,
"self": 0.0015564869995614572,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1154978630002006,
"count": 1,
"self": 0.1154978630002006
}
}
}
}
}
}
}