ppo-Huggy / run_logs / timers.json (commit a6ce452)
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4046534299850464,
"min": 1.4046534299850464,
"max": 1.4266811609268188,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70725.703125,
"min": 68794.25,
"max": 76254.3359375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 73.74776119402985,
"min": 70.73170731707317,
"max": 394.9448818897638,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49411.0,
"min": 49255.0,
"max": 50158.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999938.0,
"min": 49761.0,
"max": 1999938.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999938.0,
"min": 49761.0,
"max": 1999938.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.470362424850464,
"min": -0.00015907296619843692,
"max": 2.541415214538574,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1655.142822265625,
"min": -0.02004319429397583,
"max": 1730.843505859375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7884608942181317,
"min": 1.738795138422459,
"max": 4.0562297526527855,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2538.268799126148,
"min": 219.08818744122982,
"max": 2688.3029173612595,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7884608942181317,
"min": 1.738795138422459,
"max": 4.0562297526527855,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2538.268799126148,
"min": 219.08818744122982,
"max": 2688.3029173612595,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01895052918785546,
"min": 0.014108175187842183,
"max": 0.019938716887392932,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05685158756356638,
"min": 0.028216350375684367,
"max": 0.059816150662178796,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05718085091147158,
"min": 0.025236584121982257,
"max": 0.06446110637237629,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17154255273441474,
"min": 0.050473168243964514,
"max": 0.1859247994919618,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.050798649766663e-06,
"min": 4.050798649766663e-06,
"max": 0.0002953638765453749,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.2152395949299989e-05,
"min": 1.2152395949299989e-05,
"max": 0.0008441092686302498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10135023333333337,
"min": 0.10135023333333337,
"max": 0.19845462500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3040507000000001,
"min": 0.20784370000000005,
"max": 0.5813697500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.737664333333326e-05,
"min": 7.737664333333326e-05,
"max": 0.004922885787499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00023212992999999978,
"min": 0.00023212992999999978,
"max": 0.014070350524999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670697151",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670699341"
},
"total": 2190.627158716,
"count": 1,
"self": 0.38766623199990136,
"children": {
"run_training.setup": {
"total": 0.11843814299999167,
"count": 1,
"self": 0.11843814299999167
},
"TrainerController.start_learning": {
"total": 2190.121054341,
"count": 1,
"self": 3.8303059240183757,
"children": {
"TrainerController._reset_env": {
"total": 10.048180446999993,
"count": 1,
"self": 10.048180446999993
},
"TrainerController.advance": {
"total": 2176.130545426982,
"count": 233624,
"self": 3.9444021258564135,
"children": {
"env_step": {
"total": 1697.6161747410183,
"count": 233624,
"self": 1424.1198198050402,
"children": {
"SubprocessEnvManager._take_step": {
"total": 270.95976966997165,
"count": 233624,
"self": 13.993023913010745,
"children": {
"TorchPolicy.evaluate": {
"total": 256.9667457569609,
"count": 222960,
"self": 64.39682437389598,
"children": {
"TorchPolicy.sample_actions": {
"total": 192.56992138306492,
"count": 222960,
"self": 192.56992138306492
}
}
}
}
},
"workers": {
"total": 2.5365852660064547,
"count": 233624,
"self": 0.0,
"children": {
"worker_root": {
"total": 2182.5100271139527,
"count": 233624,
"is_parallel": true,
"self": 1011.7469399839977,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001863035999974727,
"count": 1,
"is_parallel": true,
"self": 0.00043121399994561216,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014318220000291149,
"count": 2,
"is_parallel": true,
"self": 0.0014318220000291149
}
}
},
"UnityEnvironment.step": {
"total": 0.026429124000003412,
"count": 1,
"is_parallel": true,
"self": 0.00026366400004462776,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023796299996092785,
"count": 1,
"is_parallel": true,
"self": 0.00023796299996092785
},
"communicator.exchange": {
"total": 0.02525173200001518,
"count": 1,
"is_parallel": true,
"self": 0.02525173200001518
},
"steps_from_proto": {
"total": 0.0006757649999826754,
"count": 1,
"is_parallel": true,
"self": 0.00022224399992865074,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004535210000540246,
"count": 2,
"is_parallel": true,
"self": 0.0004535210000540246
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1170.763087129955,
"count": 233623,
"is_parallel": true,
"self": 34.14116633790127,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.63780512102551,
"count": 233623,
"is_parallel": true,
"self": 75.63780512102551
},
"communicator.exchange": {
"total": 970.0169794489473,
"count": 233623,
"is_parallel": true,
"self": 970.0169794489473
},
"steps_from_proto": {
"total": 90.9671362220808,
"count": 233623,
"is_parallel": true,
"self": 37.62726018189795,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.339876040182844,
"count": 467246,
"is_parallel": true,
"self": 53.339876040182844
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 474.5699685601069,
"count": 233624,
"self": 5.6407100051010275,
"children": {
"process_trajectory": {
"total": 155.69061836700536,
"count": 233624,
"self": 155.23234489900534,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4582734680000158,
"count": 4,
"self": 0.4582734680000158
}
}
},
"_update_policy": {
"total": 313.2386401880005,
"count": 97,
"self": 259.81584861600106,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.422791571999426,
"count": 2910,
"self": 53.422791571999426
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1080001058871858e-06,
"count": 1,
"self": 1.1080001058871858e-06
},
"TrainerController._save_models": {
"total": 0.1120214349998605,
"count": 1,
"self": 0.0019008739996024815,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11012056100025802,
"count": 1,
"self": 0.11012056100025802
}
}
}
}
}
}
}
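
A minimal sketch (not part of the run log itself) of one way to inspect this file with only the Python standard library, assuming it sits at run_logs/timers.json as in the repository path above. It prints each gauge's value/min/max/count and then walks the nested "children" timer tree, reporting cumulative seconds per block.

import json

# Assumed path, matching the repository layout shown in the header.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Gauges: each entry records value/min/max/count for a training statistic.
for name, g in timers["gauges"].items():
    print(f"{name}: value={g['value']:.4g} (min={g['min']:.4g}, max={g['max']:.4g}, n={g['count']})")

# Timer tree: walk the nested "children" blocks and report cumulative seconds.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.2f}s over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)

For this run, the walk would start at root (about 2190.6 s over 1 call) and descend through TrainerController.start_learning, TrainerController.advance, env_step, and trainer_advance, mirroring the nesting recorded above.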