{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.407120943069458,
"min": 1.407120943069458,
"max": 1.425885796546936,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69825.5625,
"min": 67969.3515625,
"max": 77593.3984375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 96.98238747553816,
"min": 89.14620938628158,
"max": 400.864,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49558.0,
"min": 48907.0,
"max": 50204.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999957.0,
"min": 49637.0,
"max": 1999957.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999957.0,
"min": 49637.0,
"max": 1999957.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4539928436279297,
"min": -0.02615596167743206,
"max": 2.4547839164733887,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1253.9903564453125,
"min": -3.2433393001556396,
"max": 1358.768798828125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8176695849097637,
"min": 1.7548383224395014,
"max": 3.8753163588371917,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1950.8291578888893,
"min": 217.59995198249817,
"max": 2143.978658914566,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8176695849097637,
"min": 1.7548383224395014,
"max": 3.8753163588371917,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1950.8291578888893,
"min": 217.59995198249817,
"max": 2143.978658914566,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01701510447860023,
"min": 0.014531158714968722,
"max": 0.01989825697770963,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05104531343580068,
"min": 0.029062317429937444,
"max": 0.05897078655931788,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06090267706248496,
"min": 0.02275074003264308,
"max": 0.06107522981862227,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18270803118745488,
"min": 0.04550148006528616,
"max": 0.18270803118745488,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3244488918833426e-06,
"min": 3.3244488918833426e-06,
"max": 0.0002953017015660999,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.973346675650027e-06,
"min": 9.973346675650027e-06,
"max": 0.0008439873186708998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10110811666666668,
"min": 0.10110811666666668,
"max": 0.1984339,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30332435,
"min": 0.20734210000000003,
"max": 0.5813291,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.529502166666684e-05,
"min": 6.529502166666684e-05,
"max": 0.00492185161,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001958850650000005,
"min": 0.0001958850650000005,
"max": 0.014068322090000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678103275",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678105740"
},
"total": 2464.9769691829997,
"count": 1,
"self": 0.4468960909998714,
"children": {
"run_training.setup": {
"total": 0.1062461629999234,
"count": 1,
"self": 0.1062461629999234
},
"TrainerController.start_learning": {
"total": 2464.423826929,
"count": 1,
"self": 4.441555363046518,
"children": {
"TrainerController._reset_env": {
"total": 10.134434693000003,
"count": 1,
"self": 10.134434693000003
},
"TrainerController.advance": {
"total": 2449.737244558954,
"count": 231695,
"self": 4.57065297798863,
"children": {
"env_step": {
"total": 1907.9283625969294,
"count": 231695,
"self": 1590.138868173967,
"children": {
"SubprocessEnvManager._take_step": {
"total": 314.9301889940407,
"count": 231695,
"self": 16.753770968903837,
"children": {
"TorchPolicy.evaluate": {
"total": 298.17641802513685,
"count": 222978,
"self": 74.91379138714387,
"children": {
"TorchPolicy.sample_actions": {
"total": 223.26262663799298,
"count": 222978,
"self": 223.26262663799298
}
}
}
}
},
"workers": {
"total": 2.8593054289217434,
"count": 231695,
"self": 0.0,
"children": {
"worker_root": {
"total": 2456.074277887987,
"count": 231695,
"is_parallel": true,
"self": 1167.039134227069,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009055419998276193,
"count": 1,
"is_parallel": true,
"self": 0.0003450449999036209,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005604969999239984,
"count": 2,
"is_parallel": true,
"self": 0.0005604969999239984
}
}
},
"UnityEnvironment.step": {
"total": 0.030514734000007593,
"count": 1,
"is_parallel": true,
"self": 0.0003039780003746273,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020430699987628032,
"count": 1,
"is_parallel": true,
"self": 0.00020430699987628032
},
"communicator.exchange": {
"total": 0.028518968999833305,
"count": 1,
"is_parallel": true,
"self": 0.028518968999833305
},
"steps_from_proto": {
"total": 0.00148747999992338,
"count": 1,
"is_parallel": true,
"self": 0.0002790649998587469,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012084150000646332,
"count": 2,
"is_parallel": true,
"self": 0.0012084150000646332
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1289.035143660918,
"count": 231694,
"is_parallel": true,
"self": 38.77071092598612,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.05238640591892,
"count": 231694,
"is_parallel": true,
"self": 83.05238640591892
},
"communicator.exchange": {
"total": 1074.5152245019767,
"count": 231694,
"is_parallel": true,
"self": 1074.5152245019767
},
"steps_from_proto": {
"total": 92.69682182703627,
"count": 231694,
"is_parallel": true,
"self": 39.69256609415493,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.00425573288135,
"count": 463388,
"is_parallel": true,
"self": 53.00425573288135
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 537.2382289840361,
"count": 231695,
"self": 6.907044160983105,
"children": {
"process_trajectory": {
"total": 167.8895153010501,
"count": 231695,
"self": 166.57349004104958,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3160252600005151,
"count": 10,
"self": 1.3160252600005151
}
}
},
"_update_policy": {
"total": 362.44166952200294,
"count": 97,
"self": 304.3569444739983,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.08472504800466,
"count": 2910,
"self": 58.08472504800466
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.183999756904086e-06,
"count": 1,
"self": 1.183999756904086e-06
},
"TrainerController._save_models": {
"total": 0.11059112999964782,
"count": 1,
"self": 0.0019301959996482765,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10866093399999954,
"count": 1,
"self": 0.10866093399999954
}
}
}
}
}
}
}