{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.48850154876708984,
"min": 0.48850154876708984,
"max": 1.4755275249481201,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14733.20703125,
"min": 14733.20703125,
"max": 44761.6015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989972.0,
"min": 29952.0,
"max": 989972.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989972.0,
"min": 29952.0,
"max": 989972.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7176522016525269,
"min": -0.11127028614282608,
"max": 0.7176522016525269,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 209.554443359375,
"min": -26.816139221191406,
"max": 209.554443359375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0437835156917572,
"min": 0.008350442163646221,
"max": 0.3982731103897095,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 12.784786224365234,
"min": 2.313072443008423,
"max": 94.3907241821289,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07546330550740961,
"min": 0.06476313198408751,
"max": 0.07546330550740961,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0564862771037344,
"min": 0.49460572164288946,
"max": 1.089465714719457,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015976831384163927,
"min": 0.00038802330399016234,
"max": 0.01612685448748784,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22367563937829496,
"min": 0.0038802330399016235,
"max": 0.2419028173123176,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.579961759092855e-06,
"min": 7.579961759092855e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010611946462729998,
"min": 0.00010611946462729998,
"max": 0.003136647254451,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252662142857143,
"min": 0.10252662142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4353727,
"min": 1.3886848,
"max": 2.400546,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026240948071428567,
"min": 0.00026240948071428567,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036737327299999996,
"min": 0.0036737327299999996,
"max": 0.10458034509999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011238059028983116,
"min": 0.011238059028983116,
"max": 0.46219751238822937,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15733282268047333,
"min": 0.15733282268047333,
"max": 3.235382556915283,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 260.25210084033614,
"min": 252.47368421052633,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30970.0,
"min": 15984.0,
"max": 32496.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7395762583966983,
"min": -1.0000000521540642,
"max": 1.7395762583966983,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 205.2699984908104,
"min": -31.99920167028904,
"max": 205.2699984908104,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7395762583966983,
"min": -1.0000000521540642,
"max": 1.7395762583966983,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 205.2699984908104,
"min": -31.99920167028904,
"max": 205.2699984908104,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.030279496394618716,
"min": 0.030279496394618716,
"max": 9.224788644351065,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.5729805745650083,
"min": 3.5204053386405576,
"max": 147.59661830961704,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679531440",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679533824"
},
"total": 2384.234228023,
"count": 1,
"self": 0.8961950160000924,
"children": {
"run_training.setup": {
"total": 0.169306038000002,
"count": 1,
"self": 0.169306038000002
},
"TrainerController.start_learning": {
"total": 2383.168726969,
"count": 1,
"self": 1.6546354450665604,
"children": {
"TrainerController._reset_env": {
"total": 9.852543793000052,
"count": 1,
"self": 9.852543793000052
},
"TrainerController.advance": {
"total": 2371.520595146933,
"count": 63943,
"self": 1.7681283489014277,
"children": {
"env_step": {
"total": 1709.8911882030268,
"count": 63943,
"self": 1586.2680713330087,
"children": {
"SubprocessEnvManager._take_step": {
"total": 122.64285272801783,
"count": 63943,
"self": 5.612532301032104,
"children": {
"TorchPolicy.evaluate": {
"total": 117.03032042698572,
"count": 62561,
"self": 117.03032042698572
}
}
},
"workers": {
"total": 0.9802641420003511,
"count": 63943,
"self": 0.0,
"children": {
"worker_root": {
"total": 2377.2013677679856,
"count": 63943,
"is_parallel": true,
"self": 923.4828946680141,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007448882000005597,
"count": 1,
"is_parallel": true,
"self": 0.005019197999956759,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024296840000488373,
"count": 8,
"is_parallel": true,
"self": 0.0024296840000488373
}
}
},
"UnityEnvironment.step": {
"total": 0.05169796599994925,
"count": 1,
"is_parallel": true,
"self": 0.0005603649998988658,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005421710000064195,
"count": 1,
"is_parallel": true,
"self": 0.0005421710000064195
},
"communicator.exchange": {
"total": 0.04893172700002424,
"count": 1,
"is_parallel": true,
"self": 0.04893172700002424
},
"steps_from_proto": {
"total": 0.0016637030000197228,
"count": 1,
"is_parallel": true,
"self": 0.0003772320000052787,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012864710000144441,
"count": 8,
"is_parallel": true,
"self": 0.0012864710000144441
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1453.7184730999716,
"count": 63942,
"is_parallel": true,
"self": 33.96729401390394,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.660052194022455,
"count": 63942,
"is_parallel": true,
"self": 25.660052194022455
},
"communicator.exchange": {
"total": 1289.1195084210135,
"count": 63942,
"is_parallel": true,
"self": 1289.1195084210135
},
"steps_from_proto": {
"total": 104.97161847103172,
"count": 63942,
"is_parallel": true,
"self": 23.163390141994796,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.80822832903692,
"count": 511536,
"is_parallel": true,
"self": 81.80822832903692
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 659.8612785950045,
"count": 63943,
"self": 2.829841813949315,
"children": {
"process_trajectory": {
"total": 127.94745113905424,
"count": 63943,
"self": 127.4499458800542,
"children": {
"RLTrainer._checkpoint": {
"total": 0.49750525900003595,
"count": 2,
"self": 0.49750525900003595
}
}
},
"_update_policy": {
"total": 529.0839856420009,
"count": 441,
"self": 336.7843891340056,
"children": {
"TorchPPOOptimizer.update": {
"total": 192.29959650799532,
"count": 22833,
"self": 192.29959650799532
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5790001270943321e-06,
"count": 1,
"self": 1.5790001270943321e-06
},
"TrainerController._save_models": {
"total": 0.14095100500026092,
"count": 1,
"self": 0.0020867940002062824,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13886421100005464,
"count": 1,
"self": 0.13886421100005464
}
}
}
}
}
}
}