{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.34473156929016113,
"min": 0.34473156929016113,
"max": 1.3521031141281128,
"count": 51
},
"Pyramids.Policy.Entropy.sum": {
"value": 10314.3681640625,
"min": 10314.3681640625,
"max": 41017.3984375,
"count": 51
},
"Pyramids.Step.mean": {
"value": 1529955.0,
"min": 29952.0,
"max": 1529955.0,
"count": 51
},
"Pyramids.Step.sum": {
"value": 1529955.0,
"min": 29952.0,
"max": 1529955.0,
"count": 51
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6633832454681396,
"min": -0.1136084496974945,
"max": 0.746944010257721,
"count": 51
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 185.747314453125,
"min": -27.379636764526367,
"max": 216.61376953125,
"count": 51
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.003732958808541298,
"min": -0.03096938692033291,
"max": 0.7258607149124146,
"count": 51
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.0452284812927246,
"min": -8.516581535339355,
"max": 172.02899169921875,
"count": 51
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06778064754554163,
"min": 0.06472327878111087,
"max": 0.07380177942292142,
"count": 51
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9489290656375827,
"min": 0.5092565117175717,
"max": 1.0884144141957364,
"count": 51
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011963429751610955,
"min": 0.0006056632106170908,
"max": 0.016267943262256758,
"count": 51
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16748801652255338,
"min": 0.008479284948639271,
"max": 0.22775120567159463,
"count": 51
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00014849881478612616,
"min": 0.00014849881478612616,
"max": 0.00029838354339596195,
"count": 51
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0020789834070057663,
"min": 0.0020789834070057663,
"max": 0.003937268187577333,
"count": 51
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.14949958809523808,
"min": 0.14949958809523808,
"max": 0.19946118095238097,
"count": 51
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.092994233333333,
"min": 1.3962282666666668,
"max": 2.812422666666667,
"count": 51
},
"Pyramids.Policy.Beta.mean": {
"value": 0.004955008850714286,
"min": 0.004955008850714286,
"max": 0.009946171977142856,
"count": 51
},
"Pyramids.Policy.Beta.sum": {
"value": 0.06937012391,
"min": 0.06937012391,
"max": 0.13126102439999998,
"count": 51
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005452983547002077,
"min": 0.005452983547002077,
"max": 0.5388436317443848,
"count": 51
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.07634177058935165,
"min": 0.07634177058935165,
"max": 3.7719054222106934,
"count": 51
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 319.0860215053763,
"min": 263.77477477477476,
"max": 999.0,
"count": 51
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29675.0,
"min": 15984.0,
"max": 33237.0,
"count": 51
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5733548271399673,
"min": -1.0000000521540642,
"max": 1.712514511285684,
"count": 51
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 146.32199892401695,
"min": -29.90400167554617,
"max": 200.36419782042503,
"count": 51
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5733548271399673,
"min": -1.0000000521540642,
"max": 1.712514511285684,
"count": 51
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 146.32199892401695,
"min": -29.90400167554617,
"max": 200.36419782042503,
"count": 51
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.018114594037122574,
"min": 0.01612276891833412,
"max": 10.815195646137,
"count": 51
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.6846572454523994,
"min": 1.6767679675067484,
"max": 173.043130338192,
"count": 51
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 51
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 51
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673773724",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673776997"
},
"total": 3272.714554587,
"count": 1,
"self": 0.3260815249996085,
"children": {
"run_training.setup": {
"total": 0.10747377200004848,
"count": 1,
"self": 0.10747377200004848
},
"TrainerController.start_learning": {
"total": 3272.28099929,
"count": 1,
"self": 1.7478393000883443,
"children": {
"TrainerController._reset_env": {
"total": 6.456248368999923,
"count": 1,
"self": 6.456248368999923
},
"TrainerController.advance": {
"total": 3263.923110835912,
"count": 99116,
"self": 1.8734890727714628,
"children": {
"env_step": {
"total": 2243.3135783931,
"count": 99116,
"self": 2090.317234769119,
"children": {
"SubprocessEnvManager._take_step": {
"total": 151.90298767602303,
"count": 99116,
"self": 6.355907059004494,
"children": {
"TorchPolicy.evaluate": {
"total": 145.54708061701854,
"count": 96305,
"self": 49.81885900902057,
"children": {
"TorchPolicy.sample_actions": {
"total": 95.72822160799797,
"count": 96305,
"self": 95.72822160799797
}
}
}
}
},
"workers": {
"total": 1.0933559479581163,
"count": 99115,
"self": 0.0,
"children": {
"worker_root": {
"total": 3265.773710493011,
"count": 99115,
"is_parallel": true,
"self": 1321.1656346529653,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018128369999885763,
"count": 1,
"is_parallel": true,
"self": 0.0006593179998617416,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011535190001268347,
"count": 8,
"is_parallel": true,
"self": 0.0011535190001268347
}
}
},
"UnityEnvironment.step": {
"total": 0.046005367000134356,
"count": 1,
"is_parallel": true,
"self": 0.0005876929999431013,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045106200013833586,
"count": 1,
"is_parallel": true,
"self": 0.00045106200013833586
},
"communicator.exchange": {
"total": 0.04313725900010468,
"count": 1,
"is_parallel": true,
"self": 0.04313725900010468
},
"steps_from_proto": {
"total": 0.0018293529999482416,
"count": 1,
"is_parallel": true,
"self": 0.00048235599979307153,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00134699700015517,
"count": 8,
"is_parallel": true,
"self": 0.00134699700015517
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1944.6080758400458,
"count": 99114,
"is_parallel": true,
"self": 43.14153665485469,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 34.81640416908317,
"count": 99114,
"is_parallel": true,
"self": 34.81640416908317
},
"communicator.exchange": {
"total": 1711.7962709400717,
"count": 99114,
"is_parallel": true,
"self": 1711.7962709400717
},
"steps_from_proto": {
"total": 154.8538640760362,
"count": 99114,
"is_parallel": true,
"self": 33.38475969502156,
"children": {
"_process_rank_one_or_two_observation": {
"total": 121.46910438101463,
"count": 792912,
"is_parallel": true,
"self": 121.46910438101463
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1018.7360433700403,
"count": 99115,
"self": 3.4019072830467394,
"children": {
"process_trajectory": {
"total": 220.4935685599869,
"count": 99115,
"self": 220.20375296698762,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2898155929992754,
"count": 3,
"self": 0.2898155929992754
}
}
},
"_update_policy": {
"total": 794.8405675270067,
"count": 704,
"self": 305.5789057240611,
"children": {
"TorchPPOOptimizer.update": {
"total": 489.2616618029456,
"count": 35181,
"self": 489.2616618029456
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4349998309626244e-06,
"count": 1,
"self": 1.4349998309626244e-06
},
"TrainerController._save_models": {
"total": 0.15379934999964462,
"count": 1,
"self": 0.003727043000253616,
"children": {
"RLTrainer._checkpoint": {
"total": 0.150072306999391,
"count": 1,
"self": 0.150072306999391
}
}
}
}
}
}
}