{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.9263202548027039,
"min": 0.9263202548027039,
"max": 1.5788636207580566,
"count": 10
},
"Pyramids.Policy.Entropy.sum": {
"value": 9129.8125,
"min": 7983.814453125,
"max": 16167.5634765625,
"count": 10
},
"Pyramids.Step.mean": {
"value": 99947.0,
"min": 9984.0,
"max": 99947.0,
"count": 10
},
"Pyramids.Step.sum": {
"value": 99947.0,
"min": 9984.0,
"max": 99947.0,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.09678422659635544,
"min": -0.1129414364695549,
"max": 0.10948534309864044,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -7.839522361755371,
"min": -8.809432029724121,
"max": 8.539856910705566,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.1716296523809433,
"min": 0.1716296523809433,
"max": 0.7205013036727905,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 13.902002334594727,
"min": 13.902002334594727,
"max": 58.36060333251953,
"count": 10
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06877445011279275,
"min": 0.06639022503302923,
"max": 0.07694783635411617,
"count": 10
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.20632335033837823,
"min": 0.13781033674871881,
"max": 0.2761155718596612,
"count": 10
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0002720925150039968,
"min": 0.00023942870047159712,
"max": 0.02972565038362518,
"count": 10
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.0008162775450119903,
"min": 0.0007182861014147913,
"max": 0.05945130076725036,
"count": 10
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.733509422166667e-05,
"min": 1.733509422166667e-05,
"max": 0.00028156800614399997,
"count": 10
},
"Pyramids.Policy.LearningRate.sum": {
"value": 5.2005282665e-05,
"min": 5.2005282665e-05,
"max": 0.0006822720725759999,
"count": 10
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10577833333333331,
"min": 0.10577833333333331,
"max": 0.19385600000000003,
"count": 10
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.3173349999999999,
"min": 0.3173349999999999,
"max": 0.583252,
"count": 10
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0005872555000000001,
"min": 0.0005872555000000001,
"max": 0.0093862144,
"count": 10
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0017617665000000003,
"min": 0.0017617665000000003,
"max": 0.0227496576,
"count": 10
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.11177968978881836,
"min": 0.11177968978881836,
"max": 1.0757884979248047,
"count": 10
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.3353390693664551,
"min": 0.3353390693664551,
"max": 2.1515769958496094,
"count": 10
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 989.6875,
"max": 999.0,
"count": 6
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 15984.0,
"min": 15835.0,
"max": 15984.0,
"count": 6
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.999987552408129,
"min": -1.0000000521540642,
"max": -0.8656250522471964,
"count": 6
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -15.999800838530064,
"min": -16.000000834465027,
"max": -13.850000835955143,
"count": 6
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.999987552408129,
"min": -1.0000000521540642,
"max": -0.8656250522471964,
"count": 6
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -15.999800838530064,
"min": -16.000000834465027,
"max": -13.850000835955143,
"count": 6
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.1870959945954382,
"min": 1.1870959945954382,
"max": 13.083724451251328,
"count": 6
},
"Pyramids.Policy.RndReward.sum": {
"value": 18.993535913527012,
"min": 18.993535913527012,
"max": 209.33959122002125,
"count": 6
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681566591",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681566809"
},
"total": 218.27611564200015,
"count": 1,
"self": 0.4766015579998566,
"children": {
"run_training.setup": {
"total": 0.11251989700031118,
"count": 1,
"self": 0.11251989700031118
},
"TrainerController.start_learning": {
"total": 217.68699418699998,
"count": 1,
"self": 0.18544919999021658,
"children": {
"TrainerController._reset_env": {
"total": 5.237206756999967,
"count": 1,
"self": 5.237206756999967
},
"TrainerController.advance": {
"total": 212.1478460660096,
"count": 6260,
"self": 0.193791109020367,
"children": {
"env_step": {
"total": 146.0962015,
"count": 6260,
"self": 132.2145495740092,
"children": {
"SubprocessEnvManager._take_step": {
"total": 13.76854960498622,
"count": 6260,
"self": 0.5599510749580077,
"children": {
"TorchPolicy.evaluate": {
"total": 13.208598530028212,
"count": 6256,
"self": 13.208598530028212
}
}
},
"workers": {
"total": 0.11310232100458961,
"count": 6260,
"self": 0.0,
"children": {
"worker_root": {
"total": 216.9162498799992,
"count": 6260,
"is_parallel": true,
"self": 97.84487957697593,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0027710670001397375,
"count": 1,
"is_parallel": true,
"self": 0.0008494859994243598,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019215810007153777,
"count": 8,
"is_parallel": true,
"self": 0.0019215810007153777
}
}
},
"UnityEnvironment.step": {
"total": 0.09633604900000137,
"count": 1,
"is_parallel": true,
"self": 0.0005656070002260094,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000537385999905382,
"count": 1,
"is_parallel": true,
"self": 0.000537385999905382
},
"communicator.exchange": {
"total": 0.09301232899997558,
"count": 1,
"is_parallel": true,
"self": 0.09301232899997558
},
"steps_from_proto": {
"total": 0.0022207269998943957,
"count": 1,
"is_parallel": true,
"self": 0.00044338599991533556,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017773409999790601,
"count": 8,
"is_parallel": true,
"self": 0.0017773409999790601
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 119.07137030302329,
"count": 6259,
"is_parallel": true,
"self": 3.633949107038916,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.5911486249901827,
"count": 6259,
"is_parallel": true,
"self": 2.5911486249901827
},
"communicator.exchange": {
"total": 102.23944599000242,
"count": 6259,
"is_parallel": true,
"self": 102.23944599000242
},
"steps_from_proto": {
"total": 10.606826580991765,
"count": 6259,
"is_parallel": true,
"self": 2.3478326460481185,
"children": {
"_process_rank_one_or_two_observation": {
"total": 8.258993934943646,
"count": 50072,
"is_parallel": true,
"self": 8.258993934943646
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 65.85785345698923,
"count": 6260,
"self": 0.24294909599575476,
"children": {
"process_trajectory": {
"total": 11.011887622992617,
"count": 6260,
"self": 11.011887622992617
},
"_update_policy": {
"total": 54.60301673800086,
"count": 28,
"self": 34.391741211012686,
"children": {
"TorchPPOOptimizer.update": {
"total": 20.211275526988175,
"count": 2286,
"self": 20.211275526988175
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0740000107034575e-06,
"count": 1,
"self": 1.0740000107034575e-06
},
"TrainerController._save_models": {
"total": 0.11649109000018143,
"count": 1,
"self": 0.00157085000000734,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11492024000017409,
"count": 1,
"self": 0.11492024000017409
}
}
}
}
}
}
}