ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4396737813949585,
"min": 1.41973078250885,
"max": 1.4459906816482544,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71030.625,
"min": 69600.65625,
"max": 78078.09375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.65412186379929,
"min": 86.99647887323944,
"max": 415.75206611570246,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49469.0,
"min": 48934.0,
"max": 50306.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999952.0,
"min": 49677.0,
"max": 1999952.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999952.0,
"min": 49677.0,
"max": 1999952.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.2949438095092773,
"min": 0.09070170670747757,
"max": 2.3582701683044434,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1280.57861328125,
"min": 10.884204864501953,
"max": 1311.654541015625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.812483593981753,
"min": 1.7742832136650881,
"max": 3.9788425658640416,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2127.3658454418182,
"min": 212.91398563981056,
"max": 2185.128578901291,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.812483593981753,
"min": 1.7742832136650881,
"max": 3.9788425658640416,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2127.3658454418182,
"min": 212.91398563981056,
"max": 2185.128578901291,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014773882242217143,
"min": 0.013454986823813265,
"max": 0.019770293992041842,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.044321646726651426,
"min": 0.028541916280907267,
"max": 0.05584947345584321,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05540079697966576,
"min": 0.018947553634643552,
"max": 0.05657549773653349,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16620239093899727,
"min": 0.037895107269287104,
"max": 0.16758867030342423,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4243488585833295e-06,
"min": 3.4243488585833295e-06,
"max": 0.00029527320157560005,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0273046575749989e-05,
"min": 1.0273046575749989e-05,
"max": 0.0008438227687257499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10114141666666666,
"min": 0.10114141666666666,
"max": 0.1984244,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30342425,
"min": 0.20743290000000003,
"max": 0.5812742500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.69566916666666e-05,
"min": 6.69566916666666e-05,
"max": 0.004921377560000002,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002008700749999998,
"min": 0.0002008700749999998,
"max": 0.014065585075,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688044037",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688046349"
},
"total": 2311.872183133,
"count": 1,
"self": 0.43096573499951774,
"children": {
"run_training.setup": {
"total": 0.04018799100003889,
"count": 1,
"self": 0.04018799100003889
},
"TrainerController.start_learning": {
"total": 2311.4010294070004,
"count": 1,
"self": 4.190115898898057,
"children": {
"TrainerController._reset_env": {
"total": 3.8425629890000437,
"count": 1,
"self": 3.8425629890000437
},
"TrainerController.advance": {
"total": 2303.2890572721026,
"count": 231675,
"self": 4.199044843032425,
"children": {
"env_step": {
"total": 1809.016339519998,
"count": 231675,
"self": 1514.9949158609927,
"children": {
"SubprocessEnvManager._take_step": {
"total": 291.39899918398567,
"count": 231675,
"self": 16.261049007007728,
"children": {
"TorchPolicy.evaluate": {
"total": 275.13795017697794,
"count": 222844,
"self": 275.13795017697794
}
}
},
"workers": {
"total": 2.6224244750195567,
"count": 231675,
"self": 0.0,
"children": {
"worker_root": {
"total": 2304.189483052898,
"count": 231675,
"is_parallel": true,
"self": 1062.0956159908446,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007451420000279541,
"count": 1,
"is_parallel": true,
"self": 0.00022424600001613726,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005208960000118168,
"count": 2,
"is_parallel": true,
"self": 0.0005208960000118168
}
}
},
"UnityEnvironment.step": {
"total": 0.029080112999963603,
"count": 1,
"is_parallel": true,
"self": 0.00031441699991319183,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002476170000136335,
"count": 1,
"is_parallel": true,
"self": 0.0002476170000136335
},
"communicator.exchange": {
"total": 0.027789966000000277,
"count": 1,
"is_parallel": true,
"self": 0.027789966000000277
},
"steps_from_proto": {
"total": 0.000728113000036501,
"count": 1,
"is_parallel": true,
"self": 0.00020781199998509692,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005203010000514041,
"count": 2,
"is_parallel": true,
"self": 0.0005203010000514041
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1242.0938670620535,
"count": 231674,
"is_parallel": true,
"self": 37.34256720321309,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.27660544099524,
"count": 231674,
"is_parallel": true,
"self": 77.27660544099524
},
"communicator.exchange": {
"total": 1035.814791280899,
"count": 231674,
"is_parallel": true,
"self": 1035.814791280899
},
"steps_from_proto": {
"total": 91.6599031369459,
"count": 231674,
"is_parallel": true,
"self": 32.685745067905145,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.974158069040755,
"count": 463348,
"is_parallel": true,
"self": 58.974158069040755
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 490.07367290907234,
"count": 231675,
"self": 5.991424122017634,
"children": {
"process_trajectory": {
"total": 128.83472958405343,
"count": 231675,
"self": 127.95419924505325,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8805303390001882,
"count": 10,
"self": 0.8805303390001882
}
}
},
"_update_policy": {
"total": 355.2475192030013,
"count": 97,
"self": 305.49495551501195,
"children": {
"TorchPPOOptimizer.update": {
"total": 49.75256368798932,
"count": 2910,
"self": 49.75256368798932
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3359999684325885e-06,
"count": 1,
"self": 1.3359999684325885e-06
},
"TrainerController._save_models": {
"total": 0.0792919109999275,
"count": 1,
"self": 0.0011804490000031365,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07811146199992436,
"count": 1,
"self": 0.07811146199992436
}
}
}
}
}
}
}