{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4021713733673096,
"min": 1.4021713733673096,
"max": 1.4275363683700562,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70895.1875,
"min": 68978.3515625,
"max": 78406.0390625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 89.6086956521739,
"min": 83.31871838111299,
"max": 420.30252100840335,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49464.0,
"min": 49034.0,
"max": 50018.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999997.0,
"min": 49750.0,
"max": 1999997.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999997.0,
"min": 49750.0,
"max": 1999997.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3815245628356934,
"min": 0.15552332997322083,
"max": 2.466378688812256,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1314.6015625,
"min": 18.35175323486328,
"max": 1418.180908203125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.620478633305301,
"min": 1.8791929675360857,
"max": 3.9519326337793257,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1998.504205584526,
"min": 221.74477016925812,
"max": 2239.2375236153603,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.620478633305301,
"min": 1.8791929675360857,
"max": 3.9519326337793257,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1998.504205584526,
"min": 221.74477016925812,
"max": 2239.2375236153603,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01628947103132911,
"min": 0.01262086578644812,
"max": 0.019527221940613043,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.048868413093987324,
"min": 0.02524173157289624,
"max": 0.05744564153404402,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05094595936437448,
"min": 0.02007751998802026,
"max": 0.06399131553868453,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15283787809312344,
"min": 0.04015503997604052,
"max": 0.18428176591793696,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.331098889666673e-06,
"min": 3.331098889666673e-06,
"max": 0.00029530447656517494,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.993296669000019e-06,
"min": 9.993296669000019e-06,
"max": 0.0008440446186518001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10111033333333334,
"min": 0.10111033333333334,
"max": 0.198434825,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.303331,
"min": 0.2073665,
"max": 0.5813482,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.540563333333345e-05,
"min": 6.540563333333345e-05,
"max": 0.0049218977675,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019621690000000034,
"min": 0.00019621690000000034,
"max": 0.01406927518,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670620320",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670622530"
},
"total": 2210.1163207359996,
"count": 1,
"self": 0.38901782399989315,
"children": {
"run_training.setup": {
"total": 0.10447050799996305,
"count": 1,
"self": 0.10447050799996305
},
"TrainerController.start_learning": {
"total": 2209.622832404,
"count": 1,
"self": 3.9501711980419714,
"children": {
"TrainerController._reset_env": {
"total": 10.387425306999944,
"count": 1,
"self": 10.387425306999944
},
"TrainerController.advance": {
"total": 2195.156565591958,
"count": 232029,
"self": 4.255178841990073,
"children": {
"env_step": {
"total": 1716.735073668934,
"count": 232029,
"self": 1436.9711215199,
"children": {
"SubprocessEnvManager._take_step": {
"total": 277.02533380305124,
"count": 232029,
"self": 14.179262878156692,
"children": {
"TorchPolicy.evaluate": {
"total": 262.84607092489455,
"count": 222982,
"self": 65.30292456399843,
"children": {
"TorchPolicy.sample_actions": {
"total": 197.54314636089612,
"count": 222982,
"self": 197.54314636089612
}
}
}
}
},
"workers": {
"total": 2.7386183459830136,
"count": 232029,
"self": 0.0,
"children": {
"worker_root": {
"total": 2201.8874896959665,
"count": 232029,
"is_parallel": true,
"self": 1023.4634198459312,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006254420999994181,
"count": 1,
"is_parallel": true,
"self": 0.0003456950000781944,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005908725999915987,
"count": 2,
"is_parallel": true,
"self": 0.005908725999915987
}
}
},
"UnityEnvironment.step": {
"total": 0.02736112700006288,
"count": 1,
"is_parallel": true,
"self": 0.00026230599996779347,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020592100008798297,
"count": 1,
"is_parallel": true,
"self": 0.00020592100008798297
},
"communicator.exchange": {
"total": 0.026155173999995895,
"count": 1,
"is_parallel": true,
"self": 0.026155173999995895
},
"steps_from_proto": {
"total": 0.0007377260000112074,
"count": 1,
"is_parallel": true,
"self": 0.0002637070000446329,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004740189999665745,
"count": 2,
"is_parallel": true,
"self": 0.0004740189999665745
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1178.4240698500353,
"count": 232028,
"is_parallel": true,
"self": 34.05872600701696,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 73.89560939702562,
"count": 232028,
"is_parallel": true,
"self": 73.89560939702562
},
"communicator.exchange": {
"total": 978.9571939079772,
"count": 232028,
"is_parallel": true,
"self": 978.9571939079772
},
"steps_from_proto": {
"total": 91.51254053801551,
"count": 232028,
"is_parallel": true,
"self": 37.658069040990426,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.85447149702509,
"count": 464056,
"is_parallel": true,
"self": 53.85447149702509
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 474.1663130810339,
"count": 232029,
"self": 6.009421061049693,
"children": {
"process_trajectory": {
"total": 144.90100215098448,
"count": 232029,
"self": 144.42694658198468,
"children": {
"RLTrainer._checkpoint": {
"total": 0.47405556899980184,
"count": 4,
"self": 0.47405556899980184
}
}
},
"_update_policy": {
"total": 323.2558898689997,
"count": 97,
"self": 269.0223496330027,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.233540235997,
"count": 2910,
"self": 54.233540235997
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.720000212430023e-07,
"count": 1,
"self": 7.720000212430023e-07
},
"TrainerController._save_models": {
"total": 0.1286695349999718,
"count": 1,
"self": 0.001972825999928318,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12669670900004348,
"count": 1,
"self": 0.12669670900004348
}
}
}
}
}
}
}