ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.415359616279602,
"min": 1.415359616279602,
"max": 1.4292004108428955,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70084.359375,
"min": 69639.96875,
"max": 77110.421875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 71.0821325648415,
"min": 70.80459770114942,
"max": 420.74789915966386,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49331.0,
"min": 48947.0,
"max": 50069.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999986.0,
"min": 49625.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999986.0,
"min": 49625.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4428482055664062,
"min": 0.16249890625476837,
"max": 2.479626417160034,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1695.336669921875,
"min": 19.17487144470215,
"max": 1695.336669921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7732749524652442,
"min": 1.9373164235030191,
"max": 4.0416962830622305,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2618.6528170108795,
"min": 228.60333797335625,
"max": 2653.244030416012,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7732749524652442,
"min": 1.9373164235030191,
"max": 4.0416962830622305,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2618.6528170108795,
"min": 228.60333797335625,
"max": 2653.244030416012,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.013663005829827551,
"min": 0.013663005829827551,
"max": 0.018452836369397117,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04098901748948265,
"min": 0.027893505278916562,
"max": 0.05300043928272847,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.055115511640906334,
"min": 0.019167904648929833,
"max": 0.061710598816474276,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.165346534922719,
"min": 0.038335809297859666,
"max": 0.18513179644942282,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.962748679116668e-06,
"min": 3.962748679116668e-06,
"max": 0.0002953626765457751,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1888246037350004e-05,
"min": 1.1888246037350004e-05,
"max": 0.0008443923185358998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10132088333333333,
"min": 0.10132088333333333,
"max": 0.1984542250000001,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30396265,
"min": 0.20777394999999999,
"max": 0.5814641,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.591207833333335e-05,
"min": 7.591207833333335e-05,
"max": 0.004922865827500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022773623500000007,
"min": 0.00022773623500000007,
"max": 0.014075058590000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670694849",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670697168"
},
"total": 2318.717126672,
"count": 1,
"self": 0.44580146800035436,
"children": {
"run_training.setup": {
"total": 0.1046893769999997,
"count": 1,
"self": 0.1046893769999997
},
"TrainerController.start_learning": {
"total": 2318.166635827,
"count": 1,
"self": 4.255637515016588,
"children": {
"TrainerController._reset_env": {
"total": 11.462589116999993,
"count": 1,
"self": 11.462589116999993
},
"TrainerController.advance": {
"total": 2302.331045658984,
"count": 232918,
"self": 4.5026448220874045,
"children": {
"env_step": {
"total": 1810.6193721209806,
"count": 232918,
"self": 1516.5234202969377,
"children": {
"SubprocessEnvManager._take_step": {
"total": 291.22467578502994,
"count": 232918,
"self": 14.91300094304961,
"children": {
"TorchPolicy.evaluate": {
"total": 276.31167484198033,
"count": 222832,
"self": 69.70332375503028,
"children": {
"TorchPolicy.sample_actions": {
"total": 206.60835108695005,
"count": 222832,
"self": 206.60835108695005
}
}
}
}
},
"workers": {
"total": 2.8712760390130825,
"count": 232918,
"self": 0.0,
"children": {
"worker_root": {
"total": 2310.0051932210135,
"count": 232918,
"is_parallel": true,
"self": 1067.1094877620212,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002065915999992285,
"count": 1,
"is_parallel": true,
"self": 0.00030548799998086906,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017604280000114159,
"count": 2,
"is_parallel": true,
"self": 0.0017604280000114159
}
}
},
"UnityEnvironment.step": {
"total": 0.02618745699999181,
"count": 1,
"is_parallel": true,
"self": 0.00031866500000887754,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018678300000374293,
"count": 1,
"is_parallel": true,
"self": 0.00018678300000374293
},
"communicator.exchange": {
"total": 0.024984684000060042,
"count": 1,
"is_parallel": true,
"self": 0.024984684000060042
},
"steps_from_proto": {
"total": 0.0006973249999191466,
"count": 1,
"is_parallel": true,
"self": 0.0002599460000283216,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00043737899989082507,
"count": 2,
"is_parallel": true,
"self": 0.00043737899989082507
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1242.8957054589923,
"count": 232917,
"is_parallel": true,
"self": 36.47666515190167,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.58974241609667,
"count": 232917,
"is_parallel": true,
"self": 76.58974241609667
},
"communicator.exchange": {
"total": 1034.6449143960153,
"count": 232917,
"is_parallel": true,
"self": 1034.6449143960153
},
"steps_from_proto": {
"total": 95.18438349497865,
"count": 232917,
"is_parallel": true,
"self": 38.961918832947504,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.22246466203114,
"count": 465834,
"is_parallel": true,
"self": 56.22246466203114
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 487.2090287159159,
"count": 232918,
"self": 6.9090823218834885,
"children": {
"process_trajectory": {
"total": 158.00425628903065,
"count": 232918,
"self": 157.52738638403105,
"children": {
"RLTrainer._checkpoint": {
"total": 0.476869904999603,
"count": 4,
"self": 0.476869904999603
}
}
},
"_update_policy": {
"total": 322.29569010500177,
"count": 97,
"self": 267.39503780700966,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.90065229799211,
"count": 2910,
"self": 54.90065229799211
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.099999260797631e-07,
"count": 1,
"self": 9.099999260797631e-07
},
"TrainerController._save_models": {
"total": 0.1173626259997036,
"count": 1,
"self": 0.0021098239994898904,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1152528020002137,
"count": 1,
"self": 0.1152528020002137
}
}
}
}
}
}
}
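For convenience, here is a minimal sketch of how the timer tree above could be summarized. It is not part of the log itself: the local file name "timers.json" and the helper name summarize_timers are assumptions for illustration, and the script only relies on the "total" and "children" fields visible in the JSON above.

```python
# Minimal sketch: walk the ML-Agents timer tree and print each block's
# share of the total run time. Assumes this JSON is saved as "timers.json".
import json

with open("timers.json") as f:
    timers = json.load(f)

run_total = timers["total"]  # whole-run wall-clock seconds (~2318.7 s in this log)

def summarize_timers(node, name="root", depth=0):
    """Recursively print a timer block's seconds and its fraction of the run."""
    seconds = node.get("total", 0.0)
    print(f"{'  ' * depth}{name}: {seconds:.1f}s ({seconds / run_total:.1%})")
    for child_name, child in node.get("children", {}).items():
        summarize_timers(child, child_name, depth + 1)

summarize_timers(timers)
```

Read against the numbers above, for example, communicator.exchange under the parallel workers accounts for roughly 1034.6 s of the 2318.7 s total, i.e. about 45% of the wall-clock time is spent exchanging data with the Unity environment.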