{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4232853651046753,
"min": 1.1914889812469482,
"max": 3.29569149017334,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 27372.623046875,
"min": 16558.00390625,
"max": 128081.3125,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 65.72972972972973,
"min": 40.0,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19456.0,
"min": 11012.0,
"max": 29092.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1597.079608136376,
"min": 1193.4632533197162,
"max": 1698.4949229540778,
"count": 4831
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 236367.78200418365,
"min": 2388.5899078712573,
"max": 398345.1216989022,
"count": 4831
},
"SoccerTwos.Step.mean": {
"value": 49999984.0,
"min": 9562.0,
"max": 49999984.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999984.0,
"min": 9562.0,
"max": 49999984.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.01089271530508995,
"min": -0.13643035292625427,
"max": 0.16642113029956818,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.612121820449829,
"min": -22.78386878967285,
"max": 25.88186264038086,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.012925611808896065,
"min": -0.13345466554164886,
"max": 0.16679438948631287,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.9129905700683594,
"min": -22.337013244628906,
"max": 25.790990829467773,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.11052162260622592,
"min": -0.5714285714285714,
"max": 0.45223225220557184,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -16.357200145721436,
"min": -63.91780000925064,
"max": 60.24499982595444,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.11052162260622592,
"min": -0.5714285714285714,
"max": 0.45223225220557184,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -16.357200145721436,
"min": -63.91780000925064,
"max": 60.24499982595444,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017073513596551494,
"min": 0.009311351187837621,
"max": 0.02547406100978454,
"count": 2417
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017073513596551494,
"min": 0.009311351187837621,
"max": 0.02547406100978454,
"count": 2417
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09446701159079869,
"min": 9.791846006616348e-08,
"max": 0.1306864582002163,
"count": 2417
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09446701159079869,
"min": 9.791846006616348e-08,
"max": 0.1306864582002163,
"count": 2417
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09576245471835136,
"min": 1.0148629764709464e-07,
"max": 0.13350567693511645,
"count": 2417
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09576245471835136,
"min": 1.0148629764709464e-07,
"max": 0.13350567693511645,
"count": 2417
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2417
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2417
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2417
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2417
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2417
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2417
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1722545702",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/davidk/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1722591769"
},
"total": 46067.02935311,
"count": 1,
"self": 0.21810075700341258,
"children": {
"run_training.setup": {
"total": 0.04552631600017776,
"count": 1,
"self": 0.04552631600017776
},
"TrainerController.start_learning": {
"total": 46066.765726037,
"count": 1,
"self": 40.79071860276599,
"children": {
"TrainerController._reset_env": {
"total": 6.423286502989868,
"count": 250,
"self": 6.423286502989868
},
"TrainerController.advance": {
"total": 46019.41685997624,
"count": 3424961,
"self": 38.2772818822923,
"children": {
"env_step": {
"total": 34597.641284861,
"count": 3424961,
"self": 26297.7755586929,
"children": {
"SubprocessEnvManager._take_step": {
"total": 8275.6733871308,
"count": 3424961,
"self": 282.38117238833365,
"children": {
"TorchPolicy.evaluate": {
"total": 7993.292214742467,
"count": 6291176,
"self": 7993.292214742467
}
}
},
"workers": {
"total": 24.192339037295824,
"count": 3424961,
"self": 0.0,
"children": {
"worker_root": {
"total": 45993.189696476395,
"count": 3424961,
"is_parallel": true,
"self": 24417.443031182593,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023881330016592983,
"count": 2,
"is_parallel": true,
"self": 0.0005777200021839235,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018104129994753748,
"count": 8,
"is_parallel": true,
"self": 0.0018104129994753748
}
}
},
"UnityEnvironment.step": {
"total": 0.026553654999588616,
"count": 1,
"is_parallel": true,
"self": 0.0006051090003893478,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004507829999056412,
"count": 1,
"is_parallel": true,
"self": 0.0004507829999056412
},
"communicator.exchange": {
"total": 0.023734423999485443,
"count": 1,
"is_parallel": true,
"self": 0.023734423999485443
},
"steps_from_proto": {
"total": 0.0017633389998081839,
"count": 2,
"is_parallel": true,
"self": 0.0003393890001461841,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014239499996619998,
"count": 8,
"is_parallel": true,
"self": 0.0014239499996619998
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 21575.499355259803,
"count": 3424960,
"is_parallel": true,
"self": 1132.0872262527846,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 729.5288172089004,
"count": 3424960,
"is_parallel": true,
"self": 729.5288172089004
},
"communicator.exchange": {
"total": 16691.748811488615,
"count": 3424960,
"is_parallel": true,
"self": 16691.748811488615
},
"steps_from_proto": {
"total": 3022.134500309503,
"count": 6849920,
"is_parallel": true,
"self": 561.4106670608071,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2460.723833248696,
"count": 27399680,
"is_parallel": true,
"self": 2460.723833248696
}
}
}
}
},
"steps_from_proto": {
"total": 0.24731003400120244,
"count": 498,
"is_parallel": true,
"self": 0.04687300728073751,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.20043702672046493,
"count": 1992,
"is_parallel": true,
"self": 0.20043702672046493
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 11383.498293232955,
"count": 3424961,
"self": 300.51779104533307,
"children": {
"process_trajectory": {
"total": 4655.4978727855105,
"count": 3424961,
"self": 4641.462586460535,
"children": {
"RLTrainer._checkpoint": {
"total": 14.03528632497546,
"count": 100,
"self": 14.03528632497546
}
}
},
"_update_policy": {
"total": 6427.482629402111,
"count": 2417,
"self": 3355.6600326796834,
"children": {
"TorchPOCAOptimizer.update": {
"total": 3071.822596722428,
"count": 72516,
"self": 3071.822596722428
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.469999789260328e-07,
"count": 1,
"self": 6.469999789260328e-07
},
"TrainerController._save_models": {
"total": 0.13486030800413573,
"count": 1,
"self": 0.001060654001776129,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1337996540023596,
"count": 1,
"self": 0.1337996540023596
}
}
}
}
}
}
}