poca-SoccerTwos / run_logs / timers.json
tayfen's picture
5m steps, first commit
04b8b19
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.0213615894317627,
"min": 2.009491443634033,
"max": 3.2956788539886475,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 38939.51171875,
"min": 19765.470703125,
"max": 154643.203125,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 75.31818181818181,
"min": 51.45744680851064,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19884.0,
"min": 7992.0,
"max": 29044.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1530.8731346255302,
"min": 1159.7741687804314,
"max": 1546.65543014195,
"count": 459
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 202075.25377056998,
"min": 2321.067814248583,
"max": 281538.77394771576,
"count": 459
},
"SoccerTwos.Step.mean": {
"value": 4999933.0,
"min": 9540.0,
"max": 4999933.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999933.0,
"min": 9540.0,
"max": 4999933.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.006711859721690416,
"min": -0.07943391799926758,
"max": 0.18840113282203674,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.8859654664993286,
"min": -11.772872924804688,
"max": 26.376157760620117,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0078003061935305595,
"min": -0.07678192853927612,
"max": 0.18508915603160858,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.0296404361724854,
"min": -11.542649269104004,
"max": 25.9124813079834,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.09212424177112001,
"min": -0.5714285714285714,
"max": 0.5371563597158953,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 12.160399913787842,
"min": -37.384000301361084,
"max": 47.07479953765869,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.09212424177112001,
"min": -0.5714285714285714,
"max": 0.5371563597158953,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 12.160399913787842,
"min": -37.384000301361084,
"max": 47.07479953765869,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.0227650248639596,
"min": 0.009874877380449712,
"max": 0.025181998672072343,
"count": 238
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.0227650248639596,
"min": 0.009874877380449712,
"max": 0.025181998672072343,
"count": 238
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08372622231642406,
"min": 2.12503995650574e-06,
"max": 0.09860104198257129,
"count": 238
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08372622231642406,
"min": 2.12503995650574e-06,
"max": 0.09860104198257129,
"count": 238
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08488952120145161,
"min": 2.1953862111937874e-06,
"max": 0.1005101685722669,
"count": 238
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08488952120145161,
"min": 2.1953862111937874e-06,
"max": 0.1005101685722669,
"count": 238
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 238
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 238
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 238
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 238
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 238
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 238
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675700579",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:28:38) [MSC v.1929 64 bit (AMD64)]",
"command_line_arguments": "E:\\ProgramData\\Miniconda3\\envs\\rl_hf\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1675721577"
},
"total": 20997.972567,
"count": 1,
"self": 0.7350547999994888,
"children": {
"run_training.setup": {
"total": 0.08352039999999983,
"count": 1,
"self": 0.08352039999999983
},
"TrainerController.start_learning": {
"total": 20997.1539918,
"count": 1,
"self": 10.351660299456853,
"children": {
"TrainerController._reset_env": {
"total": 4.556975699996396,
"count": 25,
"self": 4.556975699996396
},
"TrainerController.advance": {
"total": 20982.06151190055,
"count": 333375,
"self": 10.567984800814884,
"children": {
"env_step": {
"total": 6646.6473090000845,
"count": 333375,
"self": 5007.922153899531,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1632.6026722003107,
"count": 333375,
"self": 63.33631170102626,
"children": {
"TorchPolicy.evaluate": {
"total": 1569.2663604992845,
"count": 636786,
"self": 1569.2663604992845
}
}
},
"workers": {
"total": 6.122482900242986,
"count": 333375,
"self": 0.0,
"children": {
"worker_root": {
"total": 20979.775855499633,
"count": 333375,
"is_parallel": true,
"self": 17063.27428249941,
"children": {
"steps_from_proto": {
"total": 0.05782670000149581,
"count": 50,
"is_parallel": true,
"self": 0.012298500004381374,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04552819999711444,
"count": 200,
"is_parallel": true,
"self": 0.04552819999711444
}
}
},
"UnityEnvironment.step": {
"total": 3916.443746300221,
"count": 333375,
"is_parallel": true,
"self": 210.33805879946203,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 174.12952240006086,
"count": 333375,
"is_parallel": true,
"self": 174.12952240006086
},
"communicator.exchange": {
"total": 2840.075861300319,
"count": 333375,
"is_parallel": true,
"self": 2840.075861300319
},
"steps_from_proto": {
"total": 691.9003038003789,
"count": 666750,
"is_parallel": true,
"self": 147.23733259858454,
"children": {
"_process_rank_one_or_two_observation": {
"total": 544.6629712017943,
"count": 2667000,
"is_parallel": true,
"self": 544.6629712017943
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 14324.846218099648,
"count": 333375,
"self": 68.94114420022379,
"children": {
"process_trajectory": {
"total": 1739.43297689941,
"count": 333375,
"self": 1736.6504571994114,
"children": {
"RLTrainer._checkpoint": {
"total": 2.782519699998602,
"count": 10,
"self": 2.782519699998602
}
}
},
"_update_policy": {
"total": 12516.472097000014,
"count": 238,
"self": 1058.2553322000622,
"children": {
"TorchPOCAOptimizer.update": {
"total": 11458.216764799952,
"count": 7149,
"self": 11458.216764799952
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0000003385357559e-06,
"count": 1,
"self": 1.0000003385357559e-06
},
"TrainerController._save_models": {
"total": 0.18384289999812609,
"count": 1,
"self": 0.00261540000064997,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18122749999747612,
"count": 1,
"self": 0.18122749999747612
}
}
}
}
}
}
}