{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.219115734100342,
"min": 2.19679856300354,
"max": 3.2957592010498047,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 44879.3984375,
"min": 20429.765625,
"max": 131154.125,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 54.28260869565217,
"min": 43.705357142857146,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19976.0,
"min": 13640.0,
"max": 28836.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1514.635325257901,
"min": 1175.5303149514862,
"max": 1540.5535986484128,
"count": 461
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 278692.8998474538,
"min": 2352.708475439669,
"max": 332456.12015789293,
"count": 461
},
"SoccerTwos.Step.mean": {
"value": 4999986.0,
"min": 9434.0,
"max": 4999986.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999986.0,
"min": 9434.0,
"max": 4999986.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.009690292179584503,
"min": -0.06676390767097473,
"max": 0.2656790018081665,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.7830138206481934,
"min": -11.13835334777832,
"max": 36.33460998535156,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.009328517131507397,
"min": -0.07028283923864365,
"max": 0.2686382532119751,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.716447114944458,
"min": -12.013956069946289,
"max": 36.718902587890625,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.0057891289825024814,
"min": -0.6652000000079473,
"max": 0.5766857140942624,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -1.0651997327804565,
"min": -39.243200063705444,
"max": 76.6991999745369,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.0057891289825024814,
"min": -0.6652000000079473,
"max": 0.5766857140942624,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -1.0651997327804565,
"min": -39.243200063705444,
"max": 76.6991999745369,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01664948812297856,
"min": 0.011905774932044248,
"max": 0.02285748055825631,
"count": 237
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01664948812297856,
"min": 0.011905774932044248,
"max": 0.02285748055825631,
"count": 237
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10561632787187894,
"min": 2.331003175489362e-06,
"max": 0.11489342699448267,
"count": 237
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10561632787187894,
"min": 2.331003175489362e-06,
"max": 0.11489342699448267,
"count": 237
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10754233524203301,
"min": 2.4531511750562156e-06,
"max": 0.11718783502777418,
"count": 237
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10754233524203301,
"min": 2.4531511750562156e-06,
"max": 0.11718783502777418,
"count": 237
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 237
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 237
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 237
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 237
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 237
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 237
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685053186",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\jwood\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu117",
"numpy_version": "1.21.2",
"end_time_seconds": "1685060352"
},
"total": 7165.688564499999,
"count": 1,
"self": 0.5273317000001043,
"children": {
"run_training.setup": {
"total": 0.08869229999999995,
"count": 1,
"self": 0.08869229999999995
},
"TrainerController.start_learning": {
"total": 7165.072540499999,
"count": 1,
"self": 6.119253500058221,
"children": {
"TrainerController._reset_env": {
"total": 4.80170039999751,
"count": 25,
"self": 4.80170039999751
},
"TrainerController.advance": {
"total": 7154.003809099944,
"count": 333064,
"self": 5.9434175999886065,
"children": {
"env_step": {
"total": 5349.485794500149,
"count": 333064,
"self": 3537.9022568000237,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1807.5844411002647,
"count": 333064,
"self": 36.23928330086369,
"children": {
"TorchPolicy.evaluate": {
"total": 1771.345157799401,
"count": 638248,
"self": 1771.345157799401
}
}
},
"workers": {
"total": 3.999096599860521,
"count": 333064,
"self": 0.0,
"children": {
"worker_root": {
"total": 7153.6843497000555,
"count": 333064,
"is_parallel": true,
"self": 4334.950232100071,
"children": {
"steps_from_proto": {
"total": 0.03446980000073152,
"count": 50,
"is_parallel": true,
"self": 0.0075197000066578035,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.026950099994073717,
"count": 200,
"is_parallel": true,
"self": 0.026950099994073717
}
}
},
"UnityEnvironment.step": {
"total": 2818.6996477999837,
"count": 333064,
"is_parallel": true,
"self": 137.64734230070553,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 116.67095019972939,
"count": 333064,
"is_parallel": true,
"self": 116.67095019972939
},
"communicator.exchange": {
"total": 2128.998269599813,
"count": 333064,
"is_parallel": true,
"self": 2128.998269599813
},
"steps_from_proto": {
"total": 435.38308569973583,
"count": 666128,
"is_parallel": true,
"self": 93.03014329988093,
"children": {
"_process_rank_one_or_two_observation": {
"total": 342.3529423998549,
"count": 2664512,
"is_parallel": true,
"self": 342.3529423998549
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1798.5745969998068,
"count": 333064,
"self": 43.89163129996382,
"children": {
"process_trajectory": {
"total": 567.0804893998438,
"count": 333064,
"self": 564.9522061998447,
"children": {
"RLTrainer._checkpoint": {
"total": 2.128283199999146,
"count": 10,
"self": 2.128283199999146
}
}
},
"_update_policy": {
"total": 1187.6024762999991,
"count": 237,
"self": 663.3542683999674,
"children": {
"TorchPOCAOptimizer.update": {
"total": 524.2482079000317,
"count": 7110,
"self": 524.2482079000317
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.999998731771484e-07,
"count": 1,
"self": 6.999998731771484e-07
},
"TrainerController._save_models": {
"total": 0.14777679999951943,
"count": 1,
"self": 0.0016951999996308587,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14608159999988857,
"count": 1,
"self": 0.14608159999988857
}
}
}
}
}
}
}