{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4408972263336182,
"min": 1.34647798538208,
"max": 3.2957446575164795,
"count": 4516
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 26696.943359375,
"min": 15579.70703125,
"max": 114898.7109375,
"count": 4516
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 66.20270270270271,
"min": 39.77049180327869,
"max": 999.0,
"count": 4516
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19596.0,
"min": 6032.0,
"max": 30900.0,
"count": 4516
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1646.8488571288406,
"min": 1188.6659502990287,
"max": 1747.9718014571458,
"count": 4499
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 243733.63085506842,
"min": 2377.3319005980575,
"max": 390871.28502691246,
"count": 4499
},
"SoccerTwos.Step.mean": {
"value": 45159895.0,
"min": 9480.0,
"max": 45159895.0,
"count": 4516
},
"SoccerTwos.Step.sum": {
"value": 45159895.0,
"min": 9480.0,
"max": 45159895.0,
"count": 4516
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.07225735485553741,
"min": -0.1587270200252533,
"max": 0.1867164969444275,
"count": 4516
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -10.694087982177734,
"min": -24.761415481567383,
"max": 28.941057205200195,
"count": 4516
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07493644207715988,
"min": -0.15862877666950226,
"max": 0.19069910049438477,
"count": 4516
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -11.090593338012695,
"min": -24.746089935302734,
"max": 29.558361053466797,
"count": 4516
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 4516
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 4516
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.17831621741926348,
"min": -0.6733818198695327,
"max": 0.4627111121120616,
"count": 4516
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -26.390800178050995,
"min": -73.69519984722137,
"max": 55.13080018758774,
"count": 4516
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.17831621741926348,
"min": -0.6733818198695327,
"max": 0.4627111121120616,
"count": 4516
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -26.390800178050995,
"min": -73.69519984722137,
"max": 55.13080018758774,
"count": 4516
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4516
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4516
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.021313193761549578,
"min": 0.009113810799802499,
"max": 0.02654586935726305,
"count": 2187
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.021313193761549578,
"min": 0.009113810799802499,
"max": 0.02654586935726305,
"count": 2187
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0957459956407547,
"min": 4.403265850972578e-05,
"max": 0.12643185531099638,
"count": 2187
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0957459956407547,
"min": 4.403265850972578e-05,
"max": 0.12643185531099638,
"count": 2187
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09621889839569728,
"min": 4.428583324624924e-05,
"max": 0.1395223173002402,
"count": 2187
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09621889839569728,
"min": 4.428583324624924e-05,
"max": 0.1395223173002402,
"count": 2187
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2187
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2187
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2187
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2187
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2187
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2187
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701097720",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\Aditya\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1701161699"
},
"total": 63977.16567700001,
"count": 1,
"self": 0.17009470000630245,
"children": {
"run_training.setup": {
"total": 0.09332970000104979,
"count": 1,
"self": 0.09332970000104979
},
"TrainerController.start_learning": {
"total": 63976.902252600004,
"count": 1,
"self": 38.01585670054192,
"children": {
"TrainerController._reset_env": {
"total": 7.724854799977038,
"count": 226,
"self": 7.724854799977038
},
"TrainerController.advance": {
"total": 63931.085107899504,
"count": 3105144,
"self": 36.76736970519414,
"children": {
"env_step": {
"total": 25439.190738883335,
"count": 3105144,
"self": 20010.848189444747,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5404.875475931738,
"count": 3105144,
"self": 236.41763825167436,
"children": {
"TorchPolicy.evaluate": {
"total": 5168.457837680064,
"count": 5674896,
"self": 5168.457837680064
}
}
},
"workers": {
"total": 23.467073506850284,
"count": 3105144,
"self": 0.0,
"children": {
"worker_root": {
"total": 63917.17435837537,
"count": 3105144,
"is_parallel": true,
"self": 47931.852333021,
"children": {
"steps_from_proto": {
"total": 0.21264499996323138,
"count": 452,
"is_parallel": true,
"self": 0.04078390053473413,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.17186109942849725,
"count": 1808,
"is_parallel": true,
"self": 0.17186109942849725
}
}
},
"UnityEnvironment.step": {
"total": 15985.109380354406,
"count": 3105144,
"is_parallel": true,
"self": 843.2959519782453,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 593.3013080957462,
"count": 3105144,
"is_parallel": true,
"self": 593.3013080957462
},
"communicator.exchange": {
"total": 11990.734174017853,
"count": 3105144,
"is_parallel": true,
"self": 11990.734174017853
},
"steps_from_proto": {
"total": 2557.777946262562,
"count": 6210288,
"is_parallel": true,
"self": 479.7330762423808,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2078.0448700201814,
"count": 24841152,
"is_parallel": true,
"self": 2078.0448700201814
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 38455.126999310974,
"count": 3105144,
"self": 343.00531288038474,
"children": {
"process_trajectory": {
"total": 7399.575904130703,
"count": 3105144,
"self": 7392.998859030311,
"children": {
"RLTrainer._checkpoint": {
"total": 6.577045100391842,
"count": 90,
"self": 6.577045100391842
}
}
},
"_update_policy": {
"total": 30712.545782299887,
"count": 2188,
"self": 3519.4821614015964,
"children": {
"TorchPOCAOptimizer.update": {
"total": 27193.06362089829,
"count": 65621,
"self": 27193.06362089829
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.00005330145359e-07,
"count": 1,
"self": 7.00005330145359e-07
},
"TrainerController._save_models": {
"total": 0.07643249997636303,
"count": 1,
"self": 0.0034460999886505306,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0729863999877125,
"count": 1,
"self": 0.0729863999877125
}
}
}
}
}
}
}