poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.918834924697876,
"min": 2.8833391666412354,
"max": 3.0236945152282715,
"count": 33
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 78271.4765625,
"min": 39830.37109375,
"max": 84569.7890625,
"count": 33
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 230.95652173913044,
"min": 124.25714285714285,
"max": 623.625,
"count": 33
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 21248.0,
"min": 9680.0,
"max": 22600.0,
"count": 33
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1282.0056355876839,
"min": 1253.013989211454,
"max": 1282.0056355876839,
"count": 33
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 58972.25923703346,
"min": 12559.079803129456,
"max": 101548.60181663197,
"count": 33
},
"SoccerTwos.Step.mean": {
"value": 2929962.0,
"min": 2609928.0,
"max": 2929962.0,
"count": 33
},
"SoccerTwos.Step.sum": {
"value": 2929962.0,
"min": 2609928.0,
"max": 2929962.0,
"count": 33
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.03823350369930267,
"min": -0.049646422266960144,
"max": 0.052216753363609314,
"count": 33
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.7205077409744263,
"min": -2.035503387451172,
"max": 2.2943410873413086,
"count": 33
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.03543571010231972,
"min": -0.05077830329537392,
"max": 0.05149360001087189,
"count": 33
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.594606876373291,
"min": -2.0819103717803955,
"max": 2.4051408767700195,
"count": 33
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 33
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 33
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.12317777209811741,
"min": -0.563066663164081,
"max": 0.48426666997727896,
"count": 33
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 5.542999744415283,
"min": -18.581199884414673,
"max": 12.681599855422974,
"count": 33
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.12317777209811741,
"min": -0.563066663164081,
"max": 0.48426666997727896,
"count": 33
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 5.542999744415283,
"min": -18.581199884414673,
"max": 12.681599855422974,
"count": 33
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015882109101706496,
"min": 0.010916591945958013,
"max": 0.02235342748463154,
"count": 15
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015882109101706496,
"min": 0.010916591945958013,
"max": 0.02235342748463154,
"count": 15
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.032553153360883394,
"min": 0.008785408766319355,
"max": 0.032553153360883394,
"count": 15
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.032553153360883394,
"min": 0.008785408766319355,
"max": 0.032553153360883394,
"count": 15
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.03331315269072851,
"min": 0.008910710799197356,
"max": 0.03331315269072851,
"count": 15
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.03331315269072851,
"min": 0.008910710799197356,
"max": 0.03331315269072851,
"count": 15
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 15
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 15
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 15
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 15
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 15
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 15
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689930704",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:38:11) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/giovannidispoto/miniforge3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env training-envs-executables/linux/SoccerTwos/SoccerTwos --run-id =SoccerTwos training --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1689931918"
},
"total": 1213.220998792,
"count": 1,
"self": 0.19836883399989347,
"children": {
"run_training.setup": {
"total": 0.0146809160000001,
"count": 1,
"self": 0.0146809160000001
},
"TrainerController.start_learning": {
"total": 1213.007949042,
"count": 1,
"self": 0.256065019000971,
"children": {
"TrainerController._reset_env": {
"total": 1.6167403750000064,
"count": 3,
"self": 1.6167403750000064
},
"TrainerController.advance": {
"total": 1210.9198496889987,
"count": 21467,
"self": 0.2263428089916033,
"children": {
"env_step": {
"total": 935.021241669002,
"count": 21467,
"self": 896.3483562050126,
"children": {
"SubprocessEnvManager._take_step": {
"total": 38.50613126399476,
"count": 21467,
"self": 1.2515383709973094,
"children": {
"TorchPolicy.evaluate": {
"total": 37.25459289299745,
"count": 41986,
"self": 37.25459289299745
}
}
},
"workers": {
"total": 0.16675419999469066,
"count": 21466,
"self": 0.0,
"children": {
"worker_root": {
"total": 1210.8783447740025,
"count": 21466,
"is_parallel": true,
"self": 348.50981015801983,
"children": {
"steps_from_proto": {
"total": 0.004010457999933603,
"count": 6,
"is_parallel": true,
"self": 0.0006119609999728493,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003398496999960754,
"count": 24,
"is_parallel": true,
"self": 0.003398496999960754
}
}
},
"UnityEnvironment.step": {
"total": 862.3645241579827,
"count": 21466,
"is_parallel": true,
"self": 2.4730160429826356,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 14.117127869999308,
"count": 21466,
"is_parallel": true,
"self": 14.117127869999308
},
"communicator.exchange": {
"total": 818.327291241,
"count": 21466,
"is_parallel": true,
"self": 818.327291241
},
"steps_from_proto": {
"total": 27.447089004000716,
"count": 42932,
"is_parallel": true,
"self": 3.8421029960153845,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.60498600798533,
"count": 171728,
"is_parallel": true,
"self": 23.60498600798533
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 275.67226521100497,
"count": 21466,
"self": 2.0925505520029333,
"children": {
"process_trajectory": {
"total": 41.71501174500203,
"count": 21466,
"self": 41.71501174500203
},
"_update_policy": {
"total": 231.86470291400002,
"count": 15,
"self": 27.459560829000736,
"children": {
"TorchPOCAOptimizer.update": {
"total": 204.40514208499928,
"count": 450,
"self": 204.40514208499928
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.000001692678779e-07,
"count": 1,
"self": 5.000001692678779e-07
},
"TrainerController._save_models": {
"total": 0.2152934590001223,
"count": 1,
"self": 0.0022526249999827996,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2130408340001395,
"count": 1,
"self": 0.2130408340001395
}
}
}
}
}
}
}
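
Note: the JSON above is the complete recorded log. As a minimal sketch (not part of the run log itself), the snippet below shows one way this file could be inspected offline, assuming it is saved at run_logs/timers.json as the path above suggests; the "gauges" block holds per-statistic value/min/max/count summaries, and the timer tree nests under "children".

import json

# Minimal sketch: load the timers.json shown above and summarize it.
# Assumes the file is at run_logs/timers.json; adjust the path as needed.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each entry in "gauges" records value/min/max/count for one training statistic.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# The timer tree nests blocks under "children"; walking it shows where
# wall-clock time went (here, communicator.exchange dominates env_step).
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.2f}s over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)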