{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6318333148956299,
"min": 1.6132614612579346,
"max": 3.2957568168640137,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 33419.9453125,
"min": 23794.625,
"max": 105464.21875,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 56.241379310344826,
"min": 41.22222222222222,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19572.0,
"min": 3996.0,
"max": 31968.0,
"count": 1000
},
"SoccerTwos.Step.mean": {
"value": 9999960.0,
"min": 9000.0,
"max": 9999960.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999960.0,
"min": 9000.0,
"max": 9999960.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.05455681309103966,
"min": -0.11073212325572968,
"max": 0.21504288911819458,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 9.547442436218262,
"min": -22.151521682739258,
"max": 33.06159591674805,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.057055097073316574,
"min": -0.11225862801074982,
"max": 0.2163035124540329,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 9.984642028808594,
"min": -21.92664337158203,
"max": 33.97361755371094,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.16592000109808785,
"min": -0.5384615384615384,
"max": 0.5396129021080591,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 29.036000192165375,
"min": -71.7895998954773,
"max": 73.22559988498688,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.16592000109808785,
"min": -0.5384615384615384,
"max": 0.5396129021080591,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 29.036000192165375,
"min": -71.7895998954773,
"max": 73.22559988498688,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1698.052798835726,
"min": 1198.7578908373744,
"max": 1710.4447819569918,
"count": 954
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 295461.1869974163,
"min": 2398.0485328479936,
"max": 387626.6543726098,
"count": 954
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016499078659884012,
"min": 0.00911118609146797,
"max": 0.02418120575991149,
"count": 480
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016499078659884012,
"min": 0.00911118609146797,
"max": 0.02418120575991149,
"count": 480
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11688335239887238,
"min": 2.0363060608967015e-06,
"max": 0.12866197600960733,
"count": 480
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11688335239887238,
"min": 2.0363060608967015e-06,
"max": 0.12866197600960733,
"count": 480
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11954072887698809,
"min": 2.0487085104529493e-06,
"max": 0.13160137881835302,
"count": 480
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11954072887698809,
"min": 2.0487085104529493e-06,
"max": 0.13160137881835302,
"count": 480
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 480
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 480
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 480
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 480
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 480
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 480
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675875263",
"python_version": "3.9.16 (main, Jan 11 2023, 10:02:19) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/sasha/opt/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1675909084"
},
"total": 33822.044240374,
"count": 1,
"self": 1.4647213270072825,
"children": {
"run_training.setup": {
"total": 0.033333238999999626,
"count": 1,
"self": 0.033333238999999626
},
"TrainerController.start_learning": {
"total": 33820.546185807994,
"count": 1,
"self": 16.96266133386962,
"children": {
"TrainerController._reset_env": {
"total": 4.406052080984386,
"count": 40,
"self": 4.406052080984386
},
"TrainerController.advance": {
"total": 33798.97415449014,
"count": 682842,
"self": 14.69149975302571,
"children": {
"env_step": {
"total": 10541.803051611449,
"count": 682842,
"self": 8523.343640761808,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2008.4199754839929,
"count": 682842,
"self": 79.8643249476238,
"children": {
"TorchPolicy.evaluate": {
"total": 1928.555650536369,
"count": 1259612,
"self": 1928.555650536369
}
}
},
"workers": {
"total": 10.039435365648263,
"count": 682842,
"self": 0.0,
"children": {
"worker_root": {
"total": 33788.16669353233,
"count": 682842,
"is_parallel": true,
"self": 26930.578729905323,
"children": {
"steps_from_proto": {
"total": 0.07124204600409989,
"count": 80,
"is_parallel": true,
"self": 0.01565417999450247,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.05558786600959742,
"count": 320,
"is_parallel": true,
"self": 0.05558786600959742
}
}
},
"UnityEnvironment.step": {
"total": 6857.516721581003,
"count": 682842,
"is_parallel": true,
"self": 424.4839428937785,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 278.57702745062585,
"count": 682842,
"is_parallel": true,
"self": 278.57702745062585
},
"communicator.exchange": {
"total": 5013.47298541214,
"count": 682842,
"is_parallel": true,
"self": 5013.47298541214
},
"steps_from_proto": {
"total": 1140.9827658244594,
"count": 1365684,
"is_parallel": true,
"self": 250.14054541372104,
"children": {
"_process_rank_one_or_two_observation": {
"total": 890.8422204107384,
"count": 5462736,
"is_parallel": true,
"self": 890.8422204107384
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 23242.479603125666,
"count": 682842,
"self": 130.30111494246012,
"children": {
"process_trajectory": {
"total": 3274.6360901011803,
"count": 682842,
"self": 3270.291739468181,
"children": {
"RLTrainer._checkpoint": {
"total": 4.344350632999522,
"count": 20,
"self": 4.344350632999522
}
}
},
"_update_policy": {
"total": 19837.542398082027,
"count": 480,
"self": 1564.3807215592242,
"children": {
"TorchPOCAOptimizer.update": {
"total": 18273.161676522803,
"count": 14415,
"self": 18273.161676522803
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.95997800398618e-07,
"count": 1,
"self": 8.95997800398618e-07
},
"TrainerController._save_models": {
"total": 0.20331700699898647,
"count": 1,
"self": 0.0010985010012518615,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2022185059977346,
"count": 1,
"self": 0.2022185059977346
}
}
}
}
}
}
}