{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.2442386150360107,
"min": 2.2442386150360107,
"max": 3.2957119941711426,
"count": 403
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 43161.19921875,
"min": 22773.53515625,
"max": 112429.953125,
"count": 403
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 50.302083333333336,
"min": 41.21367521367522,
"max": 999.0,
"count": 403
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19316.0,
"min": 11292.0,
"max": 28020.0,
"count": 403
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1428.8541944114083,
"min": 1186.3358990157788,
"max": 1439.0478025289485,
"count": 341
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 274340.0053269904,
"min": 2376.9510585950125,
"max": 339043.8703487618,
"count": 341
},
"SoccerTwos.Step.mean": {
"value": 4029942.0,
"min": 9028.0,
"max": 4029942.0,
"count": 403
},
"SoccerTwos.Step.sum": {
"value": 4029942.0,
"min": 9028.0,
"max": 4029942.0,
"count": 403
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.036567267030477524,
"min": -0.10431957244873047,
"max": 0.1856556087732315,
"count": 403
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 7.057482719421387,
"min": -21.087669372558594,
"max": 28.77661895751953,
"count": 403
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.03692905232310295,
"min": -0.10639823228120804,
"max": 0.18255189061164856,
"count": 403
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 7.127306938171387,
"min": -20.302501678466797,
"max": 28.295543670654297,
"count": 403
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 403
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 403
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.12972020670539974,
"min": -0.6666666666666666,
"max": 0.4373333305120468,
"count": 403
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -25.03599989414215,
"min": -37.25259989500046,
"max": 62.454399943351746,
"count": 403
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.12972020670539974,
"min": -0.6666666666666666,
"max": 0.4373333305120468,
"count": 403
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -25.03599989414215,
"min": -37.25259989500046,
"max": 62.454399943351746,
"count": 403
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 403
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 403
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016675695937980586,
"min": 0.01156629209581297,
"max": 0.022798085392666204,
"count": 190
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016675695937980586,
"min": 0.01156629209581297,
"max": 0.022798085392666204,
"count": 190
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10181177407503128,
"min": 4.791565070642415e-07,
"max": 0.1080866684516271,
"count": 190
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10181177407503128,
"min": 4.791565070642415e-07,
"max": 0.1080866684516271,
"count": 190
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10323484316468239,
"min": 6.557265436413217e-07,
"max": 0.10965974380572636,
"count": 190
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10323484316468239,
"min": 6.557265436413217e-07,
"max": 0.10965974380572636,
"count": 190
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 190
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 190
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 190
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 190
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 190
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 190
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679239730",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu113",
"numpy_version": "1.21.2",
"end_time_seconds": "1679247294"
},
"total": 7563.9356245789995,
"count": 1,
"self": 0.07215274600002886,
"children": {
"run_training.setup": {
"total": 0.10909197699993456,
"count": 1,
"self": 0.10909197699993456
},
"TrainerController.start_learning": {
"total": 7563.754379856,
"count": 1,
"self": 6.913387887071622,
"children": {
"TrainerController._reset_env": {
"total": 10.886652885003286,
"count": 21,
"self": 10.886652885003286
},
"TrainerController.advance": {
"total": 7545.704305595925,
"count": 268825,
"self": 7.145799377557523,
"children": {
"env_step": {
"total": 5952.870888430296,
"count": 268825,
"self": 4429.700663213434,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1519.2379388948186,
"count": 268825,
"self": 44.147688812609886,
"children": {
"TorchPolicy.evaluate": {
"total": 1475.0902500822087,
"count": 513588,
"self": 1475.0902500822087
}
}
},
"workers": {
"total": 3.9322863220431827,
"count": 268824,
"self": 0.0,
"children": {
"worker_root": {
"total": 7551.419511812995,
"count": 268824,
"is_parallel": true,
"self": 3867.5222084200077,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002962532000083229,
"count": 2,
"is_parallel": true,
"self": 0.0007978160003858648,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002164715999697364,
"count": 8,
"is_parallel": true,
"self": 0.002164715999697364
}
}
},
"UnityEnvironment.step": {
"total": 0.02992465700003777,
"count": 1,
"is_parallel": true,
"self": 0.0006568670000888233,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006329539999114786,
"count": 1,
"is_parallel": true,
"self": 0.0006329539999114786
},
"communicator.exchange": {
"total": 0.02654441799995766,
"count": 1,
"is_parallel": true,
"self": 0.02654441799995766
},
"steps_from_proto": {
"total": 0.002090418000079808,
"count": 2,
"is_parallel": true,
"self": 0.00041759800024010474,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001672819999839703,
"count": 8,
"is_parallel": true,
"self": 0.001672819999839703
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3683.8552501989852,
"count": 268823,
"is_parallel": true,
"self": 166.78643356635666,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 106.24675191213942,
"count": 268823,
"is_parallel": true,
"self": 106.24675191213942
},
"communicator.exchange": {
"total": 2889.397301513833,
"count": 268823,
"is_parallel": true,
"self": 2889.397301513833
},
"steps_from_proto": {
"total": 521.4247632066558,
"count": 537646,
"is_parallel": true,
"self": 101.52583203264567,
"children": {
"_process_rank_one_or_two_observation": {
"total": 419.89893117401016,
"count": 2150584,
"is_parallel": true,
"self": 419.89893117401016
}
}
}
}
},
"steps_from_proto": {
"total": 0.0420531940019373,
"count": 40,
"is_parallel": true,
"self": 0.008218023000154062,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03383517100178324,
"count": 160,
"is_parallel": true,
"self": 0.03383517100178324
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1585.6876177880713,
"count": 268824,
"self": 51.371071677157715,
"children": {
"process_trajectory": {
"total": 594.1098656239042,
"count": 268824,
"self": 592.1405817609029,
"children": {
"RLTrainer._checkpoint": {
"total": 1.9692838630012375,
"count": 8,
"self": 1.9692838630012375
}
}
},
"_update_policy": {
"total": 940.2066804870094,
"count": 191,
"self": 655.0171119950141,
"children": {
"TorchPOCAOptimizer.update": {
"total": 285.18956849199526,
"count": 5733,
"self": 285.18956849199526
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2489999789977446e-06,
"count": 1,
"self": 1.2489999789977446e-06
},
"TrainerController._save_models": {
"total": 0.2500322390005749,
"count": 1,
"self": 0.0019761210005526664,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24805611800002225,
"count": 1,
"self": 0.24805611800002225
}
}
}
}
}
}
}