{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4839317798614502,
"min": 1.3217110633850098,
"max": 3.295750617980957,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 31008.23828125,
"min": 12233.826171875,
"max": 139907.46875,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 70.43661971830986,
"min": 40.63025210084034,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20004.0,
"min": 11988.0,
"max": 29272.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1663.774809225339,
"min": 1194.156890251674,
"max": 1700.1999188622788,
"count": 4811
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 236256.02290999814,
"min": 2388.3137805033484,
"max": 354933.7148393992,
"count": 4811
},
"SoccerTwos.Step.mean": {
"value": 49999826.0,
"min": 9096.0,
"max": 49999826.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999826.0,
"min": 9096.0,
"max": 49999826.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.03001251630485058,
"min": -0.17046482861042023,
"max": 0.16957390308380127,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -4.231764793395996,
"min": -29.149484634399414,
"max": 31.92991828918457,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.03009491041302681,
"min": -0.17105722427368164,
"max": 0.17510975897312164,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.243382453918457,
"min": -29.25078582763672,
"max": 32.16276550292969,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.06979858198909895,
"min": -0.631578947368421,
"max": 0.4511478284130926,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -9.841600060462952,
"min": -62.49959981441498,
"max": 65.8615996837616,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.06979858198909895,
"min": -0.631578947368421,
"max": 0.4511478284130926,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -9.841600060462952,
"min": -62.49959981441498,
"max": 65.8615996837616,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014501520027321628,
"min": 0.009576276894222246,
"max": 0.02516175553901121,
"count": 2413
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014501520027321628,
"min": 0.009576276894222246,
"max": 0.02516175553901121,
"count": 2413
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08913260574142139,
"min": 1.6679876887100893e-08,
"max": 0.12145498543977737,
"count": 2413
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08913260574142139,
"min": 1.6679876887100893e-08,
"max": 0.12145498543977737,
"count": 2413
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09005878468354543,
"min": 1.8300759988913264e-08,
"max": 0.12307501683632532,
"count": 2413
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09005878468354543,
"min": 1.8300759988913264e-08,
"max": 0.12307501683632532,
"count": 2413
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2413
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2413
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 2413
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 2413
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2413
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2413
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1712088591",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/hui/conda/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1712426607"
},
"total": 338016.229729331,
"count": 1,
"self": 10.26993561803829,
"children": {
"run_training.setup": {
"total": 0.029621077002957463,
"count": 1,
"self": 0.029621077002957463
},
"TrainerController.start_learning": {
"total": 338005.93017263594,
"count": 1,
"self": 94.73738962272182,
"children": {
"TrainerController._reset_env": {
"total": 13.276130230748095,
"count": 209,
"self": 13.276130230748095
},
"TrainerController.advance": {
"total": 337897.2613632415,
"count": 3408035,
"self": 106.42749221005943,
"children": {
"env_step": {
"total": 81938.0193208165,
"count": 3408035,
"self": 65020.16224968282,
"children": {
"SubprocessEnvManager._take_step": {
"total": 16859.248587368988,
"count": 3408035,
"self": 696.1393022305565,
"children": {
"TorchPolicy.evaluate": {
"total": 16163.109285138431,
"count": 6289310,
"self": 16163.109285138431
}
}
},
"workers": {
"total": 58.60848376469221,
"count": 3408035,
"self": 0.0,
"children": {
"worker_root": {
"total": 337841.55069500406,
"count": 3408035,
"is_parallel": true,
"self": 284218.66538325825,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.026187706971541047,
"count": 2,
"is_parallel": true,
"self": 0.023293153033591807,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00289455393794924,
"count": 8,
"is_parallel": true,
"self": 0.00289455393794924
}
}
},
"UnityEnvironment.step": {
"total": 0.05771382397506386,
"count": 1,
"is_parallel": true,
"self": 0.0008700748439878225,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0031269040191546082,
"count": 1,
"is_parallel": true,
"self": 0.0031269040191546082
},
"communicator.exchange": {
"total": 0.05170545005239546,
"count": 1,
"is_parallel": true,
"self": 0.05170545005239546
},
"steps_from_proto": {
"total": 0.0020113950595259666,
"count": 2,
"is_parallel": true,
"self": 0.0004623152781277895,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015490797813981771,
"count": 8,
"is_parallel": true,
"self": 0.0015490797813981771
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 53622.37083138793,
"count": 3408034,
"is_parallel": true,
"self": 3348.1403495456325,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2346.449156976538,
"count": 3408034,
"is_parallel": true,
"self": 2346.449156976538
},
"communicator.exchange": {
"total": 38363.26626932307,
"count": 3408034,
"is_parallel": true,
"self": 38363.26626932307
},
"steps_from_proto": {
"total": 9564.515055542695,
"count": 6816068,
"is_parallel": true,
"self": 1834.4778924479615,
"children": {
"_process_rank_one_or_two_observation": {
"total": 7730.037163094734,
"count": 27264272,
"is_parallel": true,
"self": 7730.037163094734
}
}
}
}
},
"steps_from_proto": {
"total": 0.5144803578732535,
"count": 416,
"is_parallel": true,
"self": 0.09764499438460916,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.41683536348864436,
"count": 1664,
"is_parallel": true,
"self": 0.41683536348864436
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 255852.8145502149,
"count": 3408035,
"self": 715.5272634963039,
"children": {
"process_trajectory": {
"total": 20634.581765327603,
"count": 3408035,
"self": 20585.784304664936,
"children": {
"RLTrainer._checkpoint": {
"total": 48.79746066266671,
"count": 100,
"self": 48.79746066266671
}
}
},
"_update_policy": {
"total": 234502.705521391,
"count": 2413,
"self": 8308.750875544036,
"children": {
"TorchPOCAOptimizer.update": {
"total": 226193.95464584697,
"count": 72402,
"self": 226193.95464584697
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.210952177643776e-06,
"count": 1,
"self": 1.210952177643776e-06
},
"TrainerController._save_models": {
"total": 0.65528833004646,
"count": 1,
"self": 0.11293255211785436,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5423557779286057,
"count": 1,
"self": 0.5423557779286057
}
}
}
}
}
}
}