{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.686773657798767,
"min": 1.571251630783081,
"max": 1.7149637937545776,
"count": 118
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34383.1953125,
"min": 8038.44580078125,
"max": 37387.62890625,
"count": 118
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 52.86021505376344,
"min": 40.46666666666667,
"max": 78.96969696969697,
"count": 118
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19664.0,
"min": 3384.0,
"max": 20848.0,
"count": 118
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1604.7561279872616,
"min": 1596.147576056119,
"max": 1641.8914249784082,
"count": 118
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 298484.63980563066,
"min": 60886.60501160265,
"max": 388015.8269429932,
"count": 118
},
"SoccerTwos.Step.mean": {
"value": 9999984.0,
"min": 8829868.0,
"max": 9999984.0,
"count": 118
},
"SoccerTwos.Step.sum": {
"value": 9999984.0,
"min": 8829868.0,
"max": 9999984.0,
"count": 118
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.06201007962226868,
"min": -0.10462847352027893,
"max": 0.06627217680215836,
"count": 118
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -11.471864700317383,
"min": -18.937753677368164,
"max": 13.519523620605469,
"count": 118
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.06280722469091415,
"min": -0.10412157326936722,
"max": 0.07305586338043213,
"count": 118
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -11.61933708190918,
"min": -18.846004486083984,
"max": 14.903395652770996,
"count": 118
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 118
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 118
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.12782270489512262,
"min": -0.2589891300253246,
"max": 0.27226732567985457,
"count": 118
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -23.647200405597687,
"min": -47.65399992465973,
"max": 54.99799978733063,
"count": 118
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.12782270489512262,
"min": -0.2589891300253246,
"max": 0.27226732567985457,
"count": 118
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -23.647200405597687,
"min": -47.65399992465973,
"max": 54.99799978733063,
"count": 118
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 118
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 118
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017015760073748727,
"min": 0.011039437506891165,
"max": 0.02371452291457293,
"count": 56
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017015760073748727,
"min": 0.011039437506891165,
"max": 0.02371452291457293,
"count": 56
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10656303465366364,
"min": 0.09706846177577973,
"max": 0.1288724867006143,
"count": 56
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10656303465366364,
"min": 0.09706846177577973,
"max": 0.1288724867006143,
"count": 56
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10831870262821515,
"min": 0.09825346643726031,
"max": 0.13139269451300303,
"count": 56
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10831870262821515,
"min": 0.09825346643726031,
"max": 0.13139269451300303,
"count": 56
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 56
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 56
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 56
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 56
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 56
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 56
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675428121",
"python_version": "3.8.16 | packaged by conda-forge | (default, Feb 1 2023, 16:01:55) \n[GCC 11.3.0]",
"command_line_arguments": "/home/mari/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1675434084"
},
"total": 5962.774227431975,
"count": 1,
"self": 0.425103104673326,
"children": {
"run_training.setup": {
"total": 0.01730215735733509,
"count": 1,
"self": 0.01730215735733509
},
"TrainerController.start_learning": {
"total": 5962.331822169945,
"count": 1,
"self": 2.5349797243252397,
"children": {
"TrainerController._reset_env": {
"total": 1.4496937403455377,
"count": 6,
"self": 1.4496937403455377
},
"TrainerController.advance": {
"total": 5957.906488174573,
"count": 81806,
"self": 2.2628058372065425,
"children": {
"env_step": {
"total": 2198.078187521547,
"count": 81806,
"self": 1679.0800731424242,
"children": {
"SubprocessEnvManager._take_step": {
"total": 517.4854472810403,
"count": 81806,
"self": 13.031771199777722,
"children": {
"TorchPolicy.evaluate": {
"total": 504.45367608126253,
"count": 146866,
"self": 504.45367608126253
}
}
},
"workers": {
"total": 1.512667098082602,
"count": 81806,
"self": 0.0,
"children": {
"worker_root": {
"total": 5957.9678576486185,
"count": 81806,
"is_parallel": true,
"self": 4595.35154273361,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004851153120398521,
"count": 2,
"is_parallel": true,
"self": 0.0010587126016616821,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0037924405187368393,
"count": 8,
"is_parallel": true,
"self": 0.0037924405187368393
}
}
},
"UnityEnvironment.step": {
"total": 0.03808434586971998,
"count": 1,
"is_parallel": true,
"self": 0.0009995074942708015,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0009768176823854446,
"count": 1,
"is_parallel": true,
"self": 0.0009768176823854446
},
"communicator.exchange": {
"total": 0.03303218260407448,
"count": 1,
"is_parallel": true,
"self": 0.03303218260407448
},
"steps_from_proto": {
"total": 0.003075838088989258,
"count": 2,
"is_parallel": true,
"self": 0.0006342325359582901,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024416055530309677,
"count": 8,
"is_parallel": true,
"self": 0.0024416055530309677
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.023196437396109104,
"count": 10,
"is_parallel": true,
"self": 0.004153416492044926,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.01904302090406418,
"count": 40,
"is_parallel": true,
"self": 0.01904302090406418
}
}
},
"UnityEnvironment.step": {
"total": 1362.5931184776127,
"count": 81805,
"is_parallel": true,
"self": 75.36137645505369,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 69.51651999168098,
"count": 81805,
"is_parallel": true,
"self": 69.51651999168098
},
"communicator.exchange": {
"total": 966.9044259414077,
"count": 81805,
"is_parallel": true,
"self": 966.9044259414077
},
"steps_from_proto": {
"total": 250.8107960894704,
"count": 163610,
"is_parallel": true,
"self": 48.64048096444458,
"children": {
"_process_rank_one_or_two_observation": {
"total": 202.1703151250258,
"count": 654440,
"is_parallel": true,
"self": 202.1703151250258
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3757.565494815819,
"count": 81806,
"self": 14.944533129222691,
"children": {
"process_trajectory": {
"total": 732.2951598260552,
"count": 81806,
"self": 731.1832850882784,
"children": {
"RLTrainer._checkpoint": {
"total": 1.111874737776816,
"count": 3,
"self": 1.111874737776816
}
}
},
"_update_policy": {
"total": 3010.325801860541,
"count": 56,
"self": 311.6489906795323,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2698.676811181009,
"count": 1680,
"self": 2698.676811181009
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2880191206932068e-06,
"count": 1,
"self": 1.2880191206932068e-06
},
"TrainerController._save_models": {
"total": 0.44065924268215895,
"count": 1,
"self": 0.002393142320215702,
"children": {
"RLTrainer._checkpoint": {
"total": 0.43826610036194324,
"count": 1,
"self": 0.43826610036194324
}
}
}
}
}
}
}