ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5439472794532776,
"min": 0.5387341380119324,
"max": 1.4598780870437622,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16153.05859375,
"min": 16049.9677734375,
"max": 44286.86328125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989987.0,
"min": 29952.0,
"max": 989987.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989987.0,
"min": 29952.0,
"max": 989987.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.47965386509895325,
"min": -0.125241219997406,
"max": 0.5370442867279053,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 129.5065460205078,
"min": -30.057891845703125,
"max": 146.0760498046875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.009490493685007095,
"min": -0.02223842591047287,
"max": 0.2066325545310974,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.5624332427978516,
"min": -6.04885196685791,
"max": 49.79844665527344,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06933850301997847,
"min": 0.06535296257562788,
"max": 0.07303936706323709,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9707390422796985,
"min": 0.4855138030165443,
"max": 1.024065812380286,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015602201858460571,
"min": 0.00010590758737836497,
"max": 0.01764486909678326,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.218430826018448,
"min": 0.0011649834611620147,
"max": 0.24702816735496563,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.295133282607144e-06,
"min": 7.295133282607144e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010213186595650002,
"min": 0.00010213186595650002,
"max": 0.003117554860815099,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243167857142861,
"min": 0.10243167857142861,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4340435000000005,
"min": 1.3886848,
"max": 2.4005089999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025292468928571435,
"min": 0.00025292468928571435,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035409456500000006,
"min": 0.0035409456500000006,
"max": 0.10393457151,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007058271206915379,
"min": 0.006736790295690298,
"max": 0.3061252534389496,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09881579875946045,
"min": 0.0945109948515892,
"max": 2.1428768634796143,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 398.86842105263156,
"min": 383.53333333333336,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30314.0,
"min": 15984.0,
"max": 35079.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.469505235062618,
"min": -1.0000000521540642,
"max": 1.5583808042415201,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 111.68239786475897,
"min": -32.000001668930054,
"max": 118.44679871201515,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.469505235062618,
"min": -1.0000000521540642,
"max": 1.5583808042415201,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 111.68239786475897,
"min": -32.000001668930054,
"max": 118.44679871201515,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.029275640953097825,
"min": 0.026695883357121298,
"max": 5.959195447154343,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.2249487124354346,
"min": 2.0021912517840974,
"max": 95.34712715446949,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679060449",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679062642"
},
"total": 2193.0872409400004,
"count": 1,
"self": 0.44508147200076564,
"children": {
"run_training.setup": {
"total": 0.12043211099990003,
"count": 1,
"self": 0.12043211099990003
},
"TrainerController.start_learning": {
"total": 2192.5217273569997,
"count": 1,
"self": 1.419310653037428,
"children": {
"TrainerController._reset_env": {
"total": 6.084513055000116,
"count": 1,
"self": 6.084513055000116
},
"TrainerController.advance": {
"total": 2184.9247929859616,
"count": 63568,
"self": 1.4455939169706653,
"children": {
"env_step": {
"total": 1551.4486246130296,
"count": 63568,
"self": 1438.6775451580584,
"children": {
"SubprocessEnvManager._take_step": {
"total": 111.93169292097309,
"count": 63568,
"self": 4.697211970989429,
"children": {
"TorchPolicy.evaluate": {
"total": 107.23448094998366,
"count": 62562,
"self": 107.23448094998366
}
}
},
"workers": {
"total": 0.8393865339980948,
"count": 63568,
"self": 0.0,
"children": {
"worker_root": {
"total": 2187.480073777951,
"count": 63568,
"is_parallel": true,
"self": 866.2298807908896,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019062760002270807,
"count": 1,
"is_parallel": true,
"self": 0.0005982149996270891,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013080610005999915,
"count": 8,
"is_parallel": true,
"self": 0.0013080610005999915
}
}
},
"UnityEnvironment.step": {
"total": 0.04815826599997308,
"count": 1,
"is_parallel": true,
"self": 0.0005204789999879722,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005067440001766954,
"count": 1,
"is_parallel": true,
"self": 0.0005067440001766954
},
"communicator.exchange": {
"total": 0.04301082099982523,
"count": 1,
"is_parallel": true,
"self": 0.04301082099982523
},
"steps_from_proto": {
"total": 0.004120221999983187,
"count": 1,
"is_parallel": true,
"self": 0.0003950320005969843,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0037251899993862025,
"count": 8,
"is_parallel": true,
"self": 0.0037251899993862025
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1321.2501929870614,
"count": 63567,
"is_parallel": true,
"self": 31.222361043191995,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.855015946974163,
"count": 63567,
"is_parallel": true,
"self": 22.855015946974163
},
"communicator.exchange": {
"total": 1174.2500869339015,
"count": 63567,
"is_parallel": true,
"self": 1174.2500869339015
},
"steps_from_proto": {
"total": 92.92272906299377,
"count": 63567,
"is_parallel": true,
"self": 19.818816703934772,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.103912359059,
"count": 508536,
"is_parallel": true,
"self": 73.103912359059
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 632.0305744559614,
"count": 63568,
"self": 2.5143604839131513,
"children": {
"process_trajectory": {
"total": 116.70272033204265,
"count": 63568,
"self": 116.50204957604228,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20067075600036333,
"count": 2,
"self": 0.20067075600036333
}
}
},
"_update_policy": {
"total": 512.8134936400056,
"count": 438,
"self": 324.82134601598364,
"children": {
"TorchPPOOptimizer.update": {
"total": 187.99214762402198,
"count": 22854,
"self": 187.99214762402198
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.63999809755478e-07,
"count": 1,
"self": 9.63999809755478e-07
},
"TrainerController._save_models": {
"total": 0.09310969900070631,
"count": 1,
"self": 0.0017913890005729627,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09131831000013335,
"count": 1,
"self": 0.09131831000013335
}
}
}
}
}
}
}
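
The JSON above is the timer log written by mlagents-learn for this run (see the "command_line_arguments" and "mlagents_version" entries in "metadata"). Its structure is visible in the file itself: a "gauges" map of per-metric summaries (value, min, max, count), a "metadata" block describing the run, and a nested timer tree whose nodes carry "total", "count", and "self" times in seconds plus optional "children". Below is a minimal Python sketch for inspecting such a file, not part of the original log; it assumes the repository is checked out locally so the file can be read at run_logs/timers.json, and the 1-second print threshold is an arbitrary illustrative choice.

# Minimal sketch (assumptions: local path "run_logs/timers.json" and the
# 1-second threshold are illustrative, not part of the log itself).
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Gauges: each entry records the latest value plus min/max/count over the run.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# Timer tree: every node has total/count/self seconds and optional children.
def walk(node, name="root", depth=0, min_seconds=1.0):
    if node.get("total", 0.0) >= min_seconds:
        print(f"{'  ' * depth}{name}: {node['total']:.1f}s "
              f"(self {node.get('self', 0.0):.1f}s, count {node.get('count', 0)})")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1, min_seconds)

walk(timers)

Running this against the log above would, for example, surface communicator.exchange (roughly 1174 s of the 2193 s total) as the dominant cost, i.e. time spent exchanging steps with the Unity Pyramids executable.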