{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.41358017921447754,
"min": 0.38993197679519653,
"max": 1.42996084690094,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12414.0224609375,
"min": 11644.017578125,
"max": 43379.29296875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989901.0,
"min": 29952.0,
"max": 989901.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989901.0,
"min": 29952.0,
"max": 989901.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7401013374328613,
"min": -0.09754466265439987,
"max": 0.7401013374328613,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 219.81008911132812,
"min": -23.41071891784668,
"max": 219.81008911132812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02549360878765583,
"min": 0.003851822344586253,
"max": 0.7033986449241638,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.571601867675781,
"min": 0.9899183511734009,
"max": 166.70547485351562,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0676898630846861,
"min": 0.06459672924473933,
"max": 0.07350445102617544,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0153479462702915,
"min": 0.505004010894774,
"max": 1.0290623143664561,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016136129894423198,
"min": 0.0015388093025537023,
"max": 0.01637205031909703,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24204194841634796,
"min": 0.016926902328090725,
"max": 0.24411679549181525,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.479637506820002e-06,
"min": 7.479637506820002e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011219456260230004,
"min": 0.00011219456260230004,
"max": 0.0034903297365567994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249318000000002,
"min": 0.10249318000000002,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373977000000003,
"min": 1.3886848,
"max": 2.5266442000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025906868200000014,
"min": 0.00025906868200000014,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003886030230000002,
"min": 0.003886030230000002,
"max": 0.11635797568,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012334185652434826,
"min": 0.012258809991180897,
"max": 0.6404990553855896,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1850127875804901,
"min": 0.1716233342885971,
"max": 4.483493328094482,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 244.64406779661016,
"min": 244.64406779661016,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28868.0,
"min": 15984.0,
"max": 33564.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.755355921210879,
"min": -1.0000000521540642,
"max": 1.755355921210879,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 207.13199870288372,
"min": -28.58000161498785,
"max": 207.13199870288372,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.755355921210879,
"min": -1.0000000521540642,
"max": 1.755355921210879,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 207.13199870288372,
"min": -28.58000161498785,
"max": 207.13199870288372,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.031063366807618384,
"min": 0.031063366807618384,
"max": 12.83260870166123,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.6654772832989693,
"min": 3.6654772832989693,
"max": 205.32173922657967,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1657918978",
"python_version": "3.6.9 (default, Mar 15 2022, 13:55:28) \n[GCC 8.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.18.5",
"end_time_seconds": "1657920813"
},
"total": 1835.534962362999,
"count": 1,
"self": 0.27018346799741266,
"children": {
"run_training.setup": {
"total": 0.014960827000322752,
"count": 1,
"self": 0.014960827000322752
},
"TrainerController.start_learning": {
"total": 1835.2498180680013,
"count": 1,
"self": 1.4931210819959233,
"children": {
"TrainerController._reset_env": {
"total": 1.7412118799984455,
"count": 1,
"self": 1.7412118799984455
},
"TrainerController.advance": {
"total": 1831.9644836230036,
"count": 64262,
"self": 1.3356513751932653,
"children": {
"env_step": {
"total": 1103.3998733392073,
"count": 64262,
"self": 1026.0440612346138,
"children": {
"SubprocessEnvManager._take_step": {
"total": 76.48616008808312,
"count": 64262,
"self": 3.511202939298528,
"children": {
"TorchPolicy.evaluate": {
"total": 72.9749571487846,
"count": 62574,
"self": 14.95561928774623,
"children": {
"TorchPolicy.sample_actions": {
"total": 58.019337861038366,
"count": 62574,
"self": 58.019337861038366
}
}
}
}
},
"workers": {
"total": 0.869652016510372,
"count": 64262,
"self": 0.0,
"children": {
"worker_root": {
"total": 1831.8139931432816,
"count": 64262,
"is_parallel": true,
"self": 895.3981945827618,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002930039001512341,
"count": 1,
"is_parallel": true,
"self": 0.0016086690011434257,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013213700003689155,
"count": 8,
"is_parallel": true,
"self": 0.0013213700003689155
}
}
},
"UnityEnvironment.step": {
"total": 0.032493468999746256,
"count": 1,
"is_parallel": true,
"self": 0.0002630399976624176,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045806999696651474,
"count": 1,
"is_parallel": true,
"self": 0.00045806999696651474
},
"communicator.exchange": {
"total": 0.030669170002511237,
"count": 1,
"is_parallel": true,
"self": 0.030669170002511237
},
"steps_from_proto": {
"total": 0.0011031890026060864,
"count": 1,
"is_parallel": true,
"self": 0.0002713490066525992,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008318399959534872,
"count": 8,
"is_parallel": true,
"self": 0.0008318399959534872
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 936.4157985605198,
"count": 64261,
"is_parallel": true,
"self": 16.50556306400904,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.00767444599478,
"count": 64261,
"is_parallel": true,
"self": 12.00767444599478
},
"communicator.exchange": {
"total": 843.9158501789898,
"count": 64261,
"is_parallel": true,
"self": 843.9158501789898
},
"steps_from_proto": {
"total": 63.98671087152616,
"count": 64261,
"is_parallel": true,
"self": 13.251265531023819,
"children": {
"_process_rank_one_or_two_observation": {
"total": 50.73544534050234,
"count": 514088,
"is_parallel": true,
"self": 50.73544534050234
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 727.2289589086031,
"count": 64262,
"self": 3.0074682592312456,
"children": {
"process_trajectory": {
"total": 127.86817325334778,
"count": 64262,
"self": 127.75923370034434,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10893955300343805,
"count": 2,
"self": 0.10893955300343805
}
}
},
"_update_policy": {
"total": 596.3533173960241,
"count": 453,
"self": 204.11661663311315,
"children": {
"TorchPPOOptimizer.update": {
"total": 392.2367007629109,
"count": 22812,
"self": 392.2367007629109
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.00002453615889e-07,
"count": 1,
"self": 8.00002453615889e-07
},
"TrainerController._save_models": {
"total": 0.05100068300089333,
"count": 1,
"self": 0.001038469999912195,
"children": {
"RLTrainer._checkpoint": {
"total": 0.04996221300098114,
"count": 1,
"self": 0.04996221300098114
}
}
}
}
}
}
}