{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7710626125335693,
"min": 0.7636110782623291,
"max": 1.4190030097961426,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 23242.912109375,
"min": 22871.6796875,
"max": 43046.875,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479909.0,
"min": 29952.0,
"max": 479909.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479909.0,
"min": 29952.0,
"max": 479909.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.026242196559906006,
"min": -0.10788119584321976,
"max": 0.06599686294794083,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -6.350611686706543,
"min": -25.89148712158203,
"max": 15.641257286071777,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01751587726175785,
"min": 0.01751587726175785,
"max": 0.3308188021183014,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.238842487335205,
"min": 4.238842487335205,
"max": 78.404052734375,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06597989980991133,
"min": 0.06597989980991133,
"max": 0.07288810912684891,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9237185973387585,
"min": 0.497877500437457,
"max": 1.0204335277758847,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0030782934719381415,
"min": 0.0002446024757923609,
"max": 0.00469527134312412,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.04309610860713398,
"min": 0.0026906272337159694,
"max": 0.05655404930065938,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.1001464428114284e-05,
"min": 2.1001464428114284e-05,
"max": 0.00029030126037577137,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0002940205019936,
"min": 0.0002940205019936,
"max": 0.0028111951629350004,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10700045714285714,
"min": 0.10700045714285714,
"max": 0.19676708571428575,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4980064,
"min": 1.3773696000000002,
"max": 2.1738472000000004,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0007093456685714286,
"min": 0.0007093456685714286,
"max": 0.00967703186285714,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00993083936,
"min": 0.00993083936,
"max": 0.09372279350000001,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01546267606317997,
"min": 0.01546267606317997,
"max": 0.30330702662467957,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.21647746860980988,
"min": 0.21647746860980988,
"max": 2.1231491565704346,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 940.8529411764706,
"min": 922.6470588235294,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31989.0,
"min": 15984.0,
"max": 33263.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.51546065102924,
"min": -1.0000000521540642,
"max": -0.3349529837860781,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -17.01020148396492,
"min": -31.99920167028904,
"max": -11.388401448726654,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.51546065102924,
"min": -1.0000000521540642,
"max": -0.3349529837860781,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -17.01020148396492,
"min": -31.99920167028904,
"max": -11.388401448726654,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.15026833643346574,
"min": 0.15026833643346574,
"max": 5.647617283277214,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.958855102304369,
"min": 4.958855102304369,
"max": 90.36187653243542,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1666024305",
"python_version": "3.7.14 (default, Sep 8 2022, 00:06:44) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1666025214"
},
"total": 909.0874928950002,
"count": 1,
"self": 0.42621377200021016,
"children": {
"run_training.setup": {
"total": 0.03377734000014243,
"count": 1,
"self": 0.03377734000014243
},
"TrainerController.start_learning": {
"total": 908.6275017829998,
"count": 1,
"self": 0.6061280190219804,
"children": {
"TrainerController._reset_env": {
"total": 6.295468884000002,
"count": 1,
"self": 6.295468884000002
},
"TrainerController.advance": {
"total": 901.6322487899788,
"count": 31489,
"self": 0.6583880920102274,
"children": {
"env_step": {
"total": 563.485305668914,
"count": 31489,
"self": 512.2381084108747,
"children": {
"SubprocessEnvManager._take_step": {
"total": 50.911898687139455,
"count": 31489,
"self": 2.1852822551595636,
"children": {
"TorchPolicy.evaluate": {
"total": 48.72661643197989,
"count": 31317,
"self": 16.732239625009242,
"children": {
"TorchPolicy.sample_actions": {
"total": 31.99437680697065,
"count": 31317,
"self": 31.99437680697065
}
}
}
}
},
"workers": {
"total": 0.33529857089979487,
"count": 31489,
"self": 0.0,
"children": {
"worker_root": {
"total": 906.9020868020166,
"count": 31489,
"is_parallel": true,
"self": 442.17656626107464,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001854775000083464,
"count": 1,
"is_parallel": true,
"self": 0.000694865000241407,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001159909999842057,
"count": 8,
"is_parallel": true,
"self": 0.001159909999842057
}
}
},
"UnityEnvironment.step": {
"total": 0.049217476999729115,
"count": 1,
"is_parallel": true,
"self": 0.0005143329995007662,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047858399966571596,
"count": 1,
"is_parallel": true,
"self": 0.00047858399966571596
},
"communicator.exchange": {
"total": 0.04668448600023112,
"count": 1,
"is_parallel": true,
"self": 0.04668448600023112
},
"steps_from_proto": {
"total": 0.001540074000331515,
"count": 1,
"is_parallel": true,
"self": 0.00039195499948618817,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011481190008453268,
"count": 8,
"is_parallel": true,
"self": 0.0011481190008453268
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 464.72552054094194,
"count": 31488,
"is_parallel": true,
"self": 13.522045822973723,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.208313581973925,
"count": 31488,
"is_parallel": true,
"self": 11.208313581973925
},
"communicator.exchange": {
"total": 395.7148790118986,
"count": 31488,
"is_parallel": true,
"self": 395.7148790118986
},
"steps_from_proto": {
"total": 44.28028212409572,
"count": 31488,
"is_parallel": true,
"self": 10.889729428132341,
"children": {
"_process_rank_one_or_two_observation": {
"total": 33.39055269596338,
"count": 251904,
"is_parallel": true,
"self": 33.39055269596338
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 337.4885550290546,
"count": 31489,
"self": 0.952663256033702,
"children": {
"process_trajectory": {
"total": 74.08651069901407,
"count": 31489,
"self": 73.97423412001444,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11227657899962651,
"count": 1,
"self": 0.11227657899962651
}
}
},
"_update_policy": {
"total": 262.44938107400685,
"count": 207,
"self": 102.6062426110193,
"children": {
"TorchPPOOptimizer.update": {
"total": 159.84313846298755,
"count": 11475,
"self": 159.84313846298755
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2239997886354104e-06,
"count": 1,
"self": 1.2239997886354104e-06
},
"TrainerController._save_models": {
"total": 0.09365486599926953,
"count": 1,
"self": 0.001981650999368867,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09167321499990067,
"count": 1,
"self": 0.09167321499990067
}
}
}
}
}
}
}