{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.36647021770477295,
"min": 0.36647021770477295,
"max": 1.4333029985427856,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10953.0615234375,
"min": 10953.0615234375,
"max": 43480.6796875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989968.0,
"min": 29952.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989968.0,
"min": 29952.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5184723138809204,
"min": -0.12650656700134277,
"max": 0.591368556022644,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 142.06141662597656,
"min": -30.03253746032715,
"max": 165.58319091796875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.008220351301133633,
"min": 0.0002671023830771446,
"max": 0.3495032787322998,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.2523763179779053,
"min": 0.07238474488258362,
"max": 82.832275390625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07107717379257025,
"min": 0.06360095677256072,
"max": 0.07660879743250495,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9950804330959835,
"min": 0.5214710641376695,
"max": 1.0476803669589572,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014276639248993828,
"min": 0.0005500951116110972,
"max": 0.01679616637522399,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19987294948591358,
"min": 0.004400760892888778,
"max": 0.23514632925313583,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.643733166407144e-06,
"min": 7.643733166407144e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010701226432970001,
"min": 0.00010701226432970001,
"max": 0.0035072891309036996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254787857142857,
"min": 0.10254787857142857,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356703,
"min": 1.3691136000000002,
"max": 2.5690963000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026453306928571444,
"min": 0.00026453306928571444,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003703462970000002,
"min": 0.003703462970000002,
"max": 0.11693272037,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.02067652903497219,
"min": 0.02067652903497219,
"max": 0.6578234434127808,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.28947141766548157,
"min": 0.28947141766548157,
"max": 4.604763984680176,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 364.5813953488372,
"min": 301.6458333333333,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31354.0,
"min": 15984.0,
"max": 34138.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5153317375218167,
"min": -1.0000000521540642,
"max": 1.677514567039907,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 128.80319768935442,
"min": -32.000001668930054,
"max": 161.04139843583107,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5153317375218167,
"min": -1.0000000521540642,
"max": 1.677514567039907,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 128.80319768935442,
"min": -32.000001668930054,
"max": 161.04139843583107,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08078804624349098,
"min": 0.0744586649446622,
"max": 14.769344542175531,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 6.866983930696733,
"min": 6.339833421734511,
"max": 236.3095126748085,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679577951",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679580160"
},
"total": 2208.871212098,
"count": 1,
"self": 0.8485184239998489,
"children": {
"run_training.setup": {
"total": 0.10528656399992542,
"count": 1,
"self": 0.10528656399992542
},
"TrainerController.start_learning": {
"total": 2207.91740711,
"count": 1,
"self": 1.3818655550185213,
"children": {
"TrainerController._reset_env": {
"total": 7.41780844699997,
"count": 1,
"self": 7.41780844699997
},
"TrainerController.advance": {
"total": 2198.972193064982,
"count": 63836,
"self": 1.4667232049910126,
"children": {
"env_step": {
"total": 1577.9283232659345,
"count": 63836,
"self": 1469.7489583289462,
"children": {
"SubprocessEnvManager._take_step": {
"total": 107.34195967904407,
"count": 63836,
"self": 4.748975744037125,
"children": {
"TorchPolicy.evaluate": {
"total": 102.59298393500694,
"count": 62566,
"self": 102.59298393500694
}
}
},
"workers": {
"total": 0.8374052579442832,
"count": 63836,
"self": 0.0,
"children": {
"worker_root": {
"total": 2203.1931559489335,
"count": 63836,
"is_parallel": true,
"self": 847.6841484380107,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020234939997862966,
"count": 1,
"is_parallel": true,
"self": 0.0006022300003678538,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014212639994184428,
"count": 8,
"is_parallel": true,
"self": 0.0014212639994184428
}
}
},
"UnityEnvironment.step": {
"total": 0.0487943749999431,
"count": 1,
"is_parallel": true,
"self": 0.0005744770001001598,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045666999994864454,
"count": 1,
"is_parallel": true,
"self": 0.00045666999994864454
},
"communicator.exchange": {
"total": 0.04602199800001472,
"count": 1,
"is_parallel": true,
"self": 0.04602199800001472
},
"steps_from_proto": {
"total": 0.0017412299998795788,
"count": 1,
"is_parallel": true,
"self": 0.0003821550003522134,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013590749995273654,
"count": 8,
"is_parallel": true,
"self": 0.0013590749995273654
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1355.5090075109229,
"count": 63835,
"is_parallel": true,
"self": 31.58585958495587,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.736510561978776,
"count": 63835,
"is_parallel": true,
"self": 22.736510561978776
},
"communicator.exchange": {
"total": 1207.3890739090466,
"count": 63835,
"is_parallel": true,
"self": 1207.3890739090466
},
"steps_from_proto": {
"total": 93.79756345494161,
"count": 63835,
"is_parallel": true,
"self": 19.80421265876612,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.99335079617549,
"count": 510680,
"is_parallel": true,
"self": 73.99335079617549
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 619.5771465940566,
"count": 63836,
"self": 2.5508085290966847,
"children": {
"process_trajectory": {
"total": 116.77466541297235,
"count": 63836,
"self": 116.4626436919716,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31202172100074677,
"count": 2,
"self": 0.31202172100074677
}
}
},
"_update_policy": {
"total": 500.2516726519875,
"count": 443,
"self": 317.94097543490807,
"children": {
"TorchPPOOptimizer.update": {
"total": 182.31069721707945,
"count": 22821,
"self": 182.31069721707945
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4979996194597334e-06,
"count": 1,
"self": 1.4979996194597334e-06
},
"TrainerController._save_models": {
"total": 0.14553854499990848,
"count": 1,
"self": 0.0019431959999565152,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14359534899995197,
"count": 1,
"self": 0.14359534899995197
}
}
}
}
}
}
}