ppo-PyramidsRND/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3753083050251007,
"min": 0.3742251396179199,
"max": 1.4061357975006104,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11193.1953125,
"min": 11193.1953125,
"max": 42656.53515625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989929.0,
"min": 29952.0,
"max": 989929.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989929.0,
"min": 29952.0,
"max": 989929.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5414407253265381,
"min": -0.09649653732776642,
"max": 0.5734546184539795,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 149.43763732910156,
"min": -23.255664825439453,
"max": 159.04246520996094,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.04994254186749458,
"min": 0.007526482455432415,
"max": 0.39979198575019836,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 13.784141540527344,
"min": 2.054729700088501,
"max": 94.75070190429688,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06867265075765398,
"min": 0.06426482251380618,
"max": 0.0750999800949627,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9614171106071558,
"min": 0.525699860664739,
"max": 1.029808779704777,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01684641740280376,
"min": 0.0001425578009271049,
"max": 0.01700807027473096,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23584984363925268,
"min": 0.0019958092129794686,
"max": 0.2551210541209644,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.431183237257143e-06,
"min": 7.431183237257143e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001040365653216,
"min": 0.0001040365653216,
"max": 0.0032557725147426,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024770285714286,
"min": 0.1024770285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346784000000004,
"min": 1.3886848,
"max": 2.4852574,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002574551542857143,
"min": 0.0002574551542857143,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003604372160000001,
"min": 0.003604372160000001,
"max": 0.10855721425999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012062369845807552,
"min": 0.012062369845807552,
"max": 0.4544130861759186,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16887317597866058,
"min": 0.16887317597866058,
"max": 3.180891513824463,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 335.72340425531917,
"min": 313.6875,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31558.0,
"min": 15984.0,
"max": 32709.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.559852612881284,
"min": -1.0000000521540642,
"max": 1.626896606904737,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 148.18599822372198,
"min": -31.991601645946503,
"max": 153.88359827548265,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.559852612881284,
"min": -1.0000000521540642,
"max": 1.626896606904737,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 148.18599822372198,
"min": -31.991601645946503,
"max": 153.88359827548265,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.041910821603347644,
"min": 0.04123519737368042,
"max": 9.349706057459116,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.9815280523180263,
"min": 3.6257154021877795,
"max": 149.59529691934586,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679208034",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679210197"
},
"total": 2163.0749941020003,
"count": 1,
"self": 0.576831370000491,
"children": {
"run_training.setup": {
"total": 0.1444848619998993,
"count": 1,
"self": 0.1444848619998993
},
"TrainerController.start_learning": {
"total": 2162.35367787,
"count": 1,
"self": 1.3523301169857405,
"children": {
"TrainerController._reset_env": {
"total": 5.835542792000069,
"count": 1,
"self": 5.835542792000069
},
"TrainerController.advance": {
"total": 2155.0730432870146,
"count": 63849,
"self": 1.493470893049107,
"children": {
"env_step": {
"total": 1521.9090362130285,
"count": 63849,
"self": 1410.2034311710581,
"children": {
"SubprocessEnvManager._take_step": {
"total": 110.87739392302524,
"count": 63849,
"self": 4.887321660017051,
"children": {
"TorchPolicy.evaluate": {
"total": 105.99007226300819,
"count": 62563,
"self": 105.99007226300819
}
}
},
"workers": {
"total": 0.8282111189450916,
"count": 63849,
"self": 0.0,
"children": {
"worker_root": {
"total": 2157.242933940994,
"count": 63849,
"is_parallel": true,
"self": 865.6832500049666,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019050760001846356,
"count": 1,
"is_parallel": true,
"self": 0.0006332310003926978,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012718449997919379,
"count": 8,
"is_parallel": true,
"self": 0.0012718449997919379
}
}
},
"UnityEnvironment.step": {
"total": 0.07155778499986809,
"count": 1,
"is_parallel": true,
"self": 0.0005572069997015205,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004888040000423644,
"count": 1,
"is_parallel": true,
"self": 0.0004888040000423644
},
"communicator.exchange": {
"total": 0.06875574700006837,
"count": 1,
"is_parallel": true,
"self": 0.06875574700006837
},
"steps_from_proto": {
"total": 0.0017560270000558376,
"count": 1,
"is_parallel": true,
"self": 0.00042490600003475265,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001331121000021085,
"count": 8,
"is_parallel": true,
"self": 0.001331121000021085
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1291.5596839360276,
"count": 63848,
"is_parallel": true,
"self": 31.154320796015554,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.82151703499403,
"count": 63848,
"is_parallel": true,
"self": 23.82151703499403
},
"communicator.exchange": {
"total": 1140.9575612299914,
"count": 63848,
"is_parallel": true,
"self": 1140.9575612299914
},
"steps_from_proto": {
"total": 95.62628487502661,
"count": 63848,
"is_parallel": true,
"self": 20.4890751201242,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.13720975490241,
"count": 510784,
"is_parallel": true,
"self": 75.13720975490241
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 631.6705361809372,
"count": 63849,
"self": 2.513996289980014,
"children": {
"process_trajectory": {
"total": 121.47925874595103,
"count": 63849,
"self": 121.26979283495075,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20946591100027945,
"count": 2,
"self": 0.20946591100027945
}
}
},
"_update_policy": {
"total": 507.6772811450062,
"count": 443,
"self": 323.41075435101516,
"children": {
"TorchPPOOptimizer.update": {
"total": 184.26652679399103,
"count": 22869,
"self": 184.26652679399103
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.389997674385086e-07,
"count": 1,
"self": 8.389997674385086e-07
},
"TrainerController._save_models": {
"total": 0.09276083499980814,
"count": 1,
"self": 0.0013950509996902838,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09136578400011786,
"count": 1,
"self": 0.09136578400011786
}
}
}
}
}
}
}