{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.1564805954694748, "min": 0.14611369371414185, "max": 1.5017271041870117, "count": 100 }, "Pyramids.Policy.Entropy.sum": { "value": 4669.380859375, "min": 4402.11328125, "max": 45556.39453125, "count": 100 }, "Pyramids.Step.mean": { "value": 2999947.0, "min": 29952.0, "max": 2999947.0, "count": 100 }, "Pyramids.Step.sum": { "value": 2999947.0, "min": 29952.0, "max": 2999947.0, "count": 100 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.8258399963378906, "min": -0.057397373020648956, "max": 0.8956943154335022, "count": 100 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 246.10031127929688, "min": -13.947561264038086, "max": 279.013427734375, "count": 100 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.01756458356976509, "min": -0.05148947611451149, "max": 0.26041775941848755, "count": 100 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 5.234245777130127, "min": -14.88045883178711, "max": 62.76068115234375, "count": 100 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06569205751065073, "min": 0.0626093732350534, "max": 0.0754291667706056, "count": 100 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9196888051491102, "min": 0.48254923466472605, "max": 1.131437501559084, "count": 100 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.017662993425187713, "min": 0.001756493660329726, "max": 0.017662993425187713, "count": 100 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.247281907952628, "min": 0.02107792392395671, "max": 0.25602954596494476, "count": 100 }, "Pyramids.Policy.LearningRate.mean": { "value": 1.4611066558547655e-06, "min": 1.4611066558547655e-06, "max": 0.00029838354339596195, "count": 100 }, "Pyramids.Policy.LearningRate.sum": { "value": 2.0455493181966718e-05, "min": 2.0455493181966718e-05, "max": 0.003982332672555833, "count": 100 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10048700238095239, "min": 0.10048700238095239, "max": 0.19946118095238097, "count": 100 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4068180333333333, "min": 1.3962282666666668, "max": 2.827444166666667, "count": 100 }, "Pyramids.Policy.Beta.mean": { "value": 5.865153785714299e-05, "min": 5.865153785714299e-05, "max": 0.009946171977142856, "count": 100 }, "Pyramids.Policy.Beta.sum": { "value": 0.0008211215300000019, "min": 0.0008211215300000019, "max": 0.13276167225000002, "count": 100 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.007950475439429283, "min": 0.007531489245593548, "max": 0.4632280468940735, "count": 100 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.11130665242671967, "min": 0.10544084757566452, "max": 3.242596387863159, "count": 100 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 225.53237410071944, "min": 189.76470588235293, "max": 999.0, "count": 100 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 31349.0, "min": 15984.0, "max": 33337.0, "count": 100 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.7312863167348524, "min": -1.0000000521540642, "max": 1.8100723571291095, "count": 100 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 240.6487980261445, "min": -29.04480180889368, "max": 275.13099828362465, "count": 100 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.7312863167348524, "min": -1.0000000521540642, "max": 1.8100723571291095, "count": 100 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 240.6487980261445, "min": -29.04480180889368, "max": 275.13099828362465, "count": 100 }, 
"Pyramids.Policy.RndReward.mean": { "value": 0.018516810912083626, "min": 0.015585855099073486, "max": 9.534254821017385, "count": 100 }, "Pyramids.Policy.RndReward.sum": { "value": 2.573836716779624, "min": 2.295636750648555, "max": 152.54807713627815, "count": 100 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 100 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 100 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1709055303", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.2.1+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1709062713" }, "total": 7410.127723216, "count": 1, "self": 0.4757257640003445, "children": { "run_training.setup": { "total": 0.07739858500008268, "count": 1, "self": 0.07739858500008268 }, "TrainerController.start_learning": { "total": 7409.574598867, "count": 1, "self": 4.279242892873299, "children": { "TrainerController._reset_env": { "total": 2.4703196219998063, "count": 1, "self": 2.4703196219998063 }, "TrainerController.advance": { "total": 7402.735617617126, "count": 195851, "self": 4.355840733009245, "children": { "env_step": { "total": 5523.572027642125, "count": 195851, "self": 5135.083199738637, "children": { "SubprocessEnvManager._take_step": { "total": 385.93337601269764, "count": 195851, "self": 14.404279658809173, "children": { "TorchPolicy.evaluate": { "total": 371.52909635388846, "count": 187556, "self": 371.52909635388846 } } }, "workers": { "total": 2.555451890790664, "count": 195851, "self": 0.0, "children": { "worker_root": { "total": 7393.79000448414, "count": 195851, "is_parallel": true, "self": 2619.057318687066, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0020619059998807643, "count": 1, "is_parallel": true, "self": 0.0006067330000405491, "children": { "_process_rank_one_or_two_observation": { "total": 0.0014551729998402152, "count": 8, "is_parallel": true, "self": 0.0014551729998402152 } } }, "UnityEnvironment.step": { "total": 0.047563641000124335, "count": 1, "is_parallel": true, "self": 0.0005727400002797367, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0005059449999862409, "count": 1, "is_parallel": true, "self": 0.0005059449999862409 }, "communicator.exchange": { "total": 0.04483737400005339, "count": 1, "is_parallel": true, "self": 0.04483737400005339 }, "steps_from_proto": { "total": 0.0016475819998049701, "count": 1, "is_parallel": true, "self": 0.0003562829999737005, "children": { "_process_rank_one_or_two_observation": { "total": 0.0012912989998312696, "count": 8, "is_parallel": true, "self": 0.0012912989998312696 } } } } } } }, "UnityEnvironment.step": { "total": 4774.732685797074, "count": 195850, "is_parallel": true, "self": 104.87227943727612, "children": { "UnityEnvironment._generate_step_input": { "total": 75.799227560801, "count": 195850, "is_parallel": true, "self": 75.799227560801 }, "communicator.exchange": { "total": 4286.323968504949, "count": 195850, "is_parallel": true, "self": 4286.323968504949 }, "steps_from_proto": { "total": 307.7372102940474, "count": 
195850, "is_parallel": true, "self": 62.529570978122365, "children": { "_process_rank_one_or_two_observation": { "total": 245.207639315925, "count": 1566800, "is_parallel": true, "self": 245.207639315925 } } } } } } } } } } }, "trainer_advance": { "total": 1874.8077492419916, "count": 195851, "self": 8.33212519414974, "children": { "process_trajectory": { "total": 393.3416029658597, "count": 195851, "self": 392.80233537586037, "children": { "RLTrainer._checkpoint": { "total": 0.5392675899993264, "count": 6, "self": 0.5392675899993264 } } }, "_update_policy": { "total": 1473.1340210819822, "count": 1402, "self": 870.8290643870023, "children": { "TorchPPOOptimizer.update": { "total": 602.3049566949799, "count": 68376, "self": 602.3049566949799 } } } } } } }, "trainer_threads": { "total": 1.010001142276451e-06, "count": 1, "self": 1.010001142276451e-06 }, "TrainerController._save_models": { "total": 0.08941772499929357, "count": 1, "self": 0.001957757000127458, "children": { "RLTrainer._checkpoint": { "total": 0.08745996799916611, "count": 1, "self": 0.08745996799916611 } } } } } } }