{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.18317589163780212,
"min": 0.16339467465877533,
"max": 1.4302423000335693,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 5471.830078125,
"min": 4920.140625,
"max": 43387.83203125,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999902.0,
"min": 29952.0,
"max": 2999902.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999902.0,
"min": 29952.0,
"max": 2999902.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7723481059074402,
"min": -0.10346413403749466,
"max": 0.8570897579193115,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 228.6150360107422,
"min": -24.934856414794922,
"max": 261.4123840332031,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.013043629005551338,
"min": 0.0030720990616828203,
"max": 0.43157798051834106,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.8609142303466797,
"min": 0.8509714603424072,
"max": 102.28398132324219,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07008140640035788,
"min": 0.06315222284896821,
"max": 0.07610215950602045,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0512210960053683,
"min": 0.49090916727854866,
"max": 1.1415323925903067,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015848874939709073,
"min": 0.00013317596300608172,
"max": 0.01689037030102315,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2377331240956361,
"min": 0.0015981115560729808,
"max": 0.24140345549676567,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.487032837688891e-06,
"min": 1.487032837688891e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.2305492565333364e-05,
"min": 2.2305492565333364e-05,
"max": 0.003759455746848133,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10049564444444443,
"min": 0.10049564444444443,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5074346666666665,
"min": 1.3962282666666668,
"max": 2.7225589000000006,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.9514880000000075e-05,
"min": 5.9514880000000075e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008927232000000011,
"min": 0.0008927232000000011,
"max": 0.12532987148000002,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006677953992038965,
"min": 0.006240218877792358,
"max": 0.5930566191673279,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10016930848360062,
"min": 0.08736306428909302,
"max": 4.15139627456665,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 252.2991452991453,
"min": 218.91489361702128,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29519.0,
"min": 15984.0,
"max": 32769.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.713505968833581,
"min": -1.0000000521540642,
"max": 1.7808521017432213,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 200.48019835352898,
"min": -31.99640166759491,
"max": 252.88099844753742,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.713505968833581,
"min": -1.0000000521540642,
"max": 1.7808521017432213,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 200.48019835352898,
"min": -31.99640166759491,
"max": 252.88099844753742,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.017547774691538662,
"min": 0.014787722639880485,
"max": 11.427808299660683,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.0530896389100235,
"min": 1.892828497904702,
"max": 182.84493279457092,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691544180",
"python_version": "3.9.0 | packaged by conda-forge | (default, Nov 26 2020, 07:57:39) \n[GCC 9.3.0]",
"command_line_arguments": "/home/patonw/code/Unity/ml-agents/.mamba/envs/my-mamba-environment/bin/mlagents-learn config/ppo/PyramidsRND.yaml --env envs/Pyramids/Pyramids --run-id Pyramids-02 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu113",
"numpy_version": "1.21.2",
"end_time_seconds": "1691546848"
},
"total": 2667.2387524330006,
"count": 1,
"self": 0.21785052700033702,
"children": {
"run_training.setup": {
"total": 0.009867504999419907,
"count": 1,
"self": 0.009867504999419907
},
"TrainerController.start_learning": {
"total": 2667.011034401001,
"count": 1,
"self": 1.9650581697751477,
"children": {
"TrainerController._reset_env": {
"total": 2.850871491000362,
"count": 1,
"self": 2.850871491000362
},
"TrainerController.advance": {
"total": 2662.1478119632247,
"count": 194784,
"self": 1.9136386515147024,
"children": {
"env_step": {
"total": 1692.5154466116,
"count": 194784,
"self": 1486.4000926393492,
"children": {
"SubprocessEnvManager._take_step": {
"total": 204.85600924948994,
"count": 194784,
"self": 5.960243603010895,
"children": {
"TorchPolicy.evaluate": {
"total": 198.89576564647905,
"count": 187549,
"self": 198.89576564647905
}
}
},
"workers": {
"total": 1.2593447227609431,
"count": 194784,
"self": 0.0,
"children": {
"worker_root": {
"total": 2664.1007865956126,
"count": 194784,
"is_parallel": true,
"self": 1308.7857927869882,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007852920007280773,
"count": 1,
"is_parallel": true,
"self": 0.00023520399918197654,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005500880015461007,
"count": 8,
"is_parallel": true,
"self": 0.0005500880015461007
}
}
},
"UnityEnvironment.step": {
"total": 0.017291787000431214,
"count": 1,
"is_parallel": true,
"self": 0.00021072799972898792,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020643900097638834,
"count": 1,
"is_parallel": true,
"self": 0.00020643900097638834
},
"communicator.exchange": {
"total": 0.016191912000067532,
"count": 1,
"is_parallel": true,
"self": 0.016191912000067532
},
"steps_from_proto": {
"total": 0.0006827079996583052,
"count": 1,
"is_parallel": true,
"self": 0.000171674002558575,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005110339970997302,
"count": 8,
"is_parallel": true,
"self": 0.0005110339970997302
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1355.3149938086244,
"count": 194783,
"is_parallel": true,
"self": 34.34300277039074,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.092716686138374,
"count": 194783,
"is_parallel": true,
"self": 23.092716686138374
},
"communicator.exchange": {
"total": 1198.34512067606,
"count": 194783,
"is_parallel": true,
"self": 1198.34512067606
},
"steps_from_proto": {
"total": 99.5341536760352,
"count": 194783,
"is_parallel": true,
"self": 21.942711345516727,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.59144233051848,
"count": 1558264,
"is_parallel": true,
"self": 77.59144233051848
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 967.71872670011,
"count": 194784,
"self": 3.6993494265625486,
"children": {
"process_trajectory": {
"total": 175.21312881762788,
"count": 194784,
"self": 174.86641048962883,
"children": {
"RLTrainer._checkpoint": {
"total": 0.34671832799904223,
"count": 6,
"self": 0.34671832799904223
}
}
},
"_update_policy": {
"total": 788.8062484559196,
"count": 1389,
"self": 478.2296473118313,
"children": {
"TorchPPOOptimizer.update": {
"total": 310.5766011440883,
"count": 68391,
"self": 310.5766011440883
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.9100057114847e-07,
"count": 1,
"self": 5.9100057114847e-07
},
"TrainerController._save_models": {
"total": 0.047292186000049696,
"count": 1,
"self": 0.0007333229987125378,
"children": {
"RLTrainer._checkpoint": {
"total": 0.04655886300133716,
"count": 1,
"self": 0.04655886300133716
}
}
}
}
}
}
}