{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8099809885025024,
"min": 0.5570735335350037,
"max": 1.4301304817199707,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 24260.55078125,
"min": 16640.900390625,
"max": 43384.4375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989895.0,
"min": 29952.0,
"max": 989895.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989895.0,
"min": 29952.0,
"max": 989895.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07085061073303223,
"min": -0.09702851623296738,
"max": 0.3550646901130676,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -17.074996948242188,
"min": -23.28684425354004,
"max": 95.15733337402344,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.2467518150806427,
"min": -4.149189472198486,
"max": 2.8301491737365723,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -59.467185974121094,
"min": -1045.595703125,
"max": 758.47998046875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06598078415429752,
"min": 0.0638285303618802,
"max": 0.07223936065394596,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9237309781601654,
"min": 0.46930003225075445,
"max": 1.0345382941921022,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.1563212539409246,
"min": 0.00043586424553891255,
"max": 7.5306195433574326,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 2.1884975551729444,
"min": 0.004794506700928038,
"max": 105.42867360700406,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.391218964864288e-06,
"min": 7.391218964864288e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010347706550810003,
"min": 0.00010347706550810003,
"max": 0.0031413215528929006,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246370714285714,
"min": 0.10246370714285714,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4344919,
"min": 1.3691136000000002,
"max": 2.3471071,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002561243435714287,
"min": 0.0002561243435714287,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035857408100000016,
"min": 0.0035857408100000016,
"max": 0.10473599928999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.003970048855990171,
"min": 0.003537293989211321,
"max": 0.39252251386642456,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.055580683052539825,
"min": 0.04952211678028107,
"max": 2.747657537460327,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 946.1935483870968,
"min": 461.1666666666667,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29332.0,
"min": 15984.0,
"max": 33175.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.7532000508039228,
"min": -1.0000000521540642,
"max": 1.2472872419790788,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -23.349201574921608,
"min": -32.000001668930054,
"max": 81.55679872632027,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.7532000508039228,
"min": -1.0000000521540642,
"max": 1.2472872419790788,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -23.349201574921608,
"min": -32.000001668930054,
"max": 81.55679872632027,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03745276443594541,
"min": 0.03745276443594541,
"max": 7.801773116923869,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.1610356975143077,
"min": 1.1610356975143077,
"max": 124.8283698707819,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710584248",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1710586542"
},
"total": 2293.355035107,
"count": 1,
"self": 0.4967895539998608,
"children": {
"run_training.setup": {
"total": 0.05331629900001644,
"count": 1,
"self": 0.05331629900001644
},
"TrainerController.start_learning": {
"total": 2292.804929254,
"count": 1,
"self": 1.289001316991289,
"children": {
"TrainerController._reset_env": {
"total": 2.695360414000106,
"count": 1,
"self": 2.695360414000106
},
"TrainerController.advance": {
"total": 2288.723647578009,
"count": 63316,
"self": 1.3834439219999695,
"children": {
"env_step": {
"total": 1671.1591172130406,
"count": 63316,
"self": 1537.7376715890396,
"children": {
"SubprocessEnvManager._take_step": {
"total": 132.60257589397315,
"count": 63316,
"self": 4.692079314982948,
"children": {
"TorchPolicy.evaluate": {
"total": 127.9104965789902,
"count": 62567,
"self": 127.9104965789902
}
}
},
"workers": {
"total": 0.8188697300279273,
"count": 63316,
"self": 0.0,
"children": {
"worker_root": {
"total": 2287.521404177975,
"count": 63316,
"is_parallel": true,
"self": 873.3598938329621,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0029830769999534823,
"count": 1,
"is_parallel": true,
"self": 0.000979865000317659,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020032119996358233,
"count": 8,
"is_parallel": true,
"self": 0.0020032119996358233
}
}
},
"UnityEnvironment.step": {
"total": 0.04978881199986063,
"count": 1,
"is_parallel": true,
"self": 0.0007020989999091398,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000527425999962361,
"count": 1,
"is_parallel": true,
"self": 0.000527425999962361
},
"communicator.exchange": {
"total": 0.046488168999985646,
"count": 1,
"is_parallel": true,
"self": 0.046488168999985646
},
"steps_from_proto": {
"total": 0.00207111800000348,
"count": 1,
"is_parallel": true,
"self": 0.00040667999951438105,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001664438000489099,
"count": 8,
"is_parallel": true,
"self": 0.001664438000489099
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1414.1615103450129,
"count": 63315,
"is_parallel": true,
"self": 34.747392043035234,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.14877576398908,
"count": 63315,
"is_parallel": true,
"self": 25.14877576398908
},
"communicator.exchange": {
"total": 1251.8785185069771,
"count": 63315,
"is_parallel": true,
"self": 1251.8785185069771
},
"steps_from_proto": {
"total": 102.38682403101143,
"count": 63315,
"is_parallel": true,
"self": 20.542862439915098,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.84396159109633,
"count": 506520,
"is_parallel": true,
"self": 81.84396159109633
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 616.1810864429683,
"count": 63316,
"self": 2.4923835849785974,
"children": {
"process_trajectory": {
"total": 124.44263754999133,
"count": 63316,
"self": 124.23215826499131,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21047928500001944,
"count": 2,
"self": 0.21047928500001944
}
}
},
"_update_policy": {
"total": 489.2460653079984,
"count": 435,
"self": 288.1082502690058,
"children": {
"TorchPPOOptimizer.update": {
"total": 201.13781503899258,
"count": 22857,
"self": 201.13781503899258
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5069999790284783e-06,
"count": 1,
"self": 1.5069999790284783e-06
},
"TrainerController._save_models": {
"total": 0.09691843799964772,
"count": 1,
"self": 0.0016929989997152006,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09522543899993252,
"count": 1,
"self": 0.09522543899993252
}
}
}
}
}
}
}