{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3001975417137146,
"min": 0.2905435562133789,
"max": 0.40185609459877014,
"count": 7
},
"Pyramids.Policy.Entropy.sum": {
"value": 9058.7607421875,
"min": 8651.224609375,
"max": 11241.087890625,
"count": 7
},
"Pyramids.Step.mean": {
"value": 989926.0,
"min": 809960.0,
"max": 989926.0,
"count": 7
},
"Pyramids.Step.sum": {
"value": 989926.0,
"min": 809960.0,
"max": 989926.0,
"count": 7
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4600345492362976,
"min": 0.40502262115478516,
"max": 0.5170003771781921,
"count": 7
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 126.96953582763672,
"min": 108.14103698730469,
"max": 143.2091064453125,
"count": 7
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.030832238495349884,
"min": -0.012912614271044731,
"max": 0.030832238495349884,
"count": 7
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 8.509697914123535,
"min": -3.5122311115264893,
"max": 8.509697914123535,
"count": 7
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 351.64285714285717,
"min": 351.64285714285717,
"max": 415.81944444444446,
"count": 7
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29538.0,
"min": 23555.0,
"max": 30950.0,
"count": 7
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.553083317443019,
"min": 1.389947343029474,
"max": 1.5703325310592042,
"count": 7
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 130.45899866521358,
"min": 98.75919900089502,
"max": 135.04859767109156,
"count": 7
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.553083317443019,
"min": 1.389947343029474,
"max": 1.5703325310592042,
"count": 7
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 130.45899866521358,
"min": 98.75919900089502,
"max": 135.04859767109156,
"count": 7
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03744134823743176,
"min": 0.03744134823743176,
"max": 0.04378624255760981,
"count": 7
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.145073251944268,
"min": 2.5631008547206875,
"max": 3.302659035194665,
"count": 7
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06901744256416957,
"min": 0.06521061695982805,
"max": 0.06981680575887515,
"count": 7
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.966244195898374,
"min": 0.8023969015824451,
"max": 0.9781592543974207,
"count": 7
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.018192482356209353,
"min": 0.014982802643418029,
"max": 0.018192482356209353,
"count": 7
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.25469475298693095,
"min": 0.17979363172101634,
"max": 0.25469475298693095,
"count": 7
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.51439035237857e-06,
"min": 7.51439035237857e-06,
"max": 6.065330478225833e-05,
"count": 7
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010520146493329997,
"min": 0.00010520146493329997,
"max": 0.0007334531555159999,
"count": 7
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250476428571428,
"min": 0.10250476428571428,
"max": 0.12021774166666667,
"count": 7
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350667,
"min": 1.4350667,
"max": 1.6444839999999998,
"count": 7
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002602259521428571,
"min": 0.0002602259521428571,
"max": 0.0020297523925,
"count": 7
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036431633299999996,
"min": 0.0036431633299999996,
"max": 0.0245639516,
"count": 7
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010069741867482662,
"min": 0.009762607514858246,
"max": 0.010430343449115753,
"count": 7
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14097638428211212,
"min": 0.12516412138938904,
"max": 0.15222415328025818,
"count": 7
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679614963",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679615772"
},
"total": 809.0315722919995,
"count": 1,
"self": 0.6313388959993063,
"children": {
"run_training.setup": {
"total": 0.13723234999997658,
"count": 1,
"self": 0.13723234999997658
},
"TrainerController.start_learning": {
"total": 808.2630010460002,
"count": 1,
"self": 0.5253342729747601,
"children": {
"TrainerController._reset_env": {
"total": 4.500169143000221,
"count": 1,
"self": 4.500169143000221
},
"TrainerController.advance": {
"total": 803.1139770020254,
"count": 13998,
"self": 0.5389865869374262,
"children": {
"env_step": {
"total": 544.9573525120718,
"count": 13998,
"self": 514.3701943310189,
"children": {
"SubprocessEnvManager._take_step": {
"total": 30.265318044025207,
"count": 13998,
"self": 1.5033554050160092,
"children": {
"TorchPolicy.evaluate": {
"total": 28.761962639009198,
"count": 13558,
"self": 28.761962639009198
}
}
},
"workers": {
"total": 0.32184013702772063,
"count": 13998,
"self": 0.0,
"children": {
"worker_root": {
"total": 806.4989569350123,
"count": 13998,
"is_parallel": true,
"self": 332.2817186190314,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002066731000013533,
"count": 1,
"is_parallel": true,
"self": 0.0006390210000972729,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014277099999162601,
"count": 8,
"is_parallel": true,
"self": 0.0014277099999162601
}
}
},
"UnityEnvironment.step": {
"total": 0.06863061900003231,
"count": 1,
"is_parallel": true,
"self": 0.0006398000000444881,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005083780001768901,
"count": 1,
"is_parallel": true,
"self": 0.0005083780001768901
},
"communicator.exchange": {
"total": 0.06531844599976466,
"count": 1,
"is_parallel": true,
"self": 0.06531844599976466
},
"steps_from_proto": {
"total": 0.0021639950000462704,
"count": 1,
"is_parallel": true,
"self": 0.00044409099928088835,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001719904000765382,
"count": 8,
"is_parallel": true,
"self": 0.001719904000765382
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 474.2172383159809,
"count": 13997,
"is_parallel": true,
"self": 9.655909960993995,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.832029992981006,
"count": 13997,
"is_parallel": true,
"self": 5.832029992981006
},
"communicator.exchange": {
"total": 429.54272611801525,
"count": 13997,
"is_parallel": true,
"self": 429.54272611801525
},
"steps_from_proto": {
"total": 29.186572243990668,
"count": 13997,
"is_parallel": true,
"self": 6.725139010022303,
"children": {
"_process_rank_one_or_two_observation": {
"total": 22.461433233968364,
"count": 111976,
"is_parallel": true,
"self": 22.461433233968364
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 257.6176379030162,
"count": 13998,
"self": 1.066046331015059,
"children": {
"process_trajectory": {
"total": 35.85074399499808,
"count": 13998,
"self": 35.72269414799757,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12804984700051136,
"count": 1,
"self": 0.12804984700051136
}
}
},
"_update_policy": {
"total": 220.70084757700306,
"count": 102,
"self": 90.52184287402224,
"children": {
"TorchPPOOptimizer.update": {
"total": 130.17900470298082,
"count": 4908,
"self": 130.17900470298082
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1889997040270828e-06,
"count": 1,
"self": 1.1889997040270828e-06
},
"TrainerController._save_models": {
"total": 0.12351943900011975,
"count": 1,
"self": 0.002713141000640462,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12080629799947928,
"count": 1,
"self": 0.12080629799947928
}
}
}
}
}
}
}