{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.41511183977127075,
"min": 0.379581481218338,
"max": 0.6043116450309753,
"count": 50
},
"Pyramids.Policy.Entropy.sum": {
"value": 4283.9541015625,
"min": 3845.06591796875,
"max": 6246.1650390625,
"count": 50
},
"Pyramids.Step.mean": {
"value": 1499962.0,
"min": 1009976.0,
"max": 1499962.0,
"count": 50
},
"Pyramids.Step.sum": {
"value": 1499962.0,
"min": 1009976.0,
"max": 1499962.0,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4872138202190399,
"min": 0.05979938432574272,
"max": 0.5726072788238525,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 45.798099517822266,
"min": 4.7839508056640625,
"max": 53.252479553222656,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.03181224316358566,
"min": -0.03181224316358566,
"max": 0.08744346350431442,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.9903507232666016,
"min": -2.9903507232666016,
"max": 8.132242202758789,
"count": 50
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06694401380761215,
"min": 0.0607478265146104,
"max": 0.0778923243827497,
"count": 50
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.3347200690380608,
"min": 0.18451486450309554,
"max": 0.38359445321839303,
"count": 50
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.018834684311877938,
"min": 0.005653721174288269,
"max": 0.018834684311877938,
"count": 50
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.09417342155938968,
"min": 0.022614884697153077,
"max": 0.09417342155938968,
"count": 50
},
"Pyramids.Policy.LearningRate.mean": {
"value": 8.854597048799994e-07,
"min": 8.854597048799994e-07,
"max": 9.87590004136889e-05,
"count": 50
},
"Pyramids.Policy.LearningRate.sum": {
"value": 4.427298524399997e-06,
"min": 4.427298524399997e-06,
"max": 0.00048500073833319994,
"count": 50
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10029512,
"min": 0.10029512,
"max": 0.13291964444444446,
"count": 50
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.5014756,
"min": 0.39875893333333334,
"max": 0.6616668000000001,
"count": 50
},
"Pyramids.Policy.Beta.mean": {
"value": 3.948248799999999e-05,
"min": 3.948248799999999e-05,
"max": 0.0032986724800000006,
"count": 50
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00019741243999999992,
"min": 0.00019741243999999992,
"max": 0.01620051332,
"count": 50
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009386932477355003,
"min": 0.008871779777109623,
"max": 0.021980872377753258,
"count": 50
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.04693466052412987,
"min": 0.03788800165057182,
"max": 0.08285048604011536,
"count": 50
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 351.6551724137931,
"min": 322.27272727272725,
"max": 789.2,
"count": 50
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 10198.0,
"min": 1897.0,
"max": 13663.0,
"count": 50
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5103586097729618,
"min": 0.24627997279167174,
"max": 1.6636071311576026,
"count": 50
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 43.80039968341589,
"min": 2.4627997279167175,
"max": 54.72899912297726,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5103586097729618,
"min": 0.24627997279167174,
"max": 1.6636071311576026,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 43.80039968341589,
"min": 2.4627997279167175,
"max": 54.72899912297726,
"count": 50
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03490569133407854,
"min": 0.03359096942469478,
"max": 0.13430134480586275,
"count": 50
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.0122650486882776,
"min": 0.33041048038285226,
"max": 2.148821516893804,
"count": 50
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1700669876",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1700671097"
},
"total": 1220.7294129299999,
"count": 1,
"self": 0.47803199099871563,
"children": {
"run_training.setup": {
"total": 0.0446211200005564,
"count": 1,
"self": 0.0446211200005564
},
"TrainerController.start_learning": {
"total": 1220.2067598190006,
"count": 1,
"self": 0.7437315478828168,
"children": {
"TrainerController._reset_env": {
"total": 4.134272136000618,
"count": 1,
"self": 4.134272136000618
},
"TrainerController.advance": {
"total": 1215.2507424591176,
"count": 32203,
"self": 0.7491293730899997,
"children": {
"env_step": {
"total": 883.3887531708888,
"count": 32203,
"self": 813.4666496741065,
"children": {
"SubprocessEnvManager._take_step": {
"total": 69.4743050678926,
"count": 32203,
"self": 2.4017155919837023,
"children": {
"TorchPolicy.evaluate": {
"total": 67.0725894759089,
"count": 31319,
"self": 67.0725894759089
}
}
},
"workers": {
"total": 0.44779842888965504,
"count": 32203,
"self": 0.0,
"children": {
"worker_root": {
"total": 1217.540503331922,
"count": 32203,
"is_parallel": true,
"self": 468.9795350568611,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019273340003564954,
"count": 1,
"is_parallel": true,
"self": 0.0007131560014386196,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012141779989178758,
"count": 8,
"is_parallel": true,
"self": 0.0012141779989178758
}
}
},
"UnityEnvironment.step": {
"total": 0.050393460000123014,
"count": 1,
"is_parallel": true,
"self": 0.0005697900005543488,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004963479996149545,
"count": 1,
"is_parallel": true,
"self": 0.0004963479996149545
},
"communicator.exchange": {
"total": 0.047540343000036955,
"count": 1,
"is_parallel": true,
"self": 0.047540343000036955
},
"steps_from_proto": {
"total": 0.0017869789999167551,
"count": 1,
"is_parallel": true,
"self": 0.0003905709991158801,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001396408000800875,
"count": 8,
"is_parallel": true,
"self": 0.001396408000800875
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 748.560968275061,
"count": 32202,
"is_parallel": true,
"self": 17.396085019255224,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.869784766995508,
"count": 32202,
"is_parallel": true,
"self": 12.869784766995508
},
"communicator.exchange": {
"total": 666.1160614319333,
"count": 32202,
"is_parallel": true,
"self": 666.1160614319333
},
"steps_from_proto": {
"total": 52.17903705687695,
"count": 32202,
"is_parallel": true,
"self": 10.672621504089875,
"children": {
"_process_rank_one_or_two_observation": {
"total": 41.506415552787075,
"count": 257616,
"is_parallel": true,
"self": 41.506415552787075
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 331.11285991513887,
"count": 32203,
"self": 1.4475519231664293,
"children": {
"process_trajectory": {
"total": 67.92937983497086,
"count": 32203,
"self": 67.83041549197151,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09896434299935208,
"count": 1,
"self": 0.09896434299935208
}
}
},
"_update_policy": {
"total": 261.7359281570016,
"count": 235,
"self": 155.89730558705105,
"children": {
"TorchPPOOptimizer.update": {
"total": 105.83862256995053,
"count": 11376,
"self": 105.83862256995053
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.264999809791334e-06,
"count": 1,
"self": 1.264999809791334e-06
},
"TrainerController._save_models": {
"total": 0.07801241099969047,
"count": 1,
"self": 0.0017386769995937357,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07627373400009674,
"count": 1,
"self": 0.07627373400009674
}
}
}
}
}
}
}