{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9437726736068726,
"min": 0.9437726736068726,
"max": 2.875823974609375,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9000.759765625,
"min": 9000.759765625,
"max": 29451.314453125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.210151672363281,
"min": 0.41365987062454224,
"max": 13.210151672363281,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2575.9794921875,
"min": 80.25001525878906,
"max": 2668.538330078125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07513870276264188,
"min": 0.05882720539202459,
"max": 0.07513870276264188,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3005548110505675,
"min": 0.23530882156809835,
"max": 0.36215749583389684,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1940895297071513,
"min": 0.10861266785821713,
"max": 0.3091184555315504,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7763581188286052,
"min": 0.43445067143286853,
"max": 1.4009270802432414,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.136363636363637,
"min": 3.159090909090909,
"max": 26.163636363636364,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1150.0,
"min": 139.0,
"max": 1439.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.136363636363637,
"min": 3.159090909090909,
"max": 26.163636363636364,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1150.0,
"min": 139.0,
"max": 1439.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1707159807",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1707160265"
},
"total": 457.52183259000003,
"count": 1,
"self": 0.4455861980000009,
"children": {
"run_training.setup": {
"total": 0.05128259599996454,
"count": 1,
"self": 0.05128259599996454
},
"TrainerController.start_learning": {
"total": 457.02496379600007,
"count": 1,
"self": 0.6270486329963205,
"children": {
"TrainerController._reset_env": {
"total": 3.228359470999976,
"count": 1,
"self": 3.228359470999976
},
"TrainerController.advance": {
"total": 453.0563310710038,
"count": 18200,
"self": 0.28339971103309836,
"children": {
"env_step": {
"total": 452.7729313599707,
"count": 18200,
"self": 293.8548947089803,
"children": {
"SubprocessEnvManager._take_step": {
"total": 158.60191928800623,
"count": 18200,
"self": 1.5173476920056146,
"children": {
"TorchPolicy.evaluate": {
"total": 157.08457159600061,
"count": 18200,
"self": 157.08457159600061
}
}
},
"workers": {
"total": 0.3161173629841869,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 455.8075109710055,
"count": 18200,
"is_parallel": true,
"self": 227.7851270139979,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005612427999949432,
"count": 1,
"is_parallel": true,
"self": 0.004083110000237866,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015293179997115658,
"count": 10,
"is_parallel": true,
"self": 0.0015293179997115658
}
}
},
"UnityEnvironment.step": {
"total": 0.03620067100018787,
"count": 1,
"is_parallel": true,
"self": 0.0006744990000697726,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004326130001572892,
"count": 1,
"is_parallel": true,
"self": 0.0004326130001572892
},
"communicator.exchange": {
"total": 0.033123527000043396,
"count": 1,
"is_parallel": true,
"self": 0.033123527000043396
},
"steps_from_proto": {
"total": 0.00197003199991741,
"count": 1,
"is_parallel": true,
"self": 0.0004575339999064454,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015124980000109645,
"count": 10,
"is_parallel": true,
"self": 0.0015124980000109645
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 228.0223839570076,
"count": 18199,
"is_parallel": true,
"self": 10.931008968995911,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.382312677998925,
"count": 18199,
"is_parallel": true,
"self": 5.382312677998925
},
"communicator.exchange": {
"total": 177.82797039502088,
"count": 18199,
"is_parallel": true,
"self": 177.82797039502088
},
"steps_from_proto": {
"total": 33.881091914991885,
"count": 18199,
"is_parallel": true,
"self": 6.5081054949225745,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.37298642006931,
"count": 181990,
"is_parallel": true,
"self": 27.37298642006931
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.946199990423338e-05,
"count": 1,
"self": 3.946199990423338e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 447.8070153040942,
"count": 684061,
"is_parallel": true,
"self": 13.918519254079229,
"children": {
"process_trajectory": {
"total": 246.64140001901592,
"count": 684061,
"is_parallel": true,
"self": 245.92595572801588,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7154442910000398,
"count": 4,
"is_parallel": true,
"self": 0.7154442910000398
}
}
},
"_update_policy": {
"total": 187.24709603099905,
"count": 90,
"is_parallel": true,
"self": 50.966571411996256,
"children": {
"TorchPPOOptimizer.update": {
"total": 136.2805246190028,
"count": 4584,
"is_parallel": true,
"self": 136.2805246190028
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11318515900006787,
"count": 1,
"self": 0.0011453220001840236,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11203983699988385,
"count": 1,
"self": 0.11203983699988385
}
}
}
}
}
}
}