{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1934200525283813,
"min": 1.1934200525283813,
"max": 2.8712961673736572,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 11460.4130859375,
"min": 11460.4130859375,
"max": 29468.11328125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.586148262023926,
"min": 0.35947099328041077,
"max": 11.586148262023926,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2259.298828125,
"min": 69.73737335205078,
"max": 2342.745849609375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0664147037999047,
"min": 0.06157271980248647,
"max": 0.07212051466105514,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2656588151996188,
"min": 0.24629087920994588,
"max": 0.352478569106418,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19550381345199602,
"min": 0.11266436911754164,
"max": 0.2553255811625836,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7820152538079841,
"min": 0.4506574764701666,
"max": 1.276627905812918,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 22.681818181818183,
"min": 3.2045454545454546,
"max": 23.054545454545455,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 998.0,
"min": 141.0,
"max": 1268.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 22.681818181818183,
"min": 3.2045454545454546,
"max": 23.054545454545455,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 998.0,
"min": 141.0,
"max": 1268.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683741785",
"python_version": "3.10.6 (main, Mar 10 2023, 10:55:28) [GCC 11.3.0]",
"command_line_arguments": "/home/isaac/Documents/RLCourse/venv/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683742021"
},
"total": 236.3718885599992,
"count": 1,
"self": 0.2198700299995835,
"children": {
"run_training.setup": {
"total": 0.014653968999482458,
"count": 1,
"self": 0.014653968999482458
},
"TrainerController.start_learning": {
"total": 236.13736456100014,
"count": 1,
"self": 0.3070173458982026,
"children": {
"TrainerController._reset_env": {
"total": 2.4236985500010633,
"count": 1,
"self": 2.4236985500010633
},
"TrainerController.advance": {
"total": 233.3219095321001,
"count": 18206,
"self": 0.14354426696445444,
"children": {
"env_step": {
"total": 233.17836526513565,
"count": 18206,
"self": 171.502447514069,
"children": {
"SubprocessEnvManager._take_step": {
"total": 61.52136401895041,
"count": 18206,
"self": 0.9186693459541857,
"children": {
"TorchPolicy.evaluate": {
"total": 60.60269467299622,
"count": 18206,
"self": 60.60269467299622
}
}
},
"workers": {
"total": 0.15455373211625556,
"count": 18206,
"self": 0.0,
"children": {
"worker_root": {
"total": 235.7412006208233,
"count": 18206,
"is_parallel": true,
"self": 121.8941021467217,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020628759993996937,
"count": 1,
"is_parallel": true,
"self": 0.0009262149997084634,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011366609996912302,
"count": 10,
"is_parallel": true,
"self": 0.0011366609996912302
}
}
},
"UnityEnvironment.step": {
"total": 0.017121337001299253,
"count": 1,
"is_parallel": true,
"self": 0.00029900500157964416,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019235599938838277,
"count": 1,
"is_parallel": true,
"self": 0.00019235599938838277
},
"communicator.exchange": {
"total": 0.015762119999635615,
"count": 1,
"is_parallel": true,
"self": 0.015762119999635615
},
"steps_from_proto": {
"total": 0.0008678560006956104,
"count": 1,
"is_parallel": true,
"self": 0.0001688270003796788,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006990290003159316,
"count": 10,
"is_parallel": true,
"self": 0.0006990290003159316
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 113.84709847410159,
"count": 18205,
"is_parallel": true,
"self": 6.078745504462859,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.976606610820454,
"count": 18205,
"is_parallel": true,
"self": 2.976606610820454
},
"communicator.exchange": {
"total": 87.73231253299309,
"count": 18205,
"is_parallel": true,
"self": 87.73231253299309
},
"steps_from_proto": {
"total": 17.05943382582518,
"count": 18205,
"is_parallel": true,
"self": 3.0179141940243426,
"children": {
"_process_rank_one_or_two_observation": {
"total": 14.041519631800838,
"count": 182050,
"is_parallel": true,
"self": 14.041519631800838
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.240900049509946e-05,
"count": 1,
"self": 7.240900049509946e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 233.04972137379445,
"count": 145887,
"is_parallel": true,
"self": 1.3843523462546727,
"children": {
"process_trajectory": {
"total": 123.5052580745396,
"count": 145887,
"is_parallel": true,
"self": 123.02028629953747,
"children": {
"RLTrainer._checkpoint": {
"total": 0.48497177500212274,
"count": 4,
"is_parallel": true,
"self": 0.48497177500212274
}
}
},
"_update_policy": {
"total": 108.16011095300018,
"count": 90,
"is_parallel": true,
"self": 35.065464277065985,
"children": {
"TorchPPOOptimizer.update": {
"total": 73.0946466759342,
"count": 4587,
"is_parallel": true,
"self": 73.0946466759342
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08466672400027164,
"count": 1,
"self": 0.00048688200149626937,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08417984199877537,
"count": 1,
"self": 0.08417984199877537
}
}
}
}
}
}
}