{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.0538735389709473,
"min": 2.0538735389709473,
"max": 2.8903074264526367,
"count": 15
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 21056.3125,
"min": 20229.134765625,
"max": 29631.431640625,
"count": 15
},
"SnowballTarget.Step.mean": {
"value": 149984.0,
"min": 9952.0,
"max": 149984.0,
"count": 15
},
"SnowballTarget.Step.sum": {
"value": 149984.0,
"min": 9952.0,
"max": 149984.0,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 4.231385707855225,
"min": 0.14891785383224487,
"max": 4.231385707855225,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 867.43408203125,
"min": 28.890064239501953,
"max": 867.43408203125,
"count": 15
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 15
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 15
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 16.70909090909091,
"min": 2.772727272727273,
"max": 16.70909090909091,
"count": 15
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 919.0,
"min": 122.0,
"max": 919.0,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 16.70909090909091,
"min": 2.772727272727273,
"max": 16.70909090909091,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 919.0,
"min": 122.0,
"max": 919.0,
"count": 15
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 15
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 15
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0417517071046556,
"min": 0.028716993547034993,
"max": 0.0417517071046556,
"count": 14
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.0417517071046556,
"min": 0.028716993547034993,
"max": 0.0417517071046556,
"count": 14
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1918023142963648,
"min": 0.05151942378235242,
"max": 0.1918023142963648,
"count": 14
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.1918023142963648,
"min": 0.05151942378235242,
"max": 0.1918023142963648,
"count": 14
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 6.784097738666682e-06,
"min": 6.784097738666682e-06,
"max": 0.0002781760072746666,
"count": 14
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 6.784097738666682e-06,
"min": 6.784097738666682e-06,
"max": 0.0002781760072746666,
"count": 14
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10226133333333333,
"min": 0.10226133333333333,
"max": 0.19272533333333328,
"count": 14
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10226133333333333,
"min": 0.10226133333333333,
"max": 0.19272533333333328,
"count": 14
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00012284053333333356,
"min": 0.00012284053333333356,
"max": 0.004636994133333332,
"count": 14
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00012284053333333356,
"min": 0.00012284053333333356,
"max": 0.004636994133333332,
"count": 14
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694415126",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1694415518"
},
"total": 392.46705395899994,
"count": 1,
"self": 0.43804733899969506,
"children": {
"run_training.setup": {
"total": 0.045349221000151374,
"count": 1,
"self": 0.045349221000151374
},
"TrainerController.start_learning": {
"total": 391.9836573990001,
"count": 1,
"self": 0.40211090198567945,
"children": {
"TrainerController._reset_env": {
"total": 4.966020559000071,
"count": 1,
"self": 4.966020559000071
},
"TrainerController.advance": {
"total": 386.4663362260146,
"count": 13673,
"self": 0.18922777101215615,
"children": {
"env_step": {
"total": 386.27710845500246,
"count": 13673,
"self": 294.864554015011,
"children": {
"SubprocessEnvManager._take_step": {
"total": 91.2114447800052,
"count": 13673,
"self": 1.5385142060122234,
"children": {
"TorchPolicy.evaluate": {
"total": 89.67293057399297,
"count": 13673,
"self": 89.67293057399297
}
}
},
"workers": {
"total": 0.20110965998628672,
"count": 13673,
"self": 0.0,
"children": {
"worker_root": {
"total": 390.8088132300145,
"count": 13673,
"is_parallel": true,
"self": 206.00392812802852,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005855769999925542,
"count": 1,
"is_parallel": true,
"self": 0.004332075999855078,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015236940000704635,
"count": 10,
"is_parallel": true,
"self": 0.0015236940000704635
}
}
},
"UnityEnvironment.step": {
"total": 0.07767605899994123,
"count": 1,
"is_parallel": true,
"self": 0.0006785279997529869,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002907699999923352,
"count": 1,
"is_parallel": true,
"self": 0.0002907699999923352
},
"communicator.exchange": {
"total": 0.06470752500013077,
"count": 1,
"is_parallel": true,
"self": 0.06470752500013077
},
"steps_from_proto": {
"total": 0.011999236000065139,
"count": 1,
"is_parallel": true,
"self": 0.0004810750003798603,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.011518160999685279,
"count": 10,
"is_parallel": true,
"self": 0.011518160999685279
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 184.80488510198597,
"count": 13672,
"is_parallel": true,
"self": 8.064186371970209,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.114863584999284,
"count": 13672,
"is_parallel": true,
"self": 4.114863584999284
},
"communicator.exchange": {
"total": 145.3036681260187,
"count": 13672,
"is_parallel": true,
"self": 145.3036681260187
},
"steps_from_proto": {
"total": 27.32216701899779,
"count": 13672,
"is_parallel": true,
"self": 4.87594188099888,
"children": {
"_process_rank_one_or_two_observation": {
"total": 22.44622513799891,
"count": 136720,
"is_parallel": true,
"self": 22.44622513799891
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00019342899986440898,
"count": 1,
"self": 0.00019342899986440898,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 382.64629510903796,
"count": 441885,
"is_parallel": true,
"self": 9.926138596034662,
"children": {
"process_trajectory": {
"total": 250.18197013000372,
"count": 441885,
"is_parallel": true,
"self": 248.3950705140037,
"children": {
"RLTrainer._checkpoint": {
"total": 1.786899616000028,
"count": 5,
"is_parallel": true,
"self": 1.786899616000028
}
}
},
"_update_policy": {
"total": 122.53818638299958,
"count": 14,
"is_parallel": true,
"self": 87.85068394299901,
"children": {
"TorchPPOOptimizer.update": {
"total": 34.68750244000057,
"count": 1704,
"is_parallel": true,
"self": 34.68750244000057
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14899628299986034,
"count": 1,
"self": 0.0008628309999494377,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1481334519999109,
"count": 1,
"self": 0.1481334519999109
}
}
}
}
}
}
}
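
The JSON above is an ML-Agents training-statistics dump: "gauges" holds per-metric summaries (value, min, max, count) and the nested "children" blocks form a hierarchical timer tree in which "self" is the time spent in a node itself while "total" also includes its children. The snippet below is a minimal, hypothetical sketch (not part of this repository) of how the file could be inspected with Python's standard json module; the local filename timers.json is an assumption.

# Hypothetical inspection sketch; assumes this file is saved locally as "timers.json".
import json

with open("timers.json") as f:
    stats = json.load(f)

# Each gauge records the latest value plus min/max and a sample count.
for name, gauge in stats["gauges"].items():
    print(f'{name}: value={gauge["value"]:.4f} '
          f'(min={gauge["min"]:.4f}, max={gauge["max"]:.4f}, n={gauge["count"]})')

# The timer tree nests "children" blocks; "self" is time spent in the node
# itself, "total" includes time spent in its children as well.
def walk(node, label="root", depth=0):
    print(f'{"  " * depth}{label}: total={node.get("total", 0):.2f}s '
          f'self={node.get("self", 0):.2f}s count={node.get("count", 0)}')
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(stats)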