{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6887931823730469,
"min": 0.6817097663879395,
"max": 2.877542495727539,
"count": 200
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7069.08447265625,
"min": 6533.359375,
"max": 29532.21875,
"count": 200
},
"SnowballTarget.Step.mean": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Step.sum": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.701602935791016,
"min": 0.11468148976564407,
"max": 14.705523490905762,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 3013.82861328125,
"min": 22.24820899963379,
"max": 3013.82861328125,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.89090909090909,
"min": 3.227272727272727,
"max": 29.163636363636364,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1589.0,
"min": 142.0,
"max": 1604.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.89090909090909,
"min": 3.227272727272727,
"max": 29.163636363636364,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1589.0,
"min": 142.0,
"max": 1604.0,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0476456560860051,
"min": 0.04465514199393127,
"max": 0.05828310601320577,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.0952913121720102,
"min": 0.08931028398786255,
"max": 0.16877422243947207,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.12963950442836472,
"min": 0.11528359079047254,
"max": 0.2743366196620883,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.25927900885672944,
"min": 0.23056718158094508,
"max": 0.8230098589862648,
"count": 200
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.1720997656000062e-06,
"min": 1.1720997656000062e-06,
"max": 0.0004983720003256,
"count": 200
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.3441995312000125e-06,
"min": 2.3441995312000125e-06,
"max": 0.0014736660052668002,
"count": 200
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.1002344,
"min": 0.1002344,
"max": 0.19967440000000003,
"count": 200
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.2004688,
"min": 0.2004688,
"max": 0.5947332000000001,
"count": 200
},
"SnowballTarget.Policy.Beta.mean": {
"value": 2.1696560000000067e-05,
"min": 2.1696560000000067e-05,
"max": 0.004983752559999999,
"count": 200
},
"SnowballTarget.Policy.Beta.sum": {
"value": 4.339312000000013e-05,
"min": 4.339312000000013e-05,
"max": 0.014737186680000002,
"count": 200
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680698981",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680703622"
},
"total": 4640.327572544,
"count": 1,
"self": 0.8383555849995901,
"children": {
"run_training.setup": {
"total": 0.12410765399999946,
"count": 1,
"self": 0.12410765399999946
},
"TrainerController.start_learning": {
"total": 4639.365109305,
"count": 1,
"self": 5.091590097886183,
"children": {
"TrainerController._reset_env": {
"total": 3.568254252000088,
"count": 1,
"self": 3.568254252000088
},
"TrainerController.advance": {
"total": 4630.466038831114,
"count": 181888,
"self": 2.5842687931617547,
"children": {
"env_step": {
"total": 4627.881770037952,
"count": 181888,
"self": 3467.568670750841,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1157.6377138510802,
"count": 181888,
"self": 16.80708259820244,
"children": {
"TorchPolicy.evaluate": {
"total": 1140.8306312528778,
"count": 181888,
"self": 1140.8306312528778
}
}
},
"workers": {
"total": 2.6753854360310925,
"count": 181888,
"self": 0.0,
"children": {
"worker_root": {
"total": 4625.9025871190315,
"count": 181888,
"is_parallel": true,
"self": 2223.4850392079425,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020046659999479743,
"count": 1,
"is_parallel": true,
"self": 0.0006876479999391449,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013170180000088294,
"count": 10,
"is_parallel": true,
"self": 0.0013170180000088294
}
}
},
"UnityEnvironment.step": {
"total": 0.046545236999918416,
"count": 1,
"is_parallel": true,
"self": 0.00047997199976634874,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00033726800006661506,
"count": 1,
"is_parallel": true,
"self": 0.00033726800006661506
},
"communicator.exchange": {
"total": 0.04425546600009511,
"count": 1,
"is_parallel": true,
"self": 0.04425546600009511
},
"steps_from_proto": {
"total": 0.0014725309999903402,
"count": 1,
"is_parallel": true,
"self": 0.00033583499998712796,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011366960000032122,
"count": 10,
"is_parallel": true,
"self": 0.0011366960000032122
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2402.417547911089,
"count": 181887,
"is_parallel": true,
"self": 95.92143633622982,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 51.84789065800692,
"count": 181887,
"is_parallel": true,
"self": 51.84789065800692
},
"communicator.exchange": {
"total": 1951.5780144187936,
"count": 181887,
"is_parallel": true,
"self": 1951.5780144187936
},
"steps_from_proto": {
"total": 303.0702064980585,
"count": 181887,
"is_parallel": true,
"self": 59.72215061531847,
"children": {
"_process_rank_one_or_two_observation": {
"total": 243.34805588274003,
"count": 1818870,
"is_parallel": true,
"self": 243.34805588274003
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.869000000937376e-05,
"count": 1,
"self": 7.869000000937376e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 4587.455271434587,
"count": 5068254,
"is_parallel": true,
"self": 118.71313614247538,
"children": {
"process_trajectory": {
"total": 2984.4422389721144,
"count": 5068254,
"is_parallel": true,
"self": 2971.7406143221147,
"children": {
"RLTrainer._checkpoint": {
"total": 12.701624649999758,
"count": 40,
"is_parallel": true,
"self": 12.701624649999758
}
}
},
"_update_policy": {
"total": 1484.299896319998,
"count": 454,
"is_parallel": true,
"self": 753.1453951550427,
"children": {
"TorchPPOOptimizer.update": {
"total": 731.1545011649553,
"count": 34500,
"is_parallel": true,
"self": 731.1545011649553
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.2391474340001878,
"count": 1,
"self": 0.001944448000358534,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23720298599982925,
"count": 1,
"self": 0.23720298599982925
}
}
}
}
}
}
}