{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4932537376880646,
"min": 0.49260714650154114,
"max": 1.4931550025939941,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14781.828125,
"min": 14762.451171875,
"max": 45296.3515625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989896.0,
"min": 29952.0,
"max": 989896.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989896.0,
"min": 29952.0,
"max": 989896.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3830755650997162,
"min": -0.12279938161373138,
"max": 0.44871169328689575,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 102.66425323486328,
"min": -29.471851348876953,
"max": 121.15216064453125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.12481797486543655,
"min": -0.007700951769948006,
"max": 0.4408397972583771,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 33.45121765136719,
"min": -2.079257011413574,
"max": 104.47903442382812,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06575106887889672,
"min": 0.06385722308944981,
"max": 0.07298132943779716,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.920514964304554,
"min": 0.5108693060645801,
"max": 1.0678308021257787,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014629137550877028,
"min": 0.0002321590937766454,
"max": 0.014829713002698206,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2048079257122784,
"min": 0.0027859091253197446,
"max": 0.21832211638769985,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.753511701242856e-06,
"min": 7.753511701242856e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010854916381739999,
"min": 0.00010854916381739999,
"max": 0.0035079374306875998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10258447142857144,
"min": 0.10258447142857144,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4361826000000002,
"min": 1.3886848,
"max": 2.5693124000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026818869571428576,
"min": 0.00026818869571428576,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037546417400000005,
"min": 0.0037546417400000005,
"max": 0.11695430875999997,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010856484062969685,
"min": 0.010856484062969685,
"max": 0.38719943165779114,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15199077129364014,
"min": 0.15199077129364014,
"max": 2.7103960514068604,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 416.3,
"min": 408.93333333333334,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29141.0,
"min": 15984.0,
"max": 32766.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.326479977049998,
"min": -1.0000000521540642,
"max": 1.3801013447650492,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 92.85359839349985,
"min": -30.997001603245735,
"max": 101.32299938052893,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.326479977049998,
"min": -1.0000000521540642,
"max": 1.3801013447650492,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 92.85359839349985,
"min": -30.997001603245735,
"max": 101.32299938052893,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04687117344209193,
"min": 0.04687117344209193,
"max": 7.9189072931185365,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.280982140946435,
"min": 3.280982140946435,
"max": 126.70251668989658,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680719449",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680721557"
},
"total": 2108.564166356,
"count": 1,
"self": 0.47579567900038455,
"children": {
"run_training.setup": {
"total": 0.17678425199983394,
"count": 1,
"self": 0.17678425199983394
},
"TrainerController.start_learning": {
"total": 2107.9115864249998,
"count": 1,
"self": 1.2827584240249053,
"children": {
"TrainerController._reset_env": {
"total": 4.674190202000318,
"count": 1,
"self": 4.674190202000318
},
"TrainerController.advance": {
"total": 2101.8615441799748,
"count": 63553,
"self": 1.3820729547492192,
"children": {
"env_step": {
"total": 1483.4717952302085,
"count": 63553,
"self": 1379.871693017391,
"children": {
"SubprocessEnvManager._take_step": {
"total": 102.83044515291613,
"count": 63553,
"self": 4.64839628096388,
"children": {
"TorchPolicy.evaluate": {
"total": 98.18204887195225,
"count": 62550,
"self": 98.18204887195225
}
}
},
"workers": {
"total": 0.7696570599014194,
"count": 63553,
"self": 0.0,
"children": {
"worker_root": {
"total": 2103.333863883942,
"count": 63553,
"is_parallel": true,
"self": 830.8813718818433,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002670992999810551,
"count": 1,
"is_parallel": true,
"self": 0.0008410119994550769,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001829981000355474,
"count": 8,
"is_parallel": true,
"self": 0.001829981000355474
}
}
},
"UnityEnvironment.step": {
"total": 0.048361468999701174,
"count": 1,
"is_parallel": true,
"self": 0.0006892559999869263,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005081139997855644,
"count": 1,
"is_parallel": true,
"self": 0.0005081139997855644
},
"communicator.exchange": {
"total": 0.04542014799972094,
"count": 1,
"is_parallel": true,
"self": 0.04542014799972094
},
"steps_from_proto": {
"total": 0.001743951000207744,
"count": 1,
"is_parallel": true,
"self": 0.00041593400101191946,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013280169991958246,
"count": 8,
"is_parallel": true,
"self": 0.0013280169991958246
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1272.4524920020986,
"count": 63552,
"is_parallel": true,
"self": 32.56971543399459,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.730930821055154,
"count": 63552,
"is_parallel": true,
"self": 22.730930821055154
},
"communicator.exchange": {
"total": 1122.865737207204,
"count": 63552,
"is_parallel": true,
"self": 1122.865737207204
},
"steps_from_proto": {
"total": 94.28610853984492,
"count": 63552,
"is_parallel": true,
"self": 19.534685374254877,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.75142316559004,
"count": 508416,
"is_parallel": true,
"self": 74.75142316559004
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 617.007675995017,
"count": 63553,
"self": 2.4224305260240726,
"children": {
"process_trajectory": {
"total": 104.56337798299728,
"count": 63553,
"self": 104.34741632599844,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21596165699884295,
"count": 2,
"self": 0.21596165699884295
}
}
},
"_update_policy": {
"total": 510.0218674859957,
"count": 447,
"self": 325.3888958099278,
"children": {
"TorchPPOOptimizer.update": {
"total": 184.63297167606788,
"count": 22788,
"self": 184.63297167606788
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.264999809791334e-06,
"count": 1,
"self": 1.264999809791334e-06
},
"TrainerController._save_models": {
"total": 0.09309235399996396,
"count": 1,
"self": 0.0013743660001637181,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09171798799980024,
"count": 1,
"self": 0.09171798799980024
}
}
}
}
}
}
}