{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.060375068336725235,
"min": 0.060375068336725235,
"max": 1.3336066007614136,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 1823.81005859375,
"min": 1823.81005859375,
"max": 40456.2890625,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479936.0,
"min": 29976.0,
"max": 479936.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479936.0,
"min": 29976.0,
"max": 479936.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.08777488768100739,
"min": -0.11272956430912018,
"max": -0.04560702294111252,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -21.15374755859375,
"min": -27.16782569885254,
"max": -10.991292953491211,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 1.219641923904419,
"min": 0.7252746224403381,
"max": 1.219641923904419,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 293.9337158203125,
"min": 172.6153564453125,
"max": 293.9337158203125,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07074705433088722,
"min": 0.06554275684936896,
"max": 0.07627533102162873,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9904587606324211,
"min": 0.6102026481730298,
"max": 0.9904587606324211,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.001887249799420672,
"min": 0.0011585869165694975,
"max": 0.023994818150634253,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.026421497191889406,
"min": 0.0153774726126203,
"max": 0.19195854520507402,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.069242167398571e-05,
"min": 2.069242167398571e-05,
"max": 0.00029060460313179995,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00028969390343579996,
"min": 0.00028969390343579996,
"max": 0.0030842750719084,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10689744285714285,
"min": 0.10689744285714285,
"max": 0.19686820000000005,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4965642,
"min": 1.4965642,
"max": 2.3280916,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0006990545414285714,
"min": 0.0006990545414285714,
"max": 0.009687133179999998,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00978676358,
"min": 0.00978676358,
"max": 0.10283635084000001,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 1.2160804271697998,
"min": 0.8016397953033447,
"max": 1.2240477800369263,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 17.02512550354004,
"min": 7.33494758605957,
"max": 17.136669158935547,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 977.3870967741935,
"min": 927.6764705882352,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30299.0,
"min": 16647.0,
"max": 33203.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.7844581140145179,
"min": -0.9998875516466796,
"max": -0.4933111612443571,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -24.318201534450054,
"min": -31.99640165269375,
"max": -13.319401353597641,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.7844581140145179,
"min": -0.9998875516466796,
"max": -0.4933111612443571,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -24.318201534450054,
"min": -31.99640165269375,
"max": -13.319401353597641,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 11.756962801660261,
"min": 7.566271596533411,
"max": 15.387858438141206,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 364.4658468514681,
"min": 257.25323428213596,
"max": 381.1022219657898,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1658025994",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1658026889"
},
"total": 894.7292518040001,
"count": 1,
"self": 0.5868491990001985,
"children": {
"run_training.setup": {
"total": 0.043841267000061634,
"count": 1,
"self": 0.043841267000061634
},
"TrainerController.start_learning": {
"total": 894.0985613379999,
"count": 1,
"self": 0.6158201610089691,
"children": {
"TrainerController._reset_env": {
"total": 10.054375986999958,
"count": 1,
"self": 10.054375986999958
},
"TrainerController.advance": {
"total": 883.3323803599906,
"count": 31486,
"self": 0.6795618989731338,
"children": {
"env_step": {
"total": 526.9781809490055,
"count": 31486,
"self": 475.1840076949927,
"children": {
"SubprocessEnvManager._take_step": {
"total": 51.454006080003865,
"count": 31486,
"self": 2.2470082730092145,
"children": {
"TorchPolicy.evaluate": {
"total": 49.20699780699465,
"count": 31320,
"self": 17.25048302100663,
"children": {
"TorchPolicy.sample_actions": {
"total": 31.95651478598802,
"count": 31320,
"self": 31.95651478598802
}
}
}
}
},
"workers": {
"total": 0.3401671740089114,
"count": 31486,
"self": 0.0,
"children": {
"worker_root": {
"total": 892.0575689990333,
"count": 31486,
"is_parallel": true,
"self": 465.5740696420314,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00546580700006416,
"count": 1,
"is_parallel": true,
"self": 0.004230266000377014,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012355409996871458,
"count": 8,
"is_parallel": true,
"self": 0.0012355409996871458
}
}
},
"UnityEnvironment.step": {
"total": 0.04885161499987589,
"count": 1,
"is_parallel": true,
"self": 0.0005016609998165222,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045915600003354484,
"count": 1,
"is_parallel": true,
"self": 0.00045915600003354484
},
"communicator.exchange": {
"total": 0.0462457749999885,
"count": 1,
"is_parallel": true,
"self": 0.0462457749999885
},
"steps_from_proto": {
"total": 0.0016450230000373267,
"count": 1,
"is_parallel": true,
"self": 0.0004156230004355166,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012293999996018101,
"count": 8,
"is_parallel": true,
"self": 0.0012293999996018101
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 426.48349935700185,
"count": 31485,
"is_parallel": true,
"self": 13.537782663978078,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.209256858030358,
"count": 31485,
"is_parallel": true,
"self": 11.209256858030358
},
"communicator.exchange": {
"total": 355.89668504300244,
"count": 31485,
"is_parallel": true,
"self": 355.89668504300244
},
"steps_from_proto": {
"total": 45.839774791990976,
"count": 31485,
"is_parallel": true,
"self": 11.099635615003763,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.74013917698721,
"count": 251880,
"is_parallel": true,
"self": 34.74013917698721
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 355.67463751201194,
"count": 31486,
"self": 1.0335173360017507,
"children": {
"process_trajectory": {
"total": 80.84573346101047,
"count": 31486,
"self": 80.74223051001059,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10350295099988216,
"count": 1,
"self": 0.10350295099988216
}
}
},
"_update_policy": {
"total": 273.7953867149997,
"count": 214,
"self": 108.1369901560231,
"children": {
"TorchPPOOptimizer.update": {
"total": 165.65839655897662,
"count": 11388,
"self": 165.65839655897662
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.13300029624952e-06,
"count": 1,
"self": 1.13300029624952e-06
},
"TrainerController._save_models": {
"total": 0.09598369700006515,
"count": 1,
"self": 0.001629467999919143,
"children": {
"RLTrainer._checkpoint": {
"total": 0.094354229000146,
"count": 1,
"self": 0.094354229000146
}
}
}
}
}
}
}