ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.30174532532691956,
"min": 0.30111098289489746,
"max": 1.4569752216339111,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9177.8857421875,
"min": 8893.6142578125,
"max": 44198.80078125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989907.0,
"min": 29952.0,
"max": 989907.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989907.0,
"min": 29952.0,
"max": 989907.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.485789030790329,
"min": -0.11082945764064789,
"max": 0.5344838500022888,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 133.59197998046875,
"min": -26.70989990234375,
"max": 144.31063842773438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.06109166890382767,
"min": -0.016629774123430252,
"max": 0.3125622868537903,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 16.800209045410156,
"min": -4.523298740386963,
"max": 75.3275146484375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07247156131192807,
"min": 0.0639174328909056,
"max": 0.07273752634566755,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.014601858366993,
"min": 0.48495898766047724,
"max": 1.0463508848333731,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01701817784322418,
"min": 0.0007437264070160322,
"max": 0.01701817784322418,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23825448980513853,
"min": 0.008924716884192387,
"max": 0.23825448980513853,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.333490412678569e-06,
"min": 7.333490412678569e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010266886577749996,
"min": 0.00010266886577749996,
"max": 0.0032589569136811,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244446428571428,
"min": 0.10244446428571428,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4342225,
"min": 1.3886848,
"max": 2.3863189,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002542019821428571,
"min": 0.0002542019821428571,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003558827749999999,
"min": 0.003558827749999999,
"max": 0.10865325811000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011915902607142925,
"min": 0.011915902607142925,
"max": 0.41923269629478455,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1668226420879364,
"min": 0.1668226420879364,
"max": 2.934628963470459,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 394.9875,
"min": 361.58620689655174,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31599.0,
"min": 15984.0,
"max": 33810.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4299374838359653,
"min": -1.0000000521540642,
"max": 1.5665973110993703,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 114.39499870687723,
"min": -30.604801654815674,
"max": 134.53999774158,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4299374838359653,
"min": -1.0000000521540642,
"max": 1.5665973110993703,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 114.39499870687723,
"min": -30.604801654815674,
"max": 134.53999774158,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04875061192142312,
"min": 0.04630890977928231,
"max": 8.244974036701024,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.900048953713849,
"min": 3.662518389726756,
"max": 131.91958458721638,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739874870",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739877141"
},
"total": 2270.917346591,
"count": 1,
"self": 0.5317507879994992,
"children": {
"run_training.setup": {
"total": 0.020596433999799046,
"count": 1,
"self": 0.020596433999799046
},
"TrainerController.start_learning": {
"total": 2270.3649993690005,
"count": 1,
"self": 1.4216132280434977,
"children": {
"TrainerController._reset_env": {
"total": 2.1535198769997805,
"count": 1,
"self": 2.1535198769997805
},
"TrainerController.advance": {
"total": 2266.697146882958,
"count": 63715,
"self": 1.4656953820572198,
"children": {
"env_step": {
"total": 1584.6988095608945,
"count": 63715,
"self": 1424.0284235659528,
"children": {
"SubprocessEnvManager._take_step": {
"total": 159.81959405201223,
"count": 63715,
"self": 4.82011118288392,
"children": {
"TorchPolicy.evaluate": {
"total": 154.99948286912831,
"count": 62558,
"self": 154.99948286912831
}
}
},
"workers": {
"total": 0.8507919429293906,
"count": 63715,
"self": 0.0,
"children": {
"worker_root": {
"total": 2265.141849779913,
"count": 63715,
"is_parallel": true,
"self": 957.9124650790386,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021050399996056512,
"count": 1,
"is_parallel": true,
"self": 0.0007095929986462579,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013954470009593933,
"count": 8,
"is_parallel": true,
"self": 0.0013954470009593933
}
}
},
"UnityEnvironment.step": {
"total": 0.04745150000007925,
"count": 1,
"is_parallel": true,
"self": 0.0005292370005918201,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004973239997525525,
"count": 1,
"is_parallel": true,
"self": 0.0004973239997525525
},
"communicator.exchange": {
"total": 0.04471848499997577,
"count": 1,
"is_parallel": true,
"self": 0.04471848499997577
},
"steps_from_proto": {
"total": 0.0017064539997591055,
"count": 1,
"is_parallel": true,
"self": 0.00037717499890277395,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013292790008563315,
"count": 8,
"is_parallel": true,
"self": 0.0013292790008563315
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1307.2293847008746,
"count": 63714,
"is_parallel": true,
"self": 32.143628979924415,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.11649713707584,
"count": 63714,
"is_parallel": true,
"self": 24.11649713707584
},
"communicator.exchange": {
"total": 1150.885577916907,
"count": 63714,
"is_parallel": true,
"self": 1150.885577916907
},
"steps_from_proto": {
"total": 100.08368066696721,
"count": 63714,
"is_parallel": true,
"self": 20.654079620931498,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.42960104603571,
"count": 509712,
"is_parallel": true,
"self": 79.42960104603571
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 680.5326419400062,
"count": 63715,
"self": 2.6487831570852904,
"children": {
"process_trajectory": {
"total": 130.1719660099343,
"count": 63715,
"self": 129.91227057293418,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25969543700011855,
"count": 2,
"self": 0.25969543700011855
}
}
},
"_update_policy": {
"total": 547.7118927729866,
"count": 444,
"self": 300.1547657120068,
"children": {
"TorchPPOOptimizer.update": {
"total": 247.55712706097984,
"count": 22833,
"self": 247.55712706097984
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.28000190469902e-07,
"count": 1,
"self": 9.28000190469902e-07
},
"TrainerController._save_models": {
"total": 0.09271845299917914,
"count": 1,
"self": 0.0014304439991974505,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09128800899998168,
"count": 1,
"self": 0.09128800899998168
}
}
}
}
}
}
}