patonw committed on
Commit 79ee852
1 Parent(s): faca62a

Upload folder using huggingface_hub

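The commit message indicates the experiment folder was uploaded with the huggingface_hub client. A minimal sketch of such an upload, assuming the repo id from the push URL in the training log below and an illustrative local folder path:

from huggingface_hub import upload_folder

# Upload the Sample Factory experiment directory as a single commit.
# repo_id is taken from the push URL in sf_log.txt; folder_path is an assumed local layout.
upload_folder(
    repo_id="patonw/rl_course_vizdoom_health_gathering_supreme",
    folder_path="train_dir/default_experiment",
    commit_message="Upload folder using huggingface_hub",
)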
.gitattributes CHANGED
@@ -34,3 +34,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
  replay.mp4 filter=lfs diff=lfs merge=lfs -text
+ git.diff filter=lfs diff=lfs merge=lfs -text
.summary/0/events.out.tfevents.1692302374.muon ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0d659ee93ea81d0677b339e82677ccc8e48d6c0ea8947cbf2d0cd75086ecf28
+ size 118322
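The ADDED entries in this commit (this TensorBoard event file and the checkpoints below) are Git LFS pointer files: a spec version line, the SHA-256 of the actual content (oid), and its size in bytes. A minimal sketch for verifying a locally checked-out file against its pointer, assuming the content has already been fetched with git-lfs (path and expected values copied from the pointer above):

import hashlib
import os

path = ".summary/0/events.out.tfevents.1692302374.muon"  # file described by the pointer above
expected_oid = "e0d659ee93ea81d0677b339e82677ccc8e48d6c0ea8947cbf2d0cd75086ecf28"
expected_size = 118322  # bytes

with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

assert digest == expected_oid, "content hash does not match the LFS pointer oid"
assert os.path.getsize(path) == expected_size, "file size does not match the LFS pointer"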
README.md CHANGED
@@ -15,7 +15,7 @@ model-index:
  type: doom_health_gathering_supreme
  metrics:
  - type: mean_reward
- value: 9.83 +/- 4.64
+ value: 11.85 +/- 5.93
  name: mean_reward
  verified: false
  ---
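The only change to the model card is the reported evaluation metric, which moves from 9.83 +/- 4.64 to 11.85 +/- 5.93 after the extended training run. A value in this form is conventionally the mean and standard deviation of the per-episode true reward over the evaluation episodes (the enjoy run below uses max_num_episodes=10); a minimal sketch with purely illustrative episode rewards:

import statistics

# Illustrative episode rewards only; the real values come from the 10-episode evaluation in sf_log.txt.
episode_rewards = [5.4, 14.9, 9.8, 20.1, 11.2, 7.3, 13.8, 16.0, 8.5, 11.5]

mean = statistics.mean(episode_rewards)
std = statistics.stdev(episode_rewards)
print(f"mean_reward = {mean:.2f} +/- {std:.2f}")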
checkpoint_p0/best_000002142_8773632_reward_26.292.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9c031d9c90b75798af34dced8e78feee14edea85369e84b36b8037e04cbc16e
+ size 34928806
checkpoint_p0/checkpoint_000002142_8773632.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46e099d7a6ad0c16466314798da0ffdfa426d8dfbd36b8e0f4724210e37200f1
+ size 34929220
checkpoint_p0/checkpoint_000002443_10006528.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ad5d590a4c04824abeb57e357ef4798f0d89eade51c85bfccfe2d53a29a740c
+ size 34929220
config.json CHANGED
@@ -65,7 +65,7 @@
  "summaries_use_frameskip": true,
  "heartbeat_interval": 20,
  "heartbeat_reporting_interval": 600,
- "train_for_env_steps": 4000000,
+ "train_for_env_steps": 10000000,
  "train_for_seconds": 10000000000,
  "save_every_sec": 120,
  "keep_checkpoints": 2,
@@ -102,7 +102,7 @@
  "env_framestack": 1,
  "pixel_format": "CHW",
  "use_record_episode_statistics": false,
- "with_wandb": false,
+ "with_wandb": true,
  "wandb_user": null,
  "wandb_project": "sample_factory",
  "wandb_group": null,
@@ -138,5 +138,6 @@
  "train_for_env_steps": 4000000
  },
  "git_hash": "336df5a551fea3a2cf40925bf3083db6b4518c91",
- "git_repo_name": "https://github.com/huggingface/deep-rl-class"
+ "git_repo_name": "https://github.com/huggingface/deep-rl-class",
+ "wandb_unique_id": "default_experiment_20230817_125929_635646"
  }
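Two settings change in the saved experiment config: the training budget is raised from 4M to 10M environment steps and Weights & Biases logging is switched on, with the new wandb_unique_id recorded for the resumed run. A minimal sketch for checking the updated file, assuming the experiment directory layout shown in the training log:

import json

with open("train_dir/default_experiment/config.json") as f:
    cfg = json.load(f)

assert cfg["train_for_env_steps"] == 10_000_000  # raised from 4_000_000 for the resumed run
assert cfg["with_wandb"] is True                 # W&B logging enabled
print(cfg["wandb_unique_id"])                    # default_experiment_20230817_125929_635646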
git.diff CHANGED
The diff for this file is too large to render. See raw diff
 
replay.mp4 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:33bf7c09ae64a3500f531643c6c22988477b4d6a5be6c98d93ef82310c705f4a
- size 18208913
+ oid sha256:cd325b3be486b1cb6eae1418969fe4bbc491b0fdd15d9b76f669502007eaa328
+ size 22633936
sf_log.txt CHANGED
@@ -1050,3 +1050,924 @@ main_loop: 87.5323
  [2023-08-17 12:52:44,835][131794] Avg episode rewards: #0: 20.835, true rewards: #0: 9.835
  [2023-08-17 12:52:44,835][131794] Avg episode reward: 20.835, avg true_objective: 9.835
  [2023-08-17 12:52:54,306][131794] Replay video saved to /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment/replay.mp4!
1053
+ [2023-08-17 12:54:20,339][131794] The model has been pushed to https://huggingface.co/patonw/rl_course_vizdoom_health_gathering_supreme
1054
+ [2023-08-17 12:59:09,422][131794] Environment doom_basic already registered, overwriting...
1055
+ [2023-08-17 12:59:09,423][131794] Environment doom_two_colors_easy already registered, overwriting...
1056
+ [2023-08-17 12:59:09,423][131794] Environment doom_two_colors_hard already registered, overwriting...
1057
+ [2023-08-17 12:59:09,424][131794] Environment doom_dm already registered, overwriting...
1058
+ [2023-08-17 12:59:09,424][131794] Environment doom_dwango5 already registered, overwriting...
1059
+ [2023-08-17 12:59:09,424][131794] Environment doom_my_way_home_flat_actions already registered, overwriting...
1060
+ [2023-08-17 12:59:09,425][131794] Environment doom_defend_the_center_flat_actions already registered, overwriting...
1061
+ [2023-08-17 12:59:09,425][131794] Environment doom_my_way_home already registered, overwriting...
1062
+ [2023-08-17 12:59:09,425][131794] Environment doom_deadly_corridor already registered, overwriting...
1063
+ [2023-08-17 12:59:09,425][131794] Environment doom_defend_the_center already registered, overwriting...
1064
+ [2023-08-17 12:59:09,426][131794] Environment doom_defend_the_line already registered, overwriting...
1065
+ [2023-08-17 12:59:09,426][131794] Environment doom_health_gathering already registered, overwriting...
1066
+ [2023-08-17 12:59:09,426][131794] Environment doom_health_gathering_supreme already registered, overwriting...
1067
+ [2023-08-17 12:59:09,427][131794] Environment doom_battle already registered, overwriting...
1068
+ [2023-08-17 12:59:09,427][131794] Environment doom_battle2 already registered, overwriting...
1069
+ [2023-08-17 12:59:09,427][131794] Environment doom_duel_bots already registered, overwriting...
1070
+ [2023-08-17 12:59:09,427][131794] Environment doom_deathmatch_bots already registered, overwriting...
1071
+ [2023-08-17 12:59:09,428][131794] Environment doom_duel already registered, overwriting...
1072
+ [2023-08-17 12:59:09,428][131794] Environment doom_deathmatch_full already registered, overwriting...
1073
+ [2023-08-17 12:59:09,428][131794] Environment doom_benchmark already registered, overwriting...
1074
+ [2023-08-17 12:59:09,429][131794] register_encoder_factory: <function make_vizdoom_encoder at 0x7f0223bcab90>
1075
+ [2023-08-17 12:59:29,604][131794] Environment doom_basic already registered, overwriting...
1076
+ [2023-08-17 12:59:29,605][131794] Environment doom_two_colors_easy already registered, overwriting...
1077
+ [2023-08-17 12:59:29,606][131794] Environment doom_two_colors_hard already registered, overwriting...
1078
+ [2023-08-17 12:59:29,606][131794] Environment doom_dm already registered, overwriting...
1079
+ [2023-08-17 12:59:29,606][131794] Environment doom_dwango5 already registered, overwriting...
1080
+ [2023-08-17 12:59:29,607][131794] Environment doom_my_way_home_flat_actions already registered, overwriting...
1081
+ [2023-08-17 12:59:29,607][131794] Environment doom_defend_the_center_flat_actions already registered, overwriting...
1082
+ [2023-08-17 12:59:29,607][131794] Environment doom_my_way_home already registered, overwriting...
1083
+ [2023-08-17 12:59:29,608][131794] Environment doom_deadly_corridor already registered, overwriting...
1084
+ [2023-08-17 12:59:29,608][131794] Environment doom_defend_the_center already registered, overwriting...
1085
+ [2023-08-17 12:59:29,608][131794] Environment doom_defend_the_line already registered, overwriting...
1086
+ [2023-08-17 12:59:29,609][131794] Environment doom_health_gathering already registered, overwriting...
1087
+ [2023-08-17 12:59:29,609][131794] Environment doom_health_gathering_supreme already registered, overwriting...
1088
+ [2023-08-17 12:59:29,609][131794] Environment doom_battle already registered, overwriting...
1089
+ [2023-08-17 12:59:29,610][131794] Environment doom_battle2 already registered, overwriting...
1090
+ [2023-08-17 12:59:29,610][131794] Environment doom_duel_bots already registered, overwriting...
1091
+ [2023-08-17 12:59:29,611][131794] Environment doom_deathmatch_bots already registered, overwriting...
1092
+ [2023-08-17 12:59:29,611][131794] Environment doom_duel already registered, overwriting...
1093
+ [2023-08-17 12:59:29,611][131794] Environment doom_deathmatch_full already registered, overwriting...
1094
+ [2023-08-17 12:59:29,612][131794] Environment doom_benchmark already registered, overwriting...
1095
+ [2023-08-17 12:59:29,612][131794] register_encoder_factory: <function make_vizdoom_encoder at 0x7f0223bcab90>
1096
+ [2023-08-17 12:59:29,630][131794] Loading existing experiment configuration from /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment/config.json
1097
+ [2023-08-17 12:59:29,631][131794] Overriding arg 'train_for_env_steps' with value 10000000 passed from command line
1098
+ [2023-08-17 12:59:29,631][131794] Overriding arg 'with_wandb' with value True passed from command line
1099
+ [2023-08-17 12:59:29,634][131794] Experiment dir /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment already exists!
1100
+ [2023-08-17 12:59:29,635][131794] Resuming existing experiment from /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment...
1101
+ [2023-08-17 12:59:29,635][131794] Weights and Biases integration enabled. Project: sample_factory, user: None, group: None, unique_id: default_experiment_20230817_125929_635646
1102
+ [2023-08-17 12:59:29,819][131794] Initializing WandB...
1103
+ [2023-08-17 12:59:34,788][131794] Environment var CUDA_VISIBLE_DEVICES is 0
1104
+
1105
+ [2023-08-17 12:59:35,803][131794] Starting experiment with the following configuration:
1106
+ help=False
1107
+ algo=APPO
1108
+ env=doom_health_gathering_supreme
1109
+ experiment=default_experiment
1110
+ train_dir=/home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir
1111
+ restart_behavior=resume
1112
+ device=gpu
1113
+ seed=None
1114
+ num_policies=1
1115
+ async_rl=True
1116
+ serial_mode=False
1117
+ batched_sampling=False
1118
+ num_batches_to_accumulate=2
1119
+ worker_num_splits=2
1120
+ policy_workers_per_policy=1
1121
+ max_policy_lag=1000
1122
+ num_workers=8
1123
+ num_envs_per_worker=4
1124
+ batch_size=1024
1125
+ num_batches_per_epoch=1
1126
+ num_epochs=1
1127
+ rollout=32
1128
+ recurrence=32
1129
+ shuffle_minibatches=False
1130
+ gamma=0.99
1131
+ reward_scale=1.0
1132
+ reward_clip=1000.0
1133
+ value_bootstrap=False
1134
+ normalize_returns=True
1135
+ exploration_loss_coeff=0.001
1136
+ value_loss_coeff=0.5
1137
+ kl_loss_coeff=0.0
1138
+ exploration_loss=symmetric_kl
1139
+ gae_lambda=0.95
1140
+ ppo_clip_ratio=0.1
1141
+ ppo_clip_value=0.2
1142
+ with_vtrace=False
1143
+ vtrace_rho=1.0
1144
+ vtrace_c=1.0
1145
+ optimizer=adam
1146
+ adam_eps=1e-06
1147
+ adam_beta1=0.9
1148
+ adam_beta2=0.999
1149
+ max_grad_norm=4.0
1150
+ learning_rate=0.0001
1151
+ lr_schedule=constant
1152
+ lr_schedule_kl_threshold=0.008
1153
+ lr_adaptive_min=1e-06
1154
+ lr_adaptive_max=0.01
1155
+ obs_subtract_mean=0.0
1156
+ obs_scale=255.0
1157
+ normalize_input=True
1158
+ normalize_input_keys=None
1159
+ decorrelate_experience_max_seconds=0
1160
+ decorrelate_envs_on_one_worker=True
1161
+ actor_worker_gpus=[]
1162
+ set_workers_cpu_affinity=True
1163
+ force_envs_single_thread=False
1164
+ default_niceness=0
1165
+ log_to_file=True
1166
+ experiment_summaries_interval=10
1167
+ flush_summaries_interval=30
1168
+ stats_avg=100
1169
+ summaries_use_frameskip=True
1170
+ heartbeat_interval=20
1171
+ heartbeat_reporting_interval=600
1172
+ train_for_env_steps=10000000
1173
+ train_for_seconds=10000000000
1174
+ save_every_sec=120
1175
+ keep_checkpoints=2
1176
+ load_checkpoint_kind=latest
1177
+ save_milestones_sec=-1
1178
+ save_best_every_sec=5
1179
+ save_best_metric=reward
1180
+ save_best_after=100000
1181
+ benchmark=False
1182
+ encoder_mlp_layers=[512, 512]
1183
+ encoder_conv_architecture=convnet_simple
1184
+ encoder_conv_mlp_layers=[512]
1185
+ use_rnn=True
1186
+ rnn_size=512
1187
+ rnn_type=gru
1188
+ rnn_num_layers=1
1189
+ decoder_mlp_layers=[]
1190
+ nonlinearity=elu
1191
+ policy_initialization=orthogonal
1192
+ policy_init_gain=1.0
1193
+ actor_critic_share_weights=True
1194
+ adaptive_stddev=True
1195
+ continuous_tanh_scale=0.0
1196
+ initial_stddev=1.0
1197
+ use_env_info_cache=False
1198
+ env_gpu_actions=False
1199
+ env_gpu_observations=True
1200
+ env_frameskip=4
1201
+ env_framestack=1
1202
+ pixel_format=CHW
1203
+ use_record_episode_statistics=False
1204
+ with_wandb=True
1205
+ wandb_user=None
1206
+ wandb_project=sample_factory
1207
+ wandb_group=None
1208
+ wandb_job_type=SF
1209
+ wandb_tags=[]
1210
+ with_pbt=False
1211
+ pbt_mix_policies_in_one_env=True
1212
+ pbt_period_env_steps=5000000
1213
+ pbt_start_mutation=20000000
1214
+ pbt_replace_fraction=0.3
1215
+ pbt_mutation_rate=0.15
1216
+ pbt_replace_reward_gap=0.1
1217
+ pbt_replace_reward_gap_absolute=1e-06
1218
+ pbt_optimize_gamma=False
1219
+ pbt_target_objective=true_objective
1220
+ pbt_perturb_min=1.1
1221
+ pbt_perturb_max=1.5
1222
+ num_agents=-1
1223
+ num_humans=0
1224
+ num_bots=-1
1225
+ start_bot_difficulty=None
1226
+ timelimit=None
1227
+ res_w=128
1228
+ res_h=72
1229
+ wide_aspect_ratio=False
1230
+ eval_env_frameskip=1
1231
+ fps=35
1232
+ command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000
1233
+ cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 4000000}
1234
+ git_hash=336df5a551fea3a2cf40925bf3083db6b4518c91
1235
+ git_repo_name=https://github.com/huggingface/deep-rl-class
1236
+ wandb_unique_id=default_experiment_20230817_125929_635646
1237
+ [2023-08-17 12:59:35,804][131794] Saving configuration to /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment/config.json...
1238
+ [2023-08-17 12:59:35,879][131794] Rollout worker 0 uses device cpu
1239
+ [2023-08-17 12:59:35,879][131794] Rollout worker 1 uses device cpu
1240
+ [2023-08-17 12:59:35,880][131794] Rollout worker 2 uses device cpu
1241
+ [2023-08-17 12:59:35,881][131794] Rollout worker 3 uses device cpu
1242
+ [2023-08-17 12:59:35,881][131794] Rollout worker 4 uses device cpu
1243
+ [2023-08-17 12:59:35,882][131794] Rollout worker 5 uses device cpu
1244
+ [2023-08-17 12:59:35,882][131794] Rollout worker 6 uses device cpu
1245
+ [2023-08-17 12:59:35,883][131794] Rollout worker 7 uses device cpu
1246
+ [2023-08-17 12:59:35,911][131794] Using GPUs [0] for process 0 (actually maps to GPUs [0])
1247
+ [2023-08-17 12:59:35,912][131794] InferenceWorker_p0-w0: min num requests: 2
1248
+ [2023-08-17 12:59:35,935][131794] Starting all processes...
1249
+ [2023-08-17 12:59:35,936][131794] Starting process learner_proc0
1250
+ [2023-08-17 12:59:35,984][131794] Starting all processes...
1251
+ [2023-08-17 12:59:35,986][131794] Starting process inference_proc0-0
1252
+ [2023-08-17 12:59:35,986][131794] Starting process rollout_proc0
1253
+ [2023-08-17 12:59:35,986][131794] Starting process rollout_proc1
1254
+ [2023-08-17 12:59:35,986][131794] Starting process rollout_proc2
1255
+ [2023-08-17 12:59:35,987][131794] Starting process rollout_proc3
1256
+ [2023-08-17 12:59:35,987][131794] Starting process rollout_proc4
1257
+ [2023-08-17 12:59:35,988][131794] Starting process rollout_proc5
1258
+ [2023-08-17 12:59:35,988][131794] Starting process rollout_proc6
1259
+ [2023-08-17 12:59:35,989][131794] Starting process rollout_proc7
1260
+ [2023-08-17 12:59:36,953][138062] Using GPUs [0] for process 0 (actually maps to GPUs [0])
1261
+ [2023-08-17 12:59:36,953][138062] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
1262
+ [2023-08-17 12:59:36,957][138062] Num visible devices: 1
1263
+ [2023-08-17 12:59:36,973][138062] Starting seed is not provided
1264
+ [2023-08-17 12:59:36,974][138062] Using GPUs [0] for process 0 (actually maps to GPUs [0])
1265
+ [2023-08-17 12:59:36,974][138062] Initializing actor-critic model on device cuda:0
1266
+ [2023-08-17 12:59:36,974][138062] RunningMeanStd input shape: (3, 72, 128)
1267
+ [2023-08-17 12:59:36,975][138062] RunningMeanStd input shape: (1,)
1268
+ [2023-08-17 12:59:36,983][138062] ConvEncoder: input_channels=3
1269
+ [2023-08-17 12:59:37,000][138076] Using GPUs [0] for process 0 (actually maps to GPUs [0])
1270
+ [2023-08-17 12:59:37,000][138076] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
1271
+ [2023-08-17 12:59:37,004][138076] Num visible devices: 1
1272
+ [2023-08-17 12:59:37,043][138062] Conv encoder output size: 512
1273
+ [2023-08-17 12:59:37,043][138062] Policy head output size: 512
1274
+ [2023-08-17 12:59:37,048][138077] Worker 1 uses CPU cores [3, 4, 5]
1275
+ [2023-08-17 12:59:37,050][138062] Created Actor Critic model with architecture:
1276
+ [2023-08-17 12:59:37,051][138062] ActorCriticSharedWeights(
1277
+ (obs_normalizer): ObservationNormalizer(
1278
+ (running_mean_std): RunningMeanStdDictInPlace(
1279
+ (running_mean_std): ModuleDict(
1280
+ (obs): RunningMeanStdInPlace()
1281
+ )
1282
+ )
1283
+ )
1284
+ (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
1285
+ (encoder): VizdoomEncoder(
1286
+ (basic_encoder): ConvEncoder(
1287
+ (enc): RecursiveScriptModule(
1288
+ original_name=ConvEncoderImpl
1289
+ (conv_head): RecursiveScriptModule(
1290
+ original_name=Sequential
1291
+ (0): RecursiveScriptModule(original_name=Conv2d)
1292
+ (1): RecursiveScriptModule(original_name=ELU)
1293
+ (2): RecursiveScriptModule(original_name=Conv2d)
1294
+ (3): RecursiveScriptModule(original_name=ELU)
1295
+ (4): RecursiveScriptModule(original_name=Conv2d)
1296
+ (5): RecursiveScriptModule(original_name=ELU)
1297
+ )
1298
+ (mlp_layers): RecursiveScriptModule(
1299
+ original_name=Sequential
1300
+ (0): RecursiveScriptModule(original_name=Linear)
1301
+ (1): RecursiveScriptModule(original_name=ELU)
1302
+ )
1303
+ )
1304
+ )
1305
+ )
1306
+ (core): ModelCoreRNN(
1307
+ (core): GRU(512, 512)
1308
+ )
1309
+ (decoder): MlpDecoder(
1310
+ (mlp): Identity()
1311
+ )
1312
+ (critic_linear): Linear(in_features=512, out_features=1, bias=True)
1313
+ (action_parameterization): ActionParameterizationDefault(
1314
+ (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
1315
+ )
1316
+ )
1317
+ [2023-08-17 12:59:37,051][138075] Worker 0 uses CPU cores [0, 1, 2]
1318
+ [2023-08-17 12:59:37,061][138081] Worker 5 uses CPU cores [15, 16, 17]
1319
+ [2023-08-17 12:59:37,061][138082] Worker 6 uses CPU cores [18, 19, 20]
1320
+ [2023-08-17 12:59:37,061][138083] Worker 7 uses CPU cores [21, 22, 23]
1321
+ [2023-08-17 12:59:37,065][138078] Worker 2 uses CPU cores [6, 7, 8]
1322
+ [2023-08-17 12:59:37,075][138079] Worker 3 uses CPU cores [9, 10, 11]
1323
+ [2023-08-17 12:59:37,079][138080] Worker 4 uses CPU cores [12, 13, 14]
1324
+ [2023-08-17 12:59:37,161][138062] Using optimizer <class 'torch.optim.adam.Adam'>
1325
+ [2023-08-17 12:59:37,161][138062] Loading state from checkpoint /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
1326
+ [2023-08-17 12:59:37,184][138062] Loading model from checkpoint
1327
+ [2023-08-17 12:59:37,187][138062] Loaded experiment state at self.train_step=978, self.env_steps=4005888
1328
+ [2023-08-17 12:59:37,187][138062] Initialized policy 0 weights for model version 978
1329
+ [2023-08-17 12:59:37,188][138062] LearnerWorker_p0 finished initialization!
1330
+ [2023-08-17 12:59:37,188][138062] Using GPUs [0] for process 0 (actually maps to GPUs [0])
1331
+ [2023-08-17 12:59:37,231][138076] RunningMeanStd input shape: (3, 72, 128)
1332
+ [2023-08-17 12:59:37,232][138076] RunningMeanStd input shape: (1,)
1333
+ [2023-08-17 12:59:37,238][138076] ConvEncoder: input_channels=3
1334
+ [2023-08-17 12:59:37,290][138076] Conv encoder output size: 512
1335
+ [2023-08-17 12:59:37,290][138076] Policy head output size: 512
1336
+ [2023-08-17 12:59:37,315][131794] Inference worker 0-0 is ready!
1337
+ [2023-08-17 12:59:37,316][131794] All inference workers are ready! Signal rollout workers to start!
1338
+ [2023-08-17 12:59:37,333][138083] Doom resolution: 160x120, resize resolution: (128, 72)
1339
+ [2023-08-17 12:59:37,334][138079] Doom resolution: 160x120, resize resolution: (128, 72)
1340
+ [2023-08-17 12:59:37,334][138080] Doom resolution: 160x120, resize resolution: (128, 72)
1341
+ [2023-08-17 12:59:37,334][138075] Doom resolution: 160x120, resize resolution: (128, 72)
1342
+ [2023-08-17 12:59:37,335][138077] Doom resolution: 160x120, resize resolution: (128, 72)
1343
+ [2023-08-17 12:59:37,335][138081] Doom resolution: 160x120, resize resolution: (128, 72)
1344
+ [2023-08-17 12:59:37,335][138078] Doom resolution: 160x120, resize resolution: (128, 72)
1345
+ [2023-08-17 12:59:37,336][138082] Doom resolution: 160x120, resize resolution: (128, 72)
1346
+ [2023-08-17 12:59:37,546][138075] Decorrelating experience for 0 frames...
1347
+ [2023-08-17 12:59:37,547][138083] Decorrelating experience for 0 frames...
1348
+ [2023-08-17 12:59:37,557][138077] Decorrelating experience for 0 frames...
1349
+ [2023-08-17 12:59:37,734][138083] Decorrelating experience for 32 frames...
1350
+ [2023-08-17 12:59:37,734][138079] Decorrelating experience for 0 frames...
1351
+ [2023-08-17 12:59:37,734][138078] Decorrelating experience for 0 frames...
1352
+ [2023-08-17 12:59:37,740][138077] Decorrelating experience for 32 frames...
1353
+ [2023-08-17 12:59:37,785][138075] Decorrelating experience for 32 frames...
1354
+ [2023-08-17 12:59:37,920][138078] Decorrelating experience for 32 frames...
1355
+ [2023-08-17 12:59:37,941][138083] Decorrelating experience for 64 frames...
1356
+ [2023-08-17 12:59:37,941][138080] Decorrelating experience for 0 frames...
1357
+ [2023-08-17 12:59:37,951][138077] Decorrelating experience for 64 frames...
1358
+ [2023-08-17 12:59:37,959][138079] Decorrelating experience for 32 frames...
1359
+ [2023-08-17 12:59:38,127][138078] Decorrelating experience for 64 frames...
1360
+ [2023-08-17 12:59:38,138][138080] Decorrelating experience for 32 frames...
1361
+ [2023-08-17 12:59:38,145][138075] Decorrelating experience for 64 frames...
1362
+ [2023-08-17 12:59:38,164][138083] Decorrelating experience for 96 frames...
1363
+ [2023-08-17 12:59:38,164][138077] Decorrelating experience for 96 frames...
1364
+ [2023-08-17 12:59:38,172][138079] Decorrelating experience for 64 frames...
1365
+ [2023-08-17 12:59:38,335][138078] Decorrelating experience for 96 frames...
1366
+ [2023-08-17 12:59:38,375][138081] Decorrelating experience for 0 frames...
1367
+ [2023-08-17 12:59:38,386][138075] Decorrelating experience for 96 frames...
1368
+ [2023-08-17 12:59:38,408][138079] Decorrelating experience for 96 frames...
1369
+ [2023-08-17 12:59:38,420][138080] Decorrelating experience for 64 frames...
1370
+ [2023-08-17 12:59:38,628][138082] Decorrelating experience for 0 frames...
1371
+ [2023-08-17 12:59:38,630][138081] Decorrelating experience for 32 frames...
1372
+ [2023-08-17 12:59:38,750][138062] Signal inference workers to stop experience collection...
1373
+ [2023-08-17 12:59:38,756][138076] InferenceWorker_p0-w0: stopping experience collection
1374
+ [2023-08-17 12:59:38,821][138082] Decorrelating experience for 32 frames...
1375
+ [2023-08-17 12:59:38,835][138081] Decorrelating experience for 64 frames...
1376
+ [2023-08-17 12:59:38,836][138080] Decorrelating experience for 96 frames...
1377
+ [2023-08-17 12:59:39,025][138082] Decorrelating experience for 64 frames...
1378
+ [2023-08-17 12:59:39,042][138081] Decorrelating experience for 96 frames...
1379
+ [2023-08-17 12:59:39,218][138082] Decorrelating experience for 96 frames...
1380
+ [2023-08-17 12:59:39,282][138062] Signal inference workers to resume experience collection...
1381
+ [2023-08-17 12:59:39,283][138076] InferenceWorker_p0-w0: resuming experience collection
1382
+ [2023-08-17 12:59:39,788][131794] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 4014080. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
1383
+ [2023-08-17 12:59:39,789][131794] Avg episode reward: [(0, '6.493')]
1384
+ [2023-08-17 12:59:40,549][138076] Updated weights for policy 0, policy_version 988 (0.0184)
1385
+ [2023-08-17 12:59:41,527][138076] Updated weights for policy 0, policy_version 998 (0.0006)
1386
+ [2023-08-17 12:59:42,585][138076] Updated weights for policy 0, policy_version 1008 (0.0006)
1387
+ [2023-08-17 12:59:43,648][138076] Updated weights for policy 0, policy_version 1018 (0.0007)
1388
+ [2023-08-17 12:59:44,652][138076] Updated weights for policy 0, policy_version 1028 (0.0006)
1389
+ [2023-08-17 12:59:44,788][131794] Fps is (10 sec: 40141.5, 60 sec: 40141.5, 300 sec: 40141.5). Total num frames: 4214784. Throughput: 0: 7708.9. Samples: 38544. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
1390
+ [2023-08-17 12:59:44,789][131794] Avg episode reward: [(0, '20.021')]
1391
+ [2023-08-17 12:59:45,583][138076] Updated weights for policy 0, policy_version 1038 (0.0006)
1392
+ [2023-08-17 12:59:46,560][138076] Updated weights for policy 0, policy_version 1048 (0.0006)
1393
+ [2023-08-17 12:59:47,593][138076] Updated weights for policy 0, policy_version 1058 (0.0006)
1394
+ [2023-08-17 12:59:48,603][138076] Updated weights for policy 0, policy_version 1068 (0.0006)
1395
+ [2023-08-17 12:59:49,605][138076] Updated weights for policy 0, policy_version 1078 (0.0006)
1396
+ [2023-08-17 12:59:49,788][131794] Fps is (10 sec: 40550.4, 60 sec: 40550.4, 300 sec: 40550.4). Total num frames: 4419584. Throughput: 0: 10010.8. Samples: 100108. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
1397
+ [2023-08-17 12:59:49,789][131794] Avg episode reward: [(0, '21.241')]
1398
+ [2023-08-17 12:59:50,586][138076] Updated weights for policy 0, policy_version 1088 (0.0006)
1399
+ [2023-08-17 12:59:51,564][138076] Updated weights for policy 0, policy_version 1098 (0.0006)
1400
+ [2023-08-17 12:59:52,582][138076] Updated weights for policy 0, policy_version 1108 (0.0006)
1401
+ [2023-08-17 12:59:53,540][138076] Updated weights for policy 0, policy_version 1118 (0.0006)
1402
+ [2023-08-17 12:59:54,551][138076] Updated weights for policy 0, policy_version 1128 (0.0006)
1403
+ [2023-08-17 12:59:54,788][131794] Fps is (10 sec: 41369.6, 60 sec: 40960.3, 300 sec: 40960.3). Total num frames: 4628480. Throughput: 0: 8754.6. Samples: 131318. Policy #0 lag: (min: 0.0, avg: 0.8, max: 1.0)
1404
+ [2023-08-17 12:59:54,789][131794] Avg episode reward: [(0, '19.294')]
1405
+ [2023-08-17 12:59:55,528][138076] Updated weights for policy 0, policy_version 1138 (0.0006)
1406
+ [2023-08-17 12:59:55,905][131794] Heartbeat connected on Batcher_0
1407
+ [2023-08-17 12:59:55,915][131794] Heartbeat connected on LearnerWorker_p0
1408
+ [2023-08-17 12:59:55,916][131794] Heartbeat connected on InferenceWorker_p0-w0
1409
+ [2023-08-17 12:59:55,917][131794] Heartbeat connected on RolloutWorker_w0
1410
+ [2023-08-17 12:59:55,919][131794] Heartbeat connected on RolloutWorker_w1
1411
+ [2023-08-17 12:59:55,922][131794] Heartbeat connected on RolloutWorker_w2
1412
+ [2023-08-17 12:59:55,925][131794] Heartbeat connected on RolloutWorker_w3
1413
+ [2023-08-17 12:59:55,928][131794] Heartbeat connected on RolloutWorker_w4
1414
+ [2023-08-17 12:59:55,929][131794] Heartbeat connected on RolloutWorker_w5
1415
+ [2023-08-17 12:59:55,932][131794] Heartbeat connected on RolloutWorker_w6
1416
+ [2023-08-17 12:59:55,934][131794] Heartbeat connected on RolloutWorker_w7
1417
+ [2023-08-17 12:59:56,515][138076] Updated weights for policy 0, policy_version 1148 (0.0006)
1418
+ [2023-08-17 12:59:57,482][138076] Updated weights for policy 0, policy_version 1158 (0.0006)
1419
+ [2023-08-17 12:59:58,514][138076] Updated weights for policy 0, policy_version 1168 (0.0006)
1420
+ [2023-08-17 12:59:59,555][138076] Updated weights for policy 0, policy_version 1178 (0.0006)
1421
+ [2023-08-17 12:59:59,788][131794] Fps is (10 sec: 41369.8, 60 sec: 40960.1, 300 sec: 40960.1). Total num frames: 4833280. Throughput: 0: 9661.5. Samples: 193230. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
1422
+ [2023-08-17 12:59:59,789][131794] Avg episode reward: [(0, '25.107')]
1423
+ [2023-08-17 12:59:59,790][138062] Saving new best policy, reward=25.107!
1424
+ [2023-08-17 13:00:00,580][138076] Updated weights for policy 0, policy_version 1188 (0.0006)
1425
+ [2023-08-17 13:00:01,571][138076] Updated weights for policy 0, policy_version 1198 (0.0006)
1426
+ [2023-08-17 13:00:02,515][138076] Updated weights for policy 0, policy_version 1208 (0.0006)
1427
+ [2023-08-17 13:00:03,491][138076] Updated weights for policy 0, policy_version 1218 (0.0006)
1428
+ [2023-08-17 13:00:04,455][138076] Updated weights for policy 0, policy_version 1228 (0.0006)
1429
+ [2023-08-17 13:00:04,788][131794] Fps is (10 sec: 41369.4, 60 sec: 41123.9, 300 sec: 41123.9). Total num frames: 5042176. Throughput: 0: 10216.7. Samples: 255418. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
1430
+ [2023-08-17 13:00:04,789][131794] Avg episode reward: [(0, '23.711')]
1431
+ [2023-08-17 13:00:05,441][138076] Updated weights for policy 0, policy_version 1238 (0.0006)
1432
+ [2023-08-17 13:00:06,429][138076] Updated weights for policy 0, policy_version 1248 (0.0006)
1433
+ [2023-08-17 13:00:07,404][138076] Updated weights for policy 0, policy_version 1258 (0.0006)
1434
+ [2023-08-17 13:00:08,373][138076] Updated weights for policy 0, policy_version 1268 (0.0006)
1435
+ [2023-08-17 13:00:09,421][138076] Updated weights for policy 0, policy_version 1278 (0.0006)
1436
+ [2023-08-17 13:00:09,788][131794] Fps is (10 sec: 41369.8, 60 sec: 41096.7, 300 sec: 41096.7). Total num frames: 5246976. Throughput: 0: 9549.6. Samples: 286486. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
1437
+ [2023-08-17 13:00:09,789][131794] Avg episode reward: [(0, '20.401')]
1438
+ [2023-08-17 13:00:10,433][138076] Updated weights for policy 0, policy_version 1288 (0.0007)
1439
+ [2023-08-17 13:00:11,471][138076] Updated weights for policy 0, policy_version 1298 (0.0007)
1440
+ [2023-08-17 13:00:12,500][138076] Updated weights for policy 0, policy_version 1308 (0.0006)
1441
+ [2023-08-17 13:00:13,520][138076] Updated weights for policy 0, policy_version 1318 (0.0006)
1442
+ [2023-08-17 13:00:14,499][138076] Updated weights for policy 0, policy_version 1328 (0.0006)
1443
+ [2023-08-17 13:00:14,788][131794] Fps is (10 sec: 40550.6, 60 sec: 40960.1, 300 sec: 40960.1). Total num frames: 5447680. Throughput: 0: 9908.0. Samples: 346780. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
1444
+ [2023-08-17 13:00:14,789][131794] Avg episode reward: [(0, '21.480')]
1445
+ [2023-08-17 13:00:15,502][138076] Updated weights for policy 0, policy_version 1338 (0.0006)
1446
+ [2023-08-17 13:00:16,525][138076] Updated weights for policy 0, policy_version 1348 (0.0007)
1447
+ [2023-08-17 13:00:17,539][138076] Updated weights for policy 0, policy_version 1358 (0.0006)
1448
+ [2023-08-17 13:00:18,483][138076] Updated weights for policy 0, policy_version 1368 (0.0006)
1449
+ [2023-08-17 13:00:19,457][138076] Updated weights for policy 0, policy_version 1378 (0.0006)
1450
+ [2023-08-17 13:00:19,788][131794] Fps is (10 sec: 40959.7, 60 sec: 41062.4, 300 sec: 41062.4). Total num frames: 5656576. Throughput: 0: 10223.4. Samples: 408936. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
1451
+ [2023-08-17 13:00:19,789][131794] Avg episode reward: [(0, '24.208')]
1452
+ [2023-08-17 13:00:20,437][138076] Updated weights for policy 0, policy_version 1388 (0.0006)
1453
+ [2023-08-17 13:00:21,479][138076] Updated weights for policy 0, policy_version 1398 (0.0007)
1454
+ [2023-08-17 13:00:22,461][138076] Updated weights for policy 0, policy_version 1408 (0.0007)
1455
+ [2023-08-17 13:00:23,400][138076] Updated weights for policy 0, policy_version 1418 (0.0006)
1456
+ [2023-08-17 13:00:24,390][138076] Updated weights for policy 0, policy_version 1428 (0.0006)
1457
+ [2023-08-17 13:00:24,788][131794] Fps is (10 sec: 41779.0, 60 sec: 41142.1, 300 sec: 41142.1). Total num frames: 5865472. Throughput: 0: 9769.6. Samples: 439632. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
1458
+ [2023-08-17 13:00:24,789][131794] Avg episode reward: [(0, '23.461')]
1459
+ [2023-08-17 13:00:25,374][138076] Updated weights for policy 0, policy_version 1438 (0.0006)
1460
+ [2023-08-17 13:00:26,415][138076] Updated weights for policy 0, policy_version 1448 (0.0007)
1461
+ [2023-08-17 13:00:27,435][138076] Updated weights for policy 0, policy_version 1458 (0.0006)
1462
+ [2023-08-17 13:00:28,377][138076] Updated weights for policy 0, policy_version 1468 (0.0006)
1463
+ [2023-08-17 13:00:29,386][138076] Updated weights for policy 0, policy_version 1478 (0.0007)
1464
+ [2023-08-17 13:00:29,788][131794] Fps is (10 sec: 41369.8, 60 sec: 41123.9, 300 sec: 41123.9). Total num frames: 6070272. Throughput: 0: 10296.5. Samples: 501888. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
1465
+ [2023-08-17 13:00:29,789][131794] Avg episode reward: [(0, '23.247')]
1466
+ [2023-08-17 13:00:30,352][138076] Updated weights for policy 0, policy_version 1488 (0.0006)
1467
+ [2023-08-17 13:00:31,330][138076] Updated weights for policy 0, policy_version 1498 (0.0006)
1468
+ [2023-08-17 13:00:32,335][138076] Updated weights for policy 0, policy_version 1508 (0.0006)
1469
+ [2023-08-17 13:00:33,303][138076] Updated weights for policy 0, policy_version 1518 (0.0006)
1470
+ [2023-08-17 13:00:34,304][138076] Updated weights for policy 0, policy_version 1528 (0.0006)
1471
+ [2023-08-17 13:00:34,788][131794] Fps is (10 sec: 41369.8, 60 sec: 41183.5, 300 sec: 41183.5). Total num frames: 6279168. Throughput: 0: 10313.4. Samples: 564208. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
1472
+ [2023-08-17 13:00:34,789][131794] Avg episode reward: [(0, '21.354')]
1473
+ [2023-08-17 13:00:35,279][138076] Updated weights for policy 0, policy_version 1538 (0.0006)
1474
+ [2023-08-17 13:00:36,307][138076] Updated weights for policy 0, policy_version 1548 (0.0007)
1475
+ [2023-08-17 13:00:37,355][138076] Updated weights for policy 0, policy_version 1558 (0.0007)
1476
+ [2023-08-17 13:00:38,294][138076] Updated weights for policy 0, policy_version 1568 (0.0007)
1477
+ [2023-08-17 13:00:39,268][138076] Updated weights for policy 0, policy_version 1578 (0.0006)
1478
+ [2023-08-17 13:00:39,788][131794] Fps is (10 sec: 41369.6, 60 sec: 41164.9, 300 sec: 41164.9). Total num frames: 6483968. Throughput: 0: 10290.4. Samples: 594388. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
1479
+ [2023-08-17 13:00:39,789][131794] Avg episode reward: [(0, '20.492')]
1480
+ [2023-08-17 13:00:40,270][138076] Updated weights for policy 0, policy_version 1588 (0.0007)
1481
+ [2023-08-17 13:00:41,245][138076] Updated weights for policy 0, policy_version 1598 (0.0006)
1482
+ [2023-08-17 13:00:42,195][138076] Updated weights for policy 0, policy_version 1608 (0.0006)
1483
+ [2023-08-17 13:00:43,197][138076] Updated weights for policy 0, policy_version 1618 (0.0006)
1484
+ [2023-08-17 13:00:44,192][138076] Updated weights for policy 0, policy_version 1628 (0.0007)
1485
+ [2023-08-17 13:00:44,788][131794] Fps is (10 sec: 41369.3, 60 sec: 41301.3, 300 sec: 41212.1). Total num frames: 6692864. Throughput: 0: 10311.1. Samples: 657232. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
1486
+ [2023-08-17 13:00:44,789][131794] Avg episode reward: [(0, '22.781')]
1487
+ [2023-08-17 13:00:45,189][138076] Updated weights for policy 0, policy_version 1638 (0.0006)
1488
+ [2023-08-17 13:00:46,149][138076] Updated weights for policy 0, policy_version 1648 (0.0006)
1489
+ [2023-08-17 13:00:47,125][138076] Updated weights for policy 0, policy_version 1658 (0.0006)
1490
+ [2023-08-17 13:00:48,117][138076] Updated weights for policy 0, policy_version 1668 (0.0006)
1491
+ [2023-08-17 13:00:49,070][138076] Updated weights for policy 0, policy_version 1678 (0.0006)
1492
+ [2023-08-17 13:00:49,788][131794] Fps is (10 sec: 41778.5, 60 sec: 41369.5, 300 sec: 41252.5). Total num frames: 6901760. Throughput: 0: 10331.5. Samples: 720336. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
1493
+ [2023-08-17 13:00:49,789][131794] Avg episode reward: [(0, '23.809')]
1494
+ [2023-08-17 13:00:49,994][138076] Updated weights for policy 0, policy_version 1688 (0.0006)
1495
+ [2023-08-17 13:00:50,994][138076] Updated weights for policy 0, policy_version 1698 (0.0007)
1496
+ [2023-08-17 13:00:51,961][138076] Updated weights for policy 0, policy_version 1708 (0.0006)
1497
+ [2023-08-17 13:00:52,902][138076] Updated weights for policy 0, policy_version 1718 (0.0006)
1498
+ [2023-08-17 13:00:53,878][138076] Updated weights for policy 0, policy_version 1728 (0.0007)
1499
+ [2023-08-17 13:00:54,788][131794] Fps is (10 sec: 41779.3, 60 sec: 41369.6, 300 sec: 41287.7). Total num frames: 7110656. Throughput: 0: 10348.9. Samples: 752188. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
1500
+ [2023-08-17 13:00:54,789][131794] Avg episode reward: [(0, '20.533')]
1501
+ [2023-08-17 13:00:54,908][138076] Updated weights for policy 0, policy_version 1738 (0.0007)
1502
+ [2023-08-17 13:00:55,862][138076] Updated weights for policy 0, policy_version 1748 (0.0006)
1503
+ [2023-08-17 13:00:56,857][138076] Updated weights for policy 0, policy_version 1758 (0.0006)
1504
+ [2023-08-17 13:00:57,828][138076] Updated weights for policy 0, policy_version 1768 (0.0006)
1505
+ [2023-08-17 13:00:58,775][138076] Updated weights for policy 0, policy_version 1778 (0.0006)
1506
+ [2023-08-17 13:00:59,711][138076] Updated weights for policy 0, policy_version 1788 (0.0006)
1507
+ [2023-08-17 13:00:59,788][131794] Fps is (10 sec: 42189.4, 60 sec: 41506.1, 300 sec: 41369.6). Total num frames: 7323648. Throughput: 0: 10401.9. Samples: 814868. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
1508
+ [2023-08-17 13:00:59,789][131794] Avg episode reward: [(0, '24.820')]
1509
+ [2023-08-17 13:01:00,668][138076] Updated weights for policy 0, policy_version 1798 (0.0007)
1510
+ [2023-08-17 13:01:01,641][138076] Updated weights for policy 0, policy_version 1808 (0.0006)
1511
+ [2023-08-17 13:01:02,611][138076] Updated weights for policy 0, policy_version 1818 (0.0006)
1512
+ [2023-08-17 13:01:03,492][138076] Updated weights for policy 0, policy_version 1828 (0.0005)
1513
+ [2023-08-17 13:01:04,409][138076] Updated weights for policy 0, policy_version 1838 (0.0006)
1514
+ [2023-08-17 13:01:04,788][131794] Fps is (10 sec: 43417.8, 60 sec: 41711.0, 300 sec: 41538.3). Total num frames: 7544832. Throughput: 0: 10472.4. Samples: 880194. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
1515
+ [2023-08-17 13:01:04,789][131794] Avg episode reward: [(0, '21.285')]
1516
+ [2023-08-17 13:01:05,381][138076] Updated weights for policy 0, policy_version 1848 (0.0006)
1517
+ [2023-08-17 13:01:06,393][138076] Updated weights for policy 0, policy_version 1858 (0.0006)
1518
+ [2023-08-17 13:01:07,365][138076] Updated weights for policy 0, policy_version 1868 (0.0006)
1519
+ [2023-08-17 13:01:08,372][138076] Updated weights for policy 0, policy_version 1878 (0.0006)
1520
+ [2023-08-17 13:01:09,387][138076] Updated weights for policy 0, policy_version 1888 (0.0006)
1521
+ [2023-08-17 13:01:09,788][131794] Fps is (10 sec: 42188.8, 60 sec: 41642.6, 300 sec: 41460.6). Total num frames: 7745536. Throughput: 0: 10490.1. Samples: 911686. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
1522
+ [2023-08-17 13:01:09,789][131794] Avg episode reward: [(0, '23.546')]
1523
+ [2023-08-17 13:01:10,409][138076] Updated weights for policy 0, policy_version 1898 (0.0006)
1524
+ [2023-08-17 13:01:11,405][138076] Updated weights for policy 0, policy_version 1908 (0.0006)
1525
+ [2023-08-17 13:01:12,388][138076] Updated weights for policy 0, policy_version 1918 (0.0006)
1526
+ [2023-08-17 13:01:13,419][138076] Updated weights for policy 0, policy_version 1928 (0.0006)
1527
+ [2023-08-17 13:01:14,389][138076] Updated weights for policy 0, policy_version 1938 (0.0006)
1528
+ [2023-08-17 13:01:14,788][131794] Fps is (10 sec: 40959.9, 60 sec: 41779.2, 300 sec: 41477.4). Total num frames: 7954432. Throughput: 0: 10458.9. Samples: 972538. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
1529
+ [2023-08-17 13:01:14,789][131794] Avg episode reward: [(0, '22.888')]
1530
+ [2023-08-17 13:01:15,393][138076] Updated weights for policy 0, policy_version 1948 (0.0006)
1531
+ [2023-08-17 13:01:16,338][138076] Updated weights for policy 0, policy_version 1958 (0.0006)
1532
+ [2023-08-17 13:01:17,292][138076] Updated weights for policy 0, policy_version 1968 (0.0005)
1533
+ [2023-08-17 13:01:18,289][138076] Updated weights for policy 0, policy_version 1978 (0.0006)
1534
+ [2023-08-17 13:01:19,274][138076] Updated weights for policy 0, policy_version 1988 (0.0006)
1535
+ [2023-08-17 13:01:19,788][131794] Fps is (10 sec: 41779.1, 60 sec: 41779.2, 300 sec: 41492.5). Total num frames: 8163328. Throughput: 0: 10474.6. Samples: 1035566. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
1536
+ [2023-08-17 13:01:19,789][131794] Avg episode reward: [(0, '23.391')]
1537
+ [2023-08-17 13:01:20,269][138076] Updated weights for policy 0, policy_version 1998 (0.0006)
1538
+ [2023-08-17 13:01:21,317][138076] Updated weights for policy 0, policy_version 2008 (0.0007)
1539
+ [2023-08-17 13:01:22,285][138076] Updated weights for policy 0, policy_version 2018 (0.0006)
1540
+ [2023-08-17 13:01:23,339][138076] Updated weights for policy 0, policy_version 2028 (0.0007)
1541
+ [2023-08-17 13:01:24,378][138076] Updated weights for policy 0, policy_version 2038 (0.0007)
1542
+ [2023-08-17 13:01:24,788][131794] Fps is (10 sec: 40549.8, 60 sec: 41574.3, 300 sec: 41389.1). Total num frames: 8359936. Throughput: 0: 10485.4. Samples: 1066232. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
1543
+ [2023-08-17 13:01:24,789][131794] Avg episode reward: [(0, '24.663')]
1544
+ [2023-08-17 13:01:25,431][138076] Updated weights for policy 0, policy_version 2048 (0.0007)
1545
+ [2023-08-17 13:01:26,428][138076] Updated weights for policy 0, policy_version 2058 (0.0006)
1546
+ [2023-08-17 13:01:27,401][138076] Updated weights for policy 0, policy_version 2068 (0.0006)
1547
+ [2023-08-17 13:01:28,336][138076] Updated weights for policy 0, policy_version 2078 (0.0007)
1548
+ [2023-08-17 13:01:29,286][138076] Updated weights for policy 0, policy_version 2088 (0.0006)
1549
+ [2023-08-17 13:01:29,788][131794] Fps is (10 sec: 40550.2, 60 sec: 41642.6, 300 sec: 41406.8). Total num frames: 8568832. Throughput: 0: 10439.8. Samples: 1127022. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
1550
+ [2023-08-17 13:01:29,790][131794] Avg episode reward: [(0, '25.178')]
1551
+ [2023-08-17 13:01:29,791][138062] Saving new best policy, reward=25.178!
1552
+ [2023-08-17 13:01:30,320][138076] Updated weights for policy 0, policy_version 2098 (0.0007)
1553
+ [2023-08-17 13:01:31,287][138076] Updated weights for policy 0, policy_version 2108 (0.0006)
1554
+ [2023-08-17 13:01:32,279][138076] Updated weights for policy 0, policy_version 2118 (0.0006)
1555
+ [2023-08-17 13:01:33,314][138076] Updated weights for policy 0, policy_version 2128 (0.0007)
1556
+ [2023-08-17 13:01:34,294][138076] Updated weights for policy 0, policy_version 2138 (0.0006)
1557
+ [2023-08-17 13:01:34,788][131794] Fps is (10 sec: 41370.4, 60 sec: 41574.4, 300 sec: 41387.4). Total num frames: 8773632. Throughput: 0: 10409.6. Samples: 1188768. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
1558
+ [2023-08-17 13:01:34,789][131794] Avg episode reward: [(0, '26.292')]
1559
+ [2023-08-17 13:01:34,791][138062] Saving /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000002142_8773632.pth...
1560
+ [2023-08-17 13:01:34,827][138062] Removing /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000170_696320.pth
1561
+ [2023-08-17 13:01:34,834][138062] Saving new best policy, reward=26.292!
1562
+ [2023-08-17 13:01:35,358][138076] Updated weights for policy 0, policy_version 2148 (0.0007)
1563
+ [2023-08-17 13:01:36,310][138076] Updated weights for policy 0, policy_version 2158 (0.0006)
1564
+ [2023-08-17 13:01:37,294][138076] Updated weights for policy 0, policy_version 2168 (0.0006)
1565
+ [2023-08-17 13:01:38,264][138076] Updated weights for policy 0, policy_version 2178 (0.0006)
1566
+ [2023-08-17 13:01:39,278][138076] Updated weights for policy 0, policy_version 2188 (0.0007)
1567
+ [2023-08-17 13:01:39,788][131794] Fps is (10 sec: 41370.0, 60 sec: 41642.7, 300 sec: 41403.8). Total num frames: 8982528. Throughput: 0: 10381.0. Samples: 1219334. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
1568
+ [2023-08-17 13:01:39,789][131794] Avg episode reward: [(0, '21.438')]
1569
+ [2023-08-17 13:01:40,280][138076] Updated weights for policy 0, policy_version 2198 (0.0006)
1570
+ [2023-08-17 13:01:41,229][138076] Updated weights for policy 0, policy_version 2208 (0.0006)
1571
+ [2023-08-17 13:01:42,216][138076] Updated weights for policy 0, policy_version 2218 (0.0006)
1572
+ [2023-08-17 13:01:43,186][138076] Updated weights for policy 0, policy_version 2228 (0.0006)
1573
+ [2023-08-17 13:01:44,149][138076] Updated weights for policy 0, policy_version 2238 (0.0006)
1574
+ [2023-08-17 13:01:44,788][131794] Fps is (10 sec: 41779.0, 60 sec: 41642.7, 300 sec: 41418.8). Total num frames: 9191424. Throughput: 0: 10379.3. Samples: 1281936. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
1575
+ [2023-08-17 13:01:44,789][131794] Avg episode reward: [(0, '22.403')]
1576
+ [2023-08-17 13:01:45,098][138076] Updated weights for policy 0, policy_version 2248 (0.0006)
1577
+ [2023-08-17 13:01:46,042][138076] Updated weights for policy 0, policy_version 2258 (0.0006)
1578
+ [2023-08-17 13:01:47,015][138076] Updated weights for policy 0, policy_version 2268 (0.0006)
1579
+ [2023-08-17 13:01:47,992][138076] Updated weights for policy 0, policy_version 2278 (0.0007)
1580
+ [2023-08-17 13:01:49,037][138076] Updated weights for policy 0, policy_version 2288 (0.0007)
1581
+ [2023-08-17 13:01:49,788][131794] Fps is (10 sec: 41779.1, 60 sec: 41642.8, 300 sec: 41432.6). Total num frames: 9400320. Throughput: 0: 10329.8. Samples: 1345036. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
1582
+ [2023-08-17 13:01:49,789][131794] Avg episode reward: [(0, '24.155')]
1583
+ [2023-08-17 13:01:50,078][138076] Updated weights for policy 0, policy_version 2298 (0.0006)
1584
+ [2023-08-17 13:01:51,011][138076] Updated weights for policy 0, policy_version 2308 (0.0006)
1585
+ [2023-08-17 13:01:52,009][138076] Updated weights for policy 0, policy_version 2318 (0.0006)
1586
+ [2023-08-17 13:01:52,933][138076] Updated weights for policy 0, policy_version 2328 (0.0006)
1587
+ [2023-08-17 13:01:53,988][138076] Updated weights for policy 0, policy_version 2338 (0.0007)
1588
+ [2023-08-17 13:01:54,788][131794] Fps is (10 sec: 41779.5, 60 sec: 41642.7, 300 sec: 41445.5). Total num frames: 9609216. Throughput: 0: 10323.1. Samples: 1376224. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
1589
+ [2023-08-17 13:01:54,789][131794] Avg episode reward: [(0, '23.394')]
1590
+ [2023-08-17 13:01:54,969][138076] Updated weights for policy 0, policy_version 2348 (0.0006)
1591
+ [2023-08-17 13:01:55,990][138076] Updated weights for policy 0, policy_version 2358 (0.0006)
1592
+ [2023-08-17 13:01:56,962][138076] Updated weights for policy 0, policy_version 2368 (0.0006)
1593
+ [2023-08-17 13:01:57,986][138076] Updated weights for policy 0, policy_version 2378 (0.0006)
1594
+ [2023-08-17 13:01:58,980][138076] Updated weights for policy 0, policy_version 2388 (0.0006)
1595
+ [2023-08-17 13:01:59,788][131794] Fps is (10 sec: 41369.7, 60 sec: 41506.1, 300 sec: 41428.1). Total num frames: 9814016. Throughput: 0: 10345.8. Samples: 1438100. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
1596
+ [2023-08-17 13:01:59,789][131794] Avg episode reward: [(0, '22.972')]
1597
+ [2023-08-17 13:01:59,927][138076] Updated weights for policy 0, policy_version 2398 (0.0006)
1598
+ [2023-08-17 13:02:00,931][138076] Updated weights for policy 0, policy_version 2408 (0.0007)
1599
+ [2023-08-17 13:02:01,923][138076] Updated weights for policy 0, policy_version 2418 (0.0006)
1600
+ [2023-08-17 13:02:02,877][138076] Updated weights for policy 0, policy_version 2428 (0.0005)
1601
+ [2023-08-17 13:02:03,905][138076] Updated weights for policy 0, policy_version 2438 (0.0007)
1602
+ [2023-08-17 13:02:04,439][138062] Stopping Batcher_0...
1603
+ [2023-08-17 13:02:04,440][138062] Loop batcher_evt_loop terminating...
1604
+ [2023-08-17 13:02:04,440][138062] Saving /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000002443_10006528.pth...
1605
+ [2023-08-17 13:02:04,439][131794] Component Batcher_0 stopped!
1606
+ [2023-08-17 13:02:04,455][138076] Weights refcount: 2 0
1607
+ [2023-08-17 13:02:04,456][138076] Stopping InferenceWorker_p0-w0...
1608
+ [2023-08-17 13:02:04,456][138076] Loop inference_proc0-0_evt_loop terminating...
1609
+ [2023-08-17 13:02:04,456][131794] Component InferenceWorker_p0-w0 stopped!
1610
+ [2023-08-17 13:02:04,473][138062] Removing /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth
1611
+ [2023-08-17 13:02:04,477][138062] Saving /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000002443_10006528.pth...
1612
+ [2023-08-17 13:02:04,501][138080] Stopping RolloutWorker_w4...
1613
+ [2023-08-17 13:02:04,501][138080] Loop rollout_proc4_evt_loop terminating...
1614
+ [2023-08-17 13:02:04,501][131794] Component RolloutWorker_w4 stopped!
1615
+ [2023-08-17 13:02:04,507][138081] Stopping RolloutWorker_w5...
1616
+ [2023-08-17 13:02:04,507][138079] Stopping RolloutWorker_w3...
1617
+ [2023-08-17 13:02:04,507][138081] Loop rollout_proc5_evt_loop terminating...
1618
+ [2023-08-17 13:02:04,507][138079] Loop rollout_proc3_evt_loop terminating...
1619
+ [2023-08-17 13:02:04,507][131794] Component RolloutWorker_w3 stopped!
1620
+ [2023-08-17 13:02:04,508][131794] Component RolloutWorker_w5 stopped!
1621
+ [2023-08-17 13:02:04,511][138083] Stopping RolloutWorker_w7...
1622
+ [2023-08-17 13:02:04,511][138083] Loop rollout_proc7_evt_loop terminating...
1623
+ [2023-08-17 13:02:04,511][131794] Component RolloutWorker_w7 stopped!
1624
+ [2023-08-17 13:02:04,512][138078] Stopping RolloutWorker_w2...
1625
+ [2023-08-17 13:02:04,512][138078] Loop rollout_proc2_evt_loop terminating...
1626
+ [2023-08-17 13:02:04,513][138082] Stopping RolloutWorker_w6...
1627
+ [2023-08-17 13:02:04,512][131794] Component RolloutWorker_w2 stopped!
1628
+ [2023-08-17 13:02:04,513][138082] Loop rollout_proc6_evt_loop terminating...
1629
+ [2023-08-17 13:02:04,513][131794] Component RolloutWorker_w6 stopped!
1630
+ [2023-08-17 13:02:04,519][138077] Stopping RolloutWorker_w1...
1631
+ [2023-08-17 13:02:04,519][138077] Loop rollout_proc1_evt_loop terminating...
1632
+ [2023-08-17 13:02:04,519][131794] Component RolloutWorker_w1 stopped!
1633
+ [2023-08-17 13:02:04,535][138062] Stopping LearnerWorker_p0...
1634
+ [2023-08-17 13:02:04,536][138062] Loop learner_proc0_evt_loop terminating...
1635
+ [2023-08-17 13:02:04,535][131794] Component LearnerWorker_p0 stopped!
1636
+ [2023-08-17 13:02:04,539][138075] Stopping RolloutWorker_w0...
1637
+ [2023-08-17 13:02:04,540][138075] Loop rollout_proc0_evt_loop terminating...
1638
+ [2023-08-17 13:02:04,539][131794] Component RolloutWorker_w0 stopped!
1639
+ [2023-08-17 13:02:04,540][131794] Waiting for process learner_proc0 to stop...
1640
+ [2023-08-17 13:02:05,188][131794] Waiting for process inference_proc0-0 to join...
1641
+ [2023-08-17 13:02:05,189][131794] Waiting for process rollout_proc0 to join...
1642
+ [2023-08-17 13:02:05,190][131794] Waiting for process rollout_proc1 to join...
1643
+ [2023-08-17 13:02:05,190][131794] Waiting for process rollout_proc2 to join...
1644
+ [2023-08-17 13:02:05,191][131794] Waiting for process rollout_proc3 to join...
1645
+ [2023-08-17 13:02:05,191][131794] Waiting for process rollout_proc4 to join...
1646
+ [2023-08-17 13:02:05,192][131794] Waiting for process rollout_proc5 to join...
1647
+ [2023-08-17 13:02:05,193][131794] Waiting for process rollout_proc6 to join...
1648
+ [2023-08-17 13:02:05,193][131794] Waiting for process rollout_proc7 to join...
1649
+ [2023-08-17 13:02:05,194][131794] Batcher 0 profile tree view:
+ batching: 12.6388, releasing_batches: 0.0155
+ [2023-08-17 13:02:05,194][131794] InferenceWorker_p0-w0 profile tree view:
+ wait_policy: 0.0000
+ wait_policy_total: 2.6989
+ update_model: 2.1930
+ weight_update: 0.0007
+ one_step: 0.0019
+ handle_policy_step: 134.6435
+ deserialize: 6.0242, stack: 0.6403, obs_to_device_normalize: 30.9018, forward: 66.9499, send_messages: 8.4976
+ prepare_outputs: 15.8515
+ to_cpu: 10.1908
+ [2023-08-17 13:02:05,195][131794] Learner 0 profile tree view:
+ misc: 0.0062, prepare_batch: 6.1025
+ train: 17.4443
+ epoch_init: 0.0048, minibatch_init: 0.0045, losses_postprocess: 0.4413, kl_divergence: 0.3416, after_optimizer: 0.4372
+ calculate_losses: 6.4031
+ losses_init: 0.0025, forward_head: 0.4213, bptt_initial: 3.8098, tail: 0.4256, advantages_returns: 0.1042, losses: 0.8304
+ bptt: 0.6899
+ bptt_forward_core: 0.6553
+ update: 9.5344
+ clip: 4.9986
+ [2023-08-17 13:02:05,195][131794] RolloutWorker_w0 profile tree view:
+ wait_for_trajectories: 0.1145, enqueue_policy_requests: 4.7133, env_step: 65.9256, overhead: 6.2646, complete_rollouts: 0.1526
+ save_policy_outputs: 6.3662
+ split_output_tensors: 2.9670
+ [2023-08-17 13:02:05,195][131794] RolloutWorker_w7 profile tree view:
+ wait_for_trajectories: 0.1125, enqueue_policy_requests: 4.8606, env_step: 68.5610, overhead: 6.5985, complete_rollouts: 0.1617
+ save_policy_outputs: 6.9442
+ split_output_tensors: 3.1602
+ [2023-08-17 13:02:05,196][131794] Loop Runner_EvtLoop terminating...
+ [2023-08-17 13:02:05,197][131794] Runner profile tree view:
+ main_loop: 149.2631
+ [2023-08-17 13:02:05,197][131794] Collected {0: 10006528}, FPS: 40201.8
+ [2023-08-17 13:02:20,239][131794] Loading existing experiment configuration from /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment/config.json
+ [2023-08-17 13:02:20,239][131794] Overriding arg 'num_workers' with value 1 passed from command line
+ [2023-08-17 13:02:20,240][131794] Adding new argument 'no_render'=True that is not in the saved config file!
+ [2023-08-17 13:02:20,240][131794] Adding new argument 'save_video'=True that is not in the saved config file!
+ [2023-08-17 13:02:20,241][131794] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+ [2023-08-17 13:02:20,241][131794] Adding new argument 'video_name'=None that is not in the saved config file!
+ [2023-08-17 13:02:20,242][131794] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
+ [2023-08-17 13:02:20,243][131794] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+ [2023-08-17 13:02:20,243][131794] Adding new argument 'push_to_hub'=False that is not in the saved config file!
+ [2023-08-17 13:02:20,244][131794] Adding new argument 'hf_repository'=None that is not in the saved config file!
+ [2023-08-17 13:02:20,244][131794] Adding new argument 'policy_index'=0 that is not in the saved config file!
+ [2023-08-17 13:02:20,245][131794] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+ [2023-08-17 13:02:20,246][131794] Adding new argument 'train_script'=None that is not in the saved config file!
+ [2023-08-17 13:02:20,246][131794] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+ [2023-08-17 13:02:20,247][131794] Using frameskip 1 and render_action_repeat=4 for evaluation
+ [2023-08-17 13:02:20,250][131794] RunningMeanStd input shape: (3, 72, 128)
+ [2023-08-17 13:02:20,251][131794] RunningMeanStd input shape: (1,)
+ [2023-08-17 13:02:20,257][131794] ConvEncoder: input_channels=3
+ [2023-08-17 13:02:20,280][131794] Conv encoder output size: 512
+ [2023-08-17 13:02:20,281][131794] Policy head output size: 512
+ [2023-08-17 13:02:20,723][131794] Loading state from checkpoint /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000002443_10006528.pth...
+ [2023-08-17 13:02:21,021][131794] Num frames 100...
+ [2023-08-17 13:02:21,076][131794] Num frames 200...
+ [2023-08-17 13:02:21,129][131794] Num frames 300...
+ [2023-08-17 13:02:21,183][131794] Num frames 400...
+ [2023-08-17 13:02:21,237][131794] Num frames 500...
+ [2023-08-17 13:02:21,314][131794] Avg episode rewards: #0: 8.440, true rewards: #0: 5.440
+ [2023-08-17 13:02:21,315][131794] Avg episode reward: 8.440, avg true_objective: 5.440
+ [2023-08-17 13:02:21,348][131794] Num frames 600...
+ [2023-08-17 13:02:21,402][131794] Num frames 700...
+ [2023-08-17 13:02:21,457][131794] Num frames 800...
+ [2023-08-17 13:02:21,512][131794] Num frames 900...
+ [2023-08-17 13:02:21,567][131794] Num frames 1000...
+ [2023-08-17 13:02:21,623][131794] Num frames 1100...
+ [2023-08-17 13:02:21,679][131794] Num frames 1200...
+ [2023-08-17 13:02:21,736][131794] Num frames 1300...
+ [2023-08-17 13:02:21,793][131794] Num frames 1400...
+ [2023-08-17 13:02:21,852][131794] Num frames 1500...
+ [2023-08-17 13:02:21,912][131794] Num frames 1600...
+ [2023-08-17 13:02:21,969][131794] Num frames 1700...
+ [2023-08-17 13:02:22,026][131794] Num frames 1800...
+ [2023-08-17 13:02:22,081][131794] Num frames 1900...
+ [2023-08-17 13:02:22,137][131794] Num frames 2000...
+ [2023-08-17 13:02:22,193][131794] Num frames 2100...
+ [2023-08-17 13:02:22,251][131794] Num frames 2200...
+ [2023-08-17 13:02:22,308][131794] Num frames 2300...
+ [2023-08-17 13:02:22,367][131794] Num frames 2400...
+ [2023-08-17 13:02:22,424][131794] Num frames 2500...
+ [2023-08-17 13:02:22,482][131794] Num frames 2600...
+ [2023-08-17 13:02:22,560][131794] Avg episode rewards: #0: 31.219, true rewards: #0: 13.220
+ [2023-08-17 13:02:22,560][131794] Avg episode reward: 31.219, avg true_objective: 13.220
+ [2023-08-17 13:02:22,593][131794] Num frames 2700...
+ [2023-08-17 13:02:22,649][131794] Num frames 2800...
+ [2023-08-17 13:02:22,705][131794] Num frames 2900...
+ [2023-08-17 13:02:22,760][131794] Num frames 3000...
+ [2023-08-17 13:02:22,817][131794] Num frames 3100...
+ [2023-08-17 13:02:22,874][131794] Num frames 3200...
+ [2023-08-17 13:02:22,931][131794] Num frames 3300...
+ [2023-08-17 13:02:22,986][131794] Num frames 3400...
+ [2023-08-17 13:02:23,040][131794] Num frames 3500...
+ [2023-08-17 13:02:23,095][131794] Num frames 3600...
+ [2023-08-17 13:02:23,150][131794] Num frames 3700...
+ [2023-08-17 13:02:23,204][131794] Num frames 3800...
+ [2023-08-17 13:02:23,310][131794] Avg episode rewards: #0: 30.323, true rewards: #0: 12.990
+ [2023-08-17 13:02:23,311][131794] Avg episode reward: 30.323, avg true_objective: 12.990
+ [2023-08-17 13:02:23,313][131794] Num frames 3900...
+ [2023-08-17 13:02:23,369][131794] Num frames 4000...
+ [2023-08-17 13:02:23,426][131794] Num frames 4100...
+ [2023-08-17 13:02:23,484][131794] Num frames 4200...
+ [2023-08-17 13:02:23,539][131794] Num frames 4300...
+ [2023-08-17 13:02:23,595][131794] Num frames 4400...
+ [2023-08-17 13:02:23,650][131794] Num frames 4500...
+ [2023-08-17 13:02:23,706][131794] Num frames 4600...
+ [2023-08-17 13:02:23,761][131794] Num frames 4700...
+ [2023-08-17 13:02:23,816][131794] Num frames 4800...
+ [2023-08-17 13:02:23,872][131794] Num frames 4900...
+ [2023-08-17 13:02:23,929][131794] Num frames 5000...
+ [2023-08-17 13:02:23,984][131794] Num frames 5100...
+ [2023-08-17 13:02:24,039][131794] Num frames 5200...
+ [2023-08-17 13:02:24,096][131794] Num frames 5300...
+ [2023-08-17 13:02:24,152][131794] Num frames 5400...
+ [2023-08-17 13:02:24,242][131794] Avg episode rewards: #0: 31.662, true rewards: #0: 13.662
+ [2023-08-17 13:02:24,243][131794] Avg episode reward: 31.662, avg true_objective: 13.662
+ [2023-08-17 13:02:24,263][131794] Num frames 5500...
+ [2023-08-17 13:02:24,318][131794] Num frames 5600...
+ [2023-08-17 13:02:24,374][131794] Num frames 5700...
+ [2023-08-17 13:02:24,428][131794] Num frames 5800...
+ [2023-08-17 13:02:24,483][131794] Num frames 5900...
+ [2023-08-17 13:02:24,538][131794] Num frames 6000...
+ [2023-08-17 13:02:24,631][131794] Avg episode rewards: #0: 27.146, true rewards: #0: 12.146
+ [2023-08-17 13:02:24,632][131794] Avg episode reward: 27.146, avg true_objective: 12.146
+ [2023-08-17 13:02:24,648][131794] Num frames 6100...
+ [2023-08-17 13:02:24,706][131794] Num frames 6200...
+ [2023-08-17 13:02:24,763][131794] Num frames 6300...
+ [2023-08-17 13:02:24,818][131794] Num frames 6400...
+ [2023-08-17 13:02:24,872][131794] Num frames 6500...
+ [2023-08-17 13:02:24,937][131794] Avg episode rewards: #0: 23.535, true rewards: #0: 10.868
+ [2023-08-17 13:02:24,938][131794] Avg episode reward: 23.535, avg true_objective: 10.868
+ [2023-08-17 13:02:24,985][131794] Num frames 6600...
+ [2023-08-17 13:02:25,040][131794] Num frames 6700...
+ [2023-08-17 13:02:25,094][131794] Num frames 6800...
+ [2023-08-17 13:02:25,150][131794] Num frames 6900...
+ [2023-08-17 13:02:25,205][131794] Num frames 7000...
+ [2023-08-17 13:02:25,260][131794] Num frames 7100...
+ [2023-08-17 13:02:25,315][131794] Num frames 7200...
+ [2023-08-17 13:02:25,416][131794] Avg episode rewards: #0: 21.984, true rewards: #0: 10.413
+ [2023-08-17 13:02:25,417][131794] Avg episode reward: 21.984, avg true_objective: 10.413
+ [2023-08-17 13:02:25,424][131794] Num frames 7300...
+ [2023-08-17 13:02:25,479][131794] Num frames 7400...
+ [2023-08-17 13:02:25,535][131794] Num frames 7500...
+ [2023-08-17 13:02:25,591][131794] Num frames 7600...
+ [2023-08-17 13:02:25,646][131794] Num frames 7700...
+ [2023-08-17 13:02:25,746][131794] Avg episode rewards: #0: 20.484, true rewards: #0: 9.734
+ [2023-08-17 13:02:25,747][131794] Avg episode reward: 20.484, avg true_objective: 9.734
+ [2023-08-17 13:02:25,755][131794] Num frames 7800...
+ [2023-08-17 13:02:25,809][131794] Num frames 7900...
+ [2023-08-17 13:02:25,864][131794] Num frames 8000...
+ [2023-08-17 13:02:25,918][131794] Num frames 8100...
+ [2023-08-17 13:02:25,972][131794] Num frames 8200...
+ [2023-08-17 13:02:26,027][131794] Num frames 8300...
+ [2023-08-17 13:02:26,082][131794] Num frames 8400...
+ [2023-08-17 13:02:26,167][131794] Avg episode rewards: #0: 19.510, true rewards: #0: 9.399
+ [2023-08-17 13:02:26,168][131794] Avg episode reward: 19.510, avg true_objective: 9.399
+ [2023-08-17 13:02:26,190][131794] Num frames 8500...
+ [2023-08-17 13:02:26,245][131794] Num frames 8600...
+ [2023-08-17 13:02:26,300][131794] Num frames 8700...
+ [2023-08-17 13:02:26,355][131794] Num frames 8800...
+ [2023-08-17 13:02:26,410][131794] Num frames 8900...
+ [2023-08-17 13:02:26,466][131794] Avg episode rewards: #0: 18.107, true rewards: #0: 8.907
+ [2023-08-17 13:02:26,467][131794] Avg episode reward: 18.107, avg true_objective: 8.907
+ [2023-08-17 13:02:34,995][131794] Replay video saved to /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment/replay.mp4!
+ [2023-08-17 13:03:03,228][131794] Loading existing experiment configuration from /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment/config.json
+ [2023-08-17 13:03:03,228][131794] Overriding arg 'num_workers' with value 1 passed from command line
+ [2023-08-17 13:03:03,229][131794] Adding new argument 'no_render'=True that is not in the saved config file!
+ [2023-08-17 13:03:03,229][131794] Adding new argument 'save_video'=True that is not in the saved config file!
+ [2023-08-17 13:03:03,230][131794] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+ [2023-08-17 13:03:03,230][131794] Adding new argument 'video_name'=None that is not in the saved config file!
+ [2023-08-17 13:03:03,230][131794] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
+ [2023-08-17 13:03:03,231][131794] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+ [2023-08-17 13:03:03,231][131794] Adding new argument 'push_to_hub'=True that is not in the saved config file!
+ [2023-08-17 13:03:03,231][131794] Adding new argument 'hf_repository'='patonw/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
+ [2023-08-17 13:03:03,232][131794] Adding new argument 'policy_index'=0 that is not in the saved config file!
+ [2023-08-17 13:03:03,232][131794] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+ [2023-08-17 13:03:03,233][131794] Adding new argument 'train_script'=None that is not in the saved config file!
+ [2023-08-17 13:03:03,233][131794] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+ [2023-08-17 13:03:03,233][131794] Using frameskip 1 and render_action_repeat=4 for evaluation
+ [2023-08-17 13:03:03,238][131794] RunningMeanStd input shape: (3, 72, 128)
+ [2023-08-17 13:03:03,239][131794] RunningMeanStd input shape: (1,)
+ [2023-08-17 13:03:03,245][131794] ConvEncoder: input_channels=3
+ [2023-08-17 13:03:03,266][131794] Conv encoder output size: 512
+ [2023-08-17 13:03:03,267][131794] Policy head output size: 512
+ [2023-08-17 13:03:03,283][131794] Loading state from checkpoint /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000002443_10006528.pth...
+ [2023-08-17 13:03:03,601][131794] Num frames 100...
+ [2023-08-17 13:03:03,655][131794] Num frames 200...
+ [2023-08-17 13:03:03,719][131794] Num frames 300...
+ [2023-08-17 13:03:03,778][131794] Num frames 400...
+ [2023-08-17 13:03:03,835][131794] Num frames 500...
+ [2023-08-17 13:03:03,888][131794] Num frames 600...
+ [2023-08-17 13:03:03,942][131794] Num frames 700...
+ [2023-08-17 13:03:03,995][131794] Num frames 800...
+ [2023-08-17 13:03:04,049][131794] Num frames 900...
+ [2023-08-17 13:03:04,102][131794] Num frames 1000...
+ [2023-08-17 13:03:04,156][131794] Num frames 1100...
+ [2023-08-17 13:03:04,210][131794] Num frames 1200...
+ [2023-08-17 13:03:04,265][131794] Num frames 1300...
+ [2023-08-17 13:03:04,319][131794] Num frames 1400...
+ [2023-08-17 13:03:04,376][131794] Num frames 1500...
+ [2023-08-17 13:03:04,430][131794] Num frames 1600...
+ [2023-08-17 13:03:04,485][131794] Num frames 1700...
+ [2023-08-17 13:03:04,541][131794] Num frames 1800...
+ [2023-08-17 13:03:04,597][131794] Num frames 1900...
+ [2023-08-17 13:03:04,655][131794] Num frames 2000...
+ [2023-08-17 13:03:04,715][131794] Num frames 2100...
+ [2023-08-17 13:03:04,766][131794] Avg episode rewards: #0: 53.999, true rewards: #0: 21.000
+ [2023-08-17 13:03:04,767][131794] Avg episode reward: 53.999, avg true_objective: 21.000
+ [2023-08-17 13:03:04,825][131794] Num frames 2200...
+ [2023-08-17 13:03:04,883][131794] Num frames 2300...
+ [2023-08-17 13:03:04,942][131794] Num frames 2400...
+ [2023-08-17 13:03:04,999][131794] Num frames 2500...
+ [2023-08-17 13:03:05,056][131794] Num frames 2600...
+ [2023-08-17 13:03:05,114][131794] Num frames 2700...
+ [2023-08-17 13:03:05,173][131794] Num frames 2800...
+ [2023-08-17 13:03:05,231][131794] Num frames 2900...
+ [2023-08-17 13:03:05,291][131794] Num frames 3000...
+ [2023-08-17 13:03:05,348][131794] Num frames 3100...
+ [2023-08-17 13:03:05,408][131794] Num frames 3200...
+ [2023-08-17 13:03:05,466][131794] Num frames 3300...
+ [2023-08-17 13:03:05,526][131794] Num frames 3400...
+ [2023-08-17 13:03:05,586][131794] Num frames 3500...
+ [2023-08-17 13:03:05,672][131794] Avg episode rewards: #0: 44.770, true rewards: #0: 17.770
+ [2023-08-17 13:03:05,673][131794] Avg episode reward: 44.770, avg true_objective: 17.770
+ [2023-08-17 13:03:05,699][131794] Num frames 3600...
+ [2023-08-17 13:03:05,759][131794] Num frames 3700...
+ [2023-08-17 13:03:05,819][131794] Num frames 3800...
+ [2023-08-17 13:03:05,878][131794] Num frames 3900...
+ [2023-08-17 13:03:05,936][131794] Num frames 4000...
+ [2023-08-17 13:03:05,993][131794] Num frames 4100...
+ [2023-08-17 13:03:06,050][131794] Num frames 4200...
+ [2023-08-17 13:03:06,108][131794] Num frames 4300...
+ [2023-08-17 13:03:06,166][131794] Num frames 4400...
+ [2023-08-17 13:03:06,225][131794] Num frames 4500...
+ [2023-08-17 13:03:06,284][131794] Num frames 4600...
+ [2023-08-17 13:03:06,343][131794] Num frames 4700...
+ [2023-08-17 13:03:06,401][131794] Num frames 4800...
+ [2023-08-17 13:03:06,463][131794] Num frames 4900...
+ [2023-08-17 13:03:06,521][131794] Num frames 5000...
+ [2023-08-17 13:03:06,579][131794] Num frames 5100...
+ [2023-08-17 13:03:06,638][131794] Num frames 5200...
+ [2023-08-17 13:03:06,696][131794] Num frames 5300...
+ [2023-08-17 13:03:06,753][131794] Num frames 5400...
+ [2023-08-17 13:03:06,812][131794] Num frames 5500...
+ [2023-08-17 13:03:06,870][131794] Num frames 5600...
+ [2023-08-17 13:03:06,954][131794] Avg episode rewards: #0: 48.179, true rewards: #0: 18.847
+ [2023-08-17 13:03:06,955][131794] Avg episode reward: 48.179, avg true_objective: 18.847
+ [2023-08-17 13:03:06,981][131794] Num frames 5700...
+ [2023-08-17 13:03:07,037][131794] Num frames 5800...
+ [2023-08-17 13:03:07,094][131794] Num frames 5900...
+ [2023-08-17 13:03:07,152][131794] Num frames 6000...
+ [2023-08-17 13:03:07,210][131794] Num frames 6100...
+ [2023-08-17 13:03:07,269][131794] Num frames 6200...
+ [2023-08-17 13:03:07,327][131794] Num frames 6300...
+ [2023-08-17 13:03:07,385][131794] Num frames 6400...
+ [2023-08-17 13:03:07,443][131794] Num frames 6500...
+ [2023-08-17 13:03:07,500][131794] Num frames 6600...
+ [2023-08-17 13:03:07,559][131794] Num frames 6700...
+ [2023-08-17 13:03:07,617][131794] Num frames 6800...
+ [2023-08-17 13:03:07,679][131794] Avg episode rewards: #0: 42.537, true rewards: #0: 17.038
+ [2023-08-17 13:03:07,679][131794] Avg episode reward: 42.537, avg true_objective: 17.038
+ [2023-08-17 13:03:07,728][131794] Num frames 6900...
+ [2023-08-17 13:03:07,787][131794] Num frames 7000...
+ [2023-08-17 13:03:07,845][131794] Num frames 7100...
+ [2023-08-17 13:03:07,903][131794] Num frames 7200...
+ [2023-08-17 13:03:07,961][131794] Num frames 7300...
+ [2023-08-17 13:03:08,020][131794] Num frames 7400...
+ [2023-08-17 13:03:08,078][131794] Num frames 7500...
+ [2023-08-17 13:03:08,137][131794] Num frames 7600...
+ [2023-08-17 13:03:08,195][131794] Num frames 7700...
+ [2023-08-17 13:03:08,255][131794] Num frames 7800...
+ [2023-08-17 13:03:08,313][131794] Num frames 7900...
+ [2023-08-17 13:03:08,371][131794] Num frames 8000...
+ [2023-08-17 13:03:08,430][131794] Num frames 8100...
+ [2023-08-17 13:03:08,492][131794] Num frames 8200...
+ [2023-08-17 13:03:08,551][131794] Num frames 8300...
+ [2023-08-17 13:03:08,610][131794] Num frames 8400...
+ [2023-08-17 13:03:08,699][131794] Avg episode rewards: #0: 41.920, true rewards: #0: 16.920
+ [2023-08-17 13:03:08,699][131794] Avg episode reward: 41.920, avg true_objective: 16.920
+ [2023-08-17 13:03:08,724][131794] Num frames 8500...
+ [2023-08-17 13:03:08,783][131794] Num frames 8600...
+ [2023-08-17 13:03:08,842][131794] Num frames 8700...
+ [2023-08-17 13:03:08,901][131794] Num frames 8800...
+ [2023-08-17 13:03:08,960][131794] Num frames 8900...
+ [2023-08-17 13:03:09,019][131794] Num frames 9000...
+ [2023-08-17 13:03:09,074][131794] Avg episode rewards: #0: 36.173, true rewards: #0: 15.007
+ [2023-08-17 13:03:09,074][131794] Avg episode reward: 36.173, avg true_objective: 15.007
+ [2023-08-17 13:03:09,127][131794] Num frames 9100...
+ [2023-08-17 13:03:09,181][131794] Num frames 9200...
+ [2023-08-17 13:03:09,235][131794] Num frames 9300...
+ [2023-08-17 13:03:09,290][131794] Num frames 9400...
+ [2023-08-17 13:03:09,344][131794] Num frames 9500...
+ [2023-08-17 13:03:09,398][131794] Num frames 9600...
+ [2023-08-17 13:03:09,452][131794] Num frames 9700...
+ [2023-08-17 13:03:09,506][131794] Num frames 9800...
+ [2023-08-17 13:03:09,559][131794] Num frames 9900...
+ [2023-08-17 13:03:09,663][131794] Avg episode rewards: #0: 34.137, true rewards: #0: 14.280
+ [2023-08-17 13:03:09,664][131794] Avg episode reward: 34.137, avg true_objective: 14.280
+ [2023-08-17 13:03:09,667][131794] Num frames 10000...
+ [2023-08-17 13:03:09,720][131794] Num frames 10100...
+ [2023-08-17 13:03:09,775][131794] Num frames 10200...
+ [2023-08-17 13:03:09,831][131794] Num frames 10300...
+ [2023-08-17 13:03:09,887][131794] Num frames 10400...
+ [2023-08-17 13:03:09,942][131794] Num frames 10500...
+ [2023-08-17 13:03:09,997][131794] Num frames 10600...
+ [2023-08-17 13:03:10,101][131794] Avg episode rewards: #0: 32.115, true rewards: #0: 13.365
+ [2023-08-17 13:03:10,102][131794] Avg episode reward: 32.115, avg true_objective: 13.365
+ [2023-08-17 13:03:10,108][131794] Num frames 10700...
+ [2023-08-17 13:03:10,164][131794] Num frames 10800...
+ [2023-08-17 13:03:10,220][131794] Num frames 10900...
+ [2023-08-17 13:03:10,275][131794] Num frames 11000...
+ [2023-08-17 13:03:10,334][131794] Avg episode rewards: #0: 29.235, true rewards: #0: 12.236
+ [2023-08-17 13:03:10,335][131794] Avg episode reward: 29.235, avg true_objective: 12.236
+ [2023-08-17 13:03:10,387][131794] Num frames 11100...
+ [2023-08-17 13:03:10,442][131794] Num frames 11200...
+ [2023-08-17 13:03:10,497][131794] Num frames 11300...
+ [2023-08-17 13:03:10,553][131794] Num frames 11400...
+ [2023-08-17 13:03:10,610][131794] Num frames 11500...
+ [2023-08-17 13:03:10,666][131794] Num frames 11600...
+ [2023-08-17 13:03:10,720][131794] Num frames 11700...
+ [2023-08-17 13:03:10,779][131794] Num frames 11800...
+ [2023-08-17 13:03:10,862][131794] Avg episode rewards: #0: 28.046, true rewards: #0: 11.846
+ [2023-08-17 13:03:10,863][131794] Avg episode reward: 28.046, avg true_objective: 11.846
+ [2023-08-17 13:03:22,380][131794] Replay video saved to /home/patonw/code/learn/deep-rl-class/notebooks/unit8/train_dir/default_experiment/replay.mp4!