Upload folder using huggingface_hub
.summary/0/events.out.tfevents.1756512588.5b240b05f395 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a4bde4e30845d2a645a3cf0f76f47a0408a6a89659bd2d6b9f08bcddf74c163
+size 218586
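The three added lines above (and the checkpoint files below) are Git LFS pointer files: the repository stores only this small text stub, while the binary payload lives in LFS storage, addressed by the sha256 oid. A minimal parsing sketch of that pointer format:

```python
# Minimal sketch: parse the Git LFS pointer shown above. Each pointer is a
# set of "key value" lines; the actual binary is fetched from LFS storage
# by its sha256 content address.
pointer_text = """version https://git-lfs.github.com/spec/v1
oid sha256:4a4bde4e30845d2a645a3cf0f76f47a0408a6a89659bd2d6b9f08bcddf74c163
size 218586"""

fields = dict(line.split(" ", 1) for line in pointer_text.splitlines())
print(fields["oid"])        # sha256:4a4b... (content address of the blob)
print(int(fields["size"]))  # 218586 (size of the real file in bytes)
```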
README.md CHANGED
@@ -15,7 +15,7 @@ model-index:
       type: doom_health_gathering_supreme
     metrics:
     - type: mean_reward
-      value: 4.
+      value: 4.44 +/- 1.93
      name: mean_reward
      verified: false
 ---
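The updated `mean_reward` value reads as mean ± standard deviation of per-episode reward from the evaluation run (an assumption about the format; the individual episode values are not in this diff). A hedged sketch of how such a figure is typically formed:

```python
# Hedged sketch of a "4.44 +/- 1.93"-style metric: mean and standard
# deviation over evaluation episode rewards. The values below are
# placeholders, not the actual evaluation data behind this model card.
import statistics

episode_rewards = [4.0, 3.2, 6.8, 2.9, 5.3]  # hypothetical episode rewards
mean = statistics.mean(episode_rewards)
std = statistics.pstdev(episode_rewards)
print(f"{mean:.2f} +/- {std:.2f}")
```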
checkpoint_p0/best_000000241_987136_reward_8.004.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2b3abbd3f202746bb3e4701ea5ef79646944fb7ec891253a39d7b99dd236c47
+size 34929516

checkpoint_p0/checkpoint_000000210_860160.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae71cc8d09a5811c617e87228169f91afd6bc7fe87147168ff032d1545919c0c
+size 34929954

checkpoint_p0/checkpoint_000000246_1007616.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3252ed55019ae81621a6060a8fc612a1c2968b555798de8c7f3186b6eee9ed8
+size 34929954
config.json CHANGED
@@ -65,7 +65,7 @@
     "summaries_use_frameskip": true,
     "heartbeat_interval": 20,
     "heartbeat_reporting_interval": 600,
-    "train_for_env_steps":
+    "train_for_env_steps": 1000000,
     "train_for_seconds": 10000000000,
     "save_every_sec": 120,
     "keep_checkpoints": 2,
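The new `train_for_env_steps` value matches the command-line override recorded later in sf_log.txt ("Overriding arg 'train_for_env_steps' with value 1000000 passed from command line"). A minimal sketch of that precedence when resuming an experiment; this is an illustration, not Sample Factory's actual code:

```python
# Sketch of saved-config vs. CLI precedence: on resume, values passed on
# the command line replace the ones stored in the experiment's config.json.
import json

with open("/content/train_dir/default_experiment/config.json") as f:
    saved_cfg = json.load(f)

cli_overrides = {"train_for_env_steps": 1_000_000}  # parsed from argv
cfg = {**saved_cfg, **cli_overrides}  # the CLI value wins on resume
assert cfg["train_for_env_steps"] == 1_000_000
```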
replay.mp4 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1d3e0bfbacf838ff96bf029cbbfbdc40680e09e95c64f961e4ee0a195e746aed
+size 7311680
sf_log.txt CHANGED
@@ -395,3 +395,767 @@ Check the documentation of torch.load to learn more about types accepted by defa
 [2025-08-30 00:08:21,387][02133] Avg episode rewards: #0: 4.400, true rewards: #0: 4.000
 [2025-08-30 00:08:21,388][02133] Avg episode reward: 4.400, avg true_objective: 4.000
 [2025-08-30 00:08:41,723][02133] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
+[2025-08-30 00:08:47,004][02133] The model has been pushed to https://huggingface.co/Priyam05/rl_course_vizdoom_health_gathering_supreme
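The push above is produced by an evaluation run with the hub flags that appear later in this log (`push_to_hub`, `hf_repository`). A hedged sketch of such an invocation; the flags match config keys visible in this log, but the module path is an assumption, not something the log shows:

```python
# Hypothetical reconstruction of the upload step. --push_to_hub and
# --hf_repository match keys visible later in this log; the sf_examples
# entry point below is an assumption and may differ per setup.
import subprocess

subprocess.run(
    [
        "python", "-m", "sf_examples.vizdoom.enjoy_vizdoom",  # assumed entry point
        "--env=doom_health_gathering_supreme",
        "--train_dir=/content/train_dir",
        "--experiment=default_experiment",
        "--push_to_hub",
        "--hf_repository=Priyam05/rl_course_vizdoom_health_gathering_supreme",
    ],
    check=True,
)
```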
+[2025-08-30 00:09:48,409][02133] Environment doom_basic already registered, overwriting...
+[2025-08-30 00:09:48,410][02133] Environment doom_two_colors_easy already registered, overwriting...
+[2025-08-30 00:09:48,411][02133] Environment doom_two_colors_hard already registered, overwriting...
+[2025-08-30 00:09:48,412][02133] Environment doom_dm already registered, overwriting...
+[2025-08-30 00:09:48,413][02133] Environment doom_dwango5 already registered, overwriting...
+[2025-08-30 00:09:48,415][02133] Environment doom_my_way_home_flat_actions already registered, overwriting...
+[2025-08-30 00:09:48,416][02133] Environment doom_defend_the_center_flat_actions already registered, overwriting...
+[2025-08-30 00:09:48,417][02133] Environment doom_my_way_home already registered, overwriting...
+[2025-08-30 00:09:48,418][02133] Environment doom_deadly_corridor already registered, overwriting...
+[2025-08-30 00:09:48,420][02133] Environment doom_defend_the_center already registered, overwriting...
+[2025-08-30 00:09:48,421][02133] Environment doom_defend_the_line already registered, overwriting...
+[2025-08-30 00:09:48,422][02133] Environment doom_health_gathering already registered, overwriting...
+[2025-08-30 00:09:48,423][02133] Environment doom_health_gathering_supreme already registered, overwriting...
+[2025-08-30 00:09:48,423][02133] Environment doom_battle already registered, overwriting...
+[2025-08-30 00:09:48,424][02133] Environment doom_battle2 already registered, overwriting...
+[2025-08-30 00:09:48,425][02133] Environment doom_duel_bots already registered, overwriting...
+[2025-08-30 00:09:48,426][02133] Environment doom_deathmatch_bots already registered, overwriting...
+[2025-08-30 00:09:48,426][02133] Environment doom_duel already registered, overwriting...
+[2025-08-30 00:09:48,427][02133] Environment doom_deathmatch_full already registered, overwriting...
+[2025-08-30 00:09:48,428][02133] Environment doom_benchmark already registered, overwriting...
+[2025-08-30 00:09:48,431][02133] register_encoder_factory: <function make_vizdoom_encoder at 0x7850c852dc60>
+[2025-08-30 00:09:48,445][02133] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2025-08-30 00:09:48,449][02133] Overriding arg 'train_for_env_steps' with value 1000000 passed from command line
+[2025-08-30 00:09:48,454][02133] Experiment dir /content/train_dir/default_experiment already exists!
+[2025-08-30 00:09:48,455][02133] Resuming existing experiment from /content/train_dir/default_experiment...
+[2025-08-30 00:09:48,456][02133] Weights and Biases integration disabled
+[2025-08-30 00:09:48,459][02133] Environment var CUDA_VISIBLE_DEVICES is 0
+
+[2025-08-30 00:09:50,778][02133] Starting experiment with the following configuration:
+help=False
+algo=APPO
+env=doom_health_gathering_supreme
+experiment=default_experiment
+train_dir=/content/train_dir
+restart_behavior=resume
+device=gpu
+seed=None
+num_policies=1
+async_rl=True
+serial_mode=False
+batched_sampling=False
+num_batches_to_accumulate=2
+worker_num_splits=2
+policy_workers_per_policy=1
+max_policy_lag=1000
+num_workers=8
+num_envs_per_worker=4
+batch_size=1024
+num_batches_per_epoch=1
+num_epochs=1
+rollout=32
+recurrence=32
+shuffle_minibatches=False
+gamma=0.99
+reward_scale=1.0
+reward_clip=1000.0
+value_bootstrap=False
+normalize_returns=True
+exploration_loss_coeff=0.001
+value_loss_coeff=0.5
+kl_loss_coeff=0.0
+exploration_loss=symmetric_kl
+gae_lambda=0.95
+ppo_clip_ratio=0.1
+ppo_clip_value=0.2
+with_vtrace=False
+vtrace_rho=1.0
+vtrace_c=1.0
+optimizer=adam
+adam_eps=1e-06
+adam_beta1=0.9
+adam_beta2=0.999
+max_grad_norm=4.0
+learning_rate=0.0001
+lr_schedule=constant
+lr_schedule_kl_threshold=0.008
+lr_adaptive_min=1e-06
+lr_adaptive_max=0.01
+obs_subtract_mean=0.0
+obs_scale=255.0
+normalize_input=True
+normalize_input_keys=None
+decorrelate_experience_max_seconds=0
+decorrelate_envs_on_one_worker=True
+actor_worker_gpus=[]
+set_workers_cpu_affinity=True
+force_envs_single_thread=False
+default_niceness=0
+log_to_file=True
+experiment_summaries_interval=10
+flush_summaries_interval=30
+stats_avg=100
+summaries_use_frameskip=True
+heartbeat_interval=20
+heartbeat_reporting_interval=600
+train_for_env_steps=1000000
+train_for_seconds=10000000000
+save_every_sec=120
+keep_checkpoints=2
+load_checkpoint_kind=latest
+save_milestones_sec=-1
+save_best_every_sec=5
+save_best_metric=reward
+save_best_after=100000
+benchmark=False
+encoder_mlp_layers=[512, 512]
+encoder_conv_architecture=convnet_simple
+encoder_conv_mlp_layers=[512]
+use_rnn=True
+rnn_size=512
+rnn_type=gru
+rnn_num_layers=1
+decoder_mlp_layers=[]
+nonlinearity=elu
+policy_initialization=orthogonal
+policy_init_gain=1.0
+actor_critic_share_weights=True
+adaptive_stddev=True
+continuous_tanh_scale=0.0
+initial_stddev=1.0
+use_env_info_cache=False
+env_gpu_actions=False
+env_gpu_observations=True
+env_frameskip=4
+env_framestack=1
+pixel_format=CHW
+use_record_episode_statistics=False
+with_wandb=False
+wandb_user=None
+wandb_project=sample_factory
+wandb_group=None
+wandb_job_type=SF
+wandb_tags=[]
+with_pbt=False
+pbt_mix_policies_in_one_env=True
+pbt_period_env_steps=5000000
+pbt_start_mutation=20000000
+pbt_replace_fraction=0.3
+pbt_mutation_rate=0.15
+pbt_replace_reward_gap=0.1
+pbt_replace_reward_gap_absolute=1e-06
+pbt_optimize_gamma=False
+pbt_target_objective=true_objective
+pbt_perturb_min=1.1
+pbt_perturb_max=1.5
+num_agents=-1
+num_humans=0
+num_bots=-1
+start_bot_difficulty=None
+timelimit=None
+res_w=128
+res_h=72
+wide_aspect_ratio=False
+eval_env_frameskip=1
+fps=35
+command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=100000
+cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 100000}
+git_hash=unknown
+git_repo_name=not a git repository
+[2025-08-30 00:09:50,779][02133] Saving configuration to /content/train_dir/default_experiment/config.json...
+[2025-08-30 00:09:50,781][02133] Rollout worker 0 uses device cpu
+[2025-08-30 00:09:50,782][02133] Rollout worker 1 uses device cpu
+[2025-08-30 00:09:50,783][02133] Rollout worker 2 uses device cpu
+[2025-08-30 00:09:50,784][02133] Rollout worker 3 uses device cpu
+[2025-08-30 00:09:50,784][02133] Rollout worker 4 uses device cpu
+[2025-08-30 00:09:50,785][02133] Rollout worker 5 uses device cpu
+[2025-08-30 00:09:50,786][02133] Rollout worker 6 uses device cpu
+[2025-08-30 00:09:50,787][02133] Rollout worker 7 uses device cpu
+[2025-08-30 00:09:50,853][02133] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2025-08-30 00:09:50,854][02133] InferenceWorker_p0-w0: min num requests: 2
+[2025-08-30 00:09:50,881][02133] Starting all processes...
+[2025-08-30 00:09:50,882][02133] Starting process learner_proc0
+[2025-08-30 00:09:50,943][02133] Starting all processes...
+[2025-08-30 00:09:50,947][02133] Starting process inference_proc0-0
+[2025-08-30 00:09:50,948][02133] Starting process rollout_proc0
+[2025-08-30 00:09:50,948][02133] Starting process rollout_proc1
+[2025-08-30 00:09:50,948][02133] Starting process rollout_proc2
+[2025-08-30 00:09:50,948][02133] Starting process rollout_proc3
+[2025-08-30 00:09:50,948][02133] Starting process rollout_proc4
+[2025-08-30 00:09:50,948][02133] Starting process rollout_proc5
+[2025-08-30 00:09:50,948][02133] Starting process rollout_proc6
+[2025-08-30 00:09:50,948][02133] Starting process rollout_proc7
+[2025-08-30 00:10:06,001][07555] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2025-08-30 00:10:06,001][07555] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+[2025-08-30 00:10:06,064][07555] Num visible devices: 1
+[2025-08-30 00:10:06,076][07555] Starting seed is not provided
+[2025-08-30 00:10:06,077][07555] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2025-08-30 00:10:06,077][07555] Initializing actor-critic model on device cuda:0
+[2025-08-30 00:10:06,078][07555] RunningMeanStd input shape: (3, 72, 128)
+[2025-08-30 00:10:06,080][07555] RunningMeanStd input shape: (1,)
+[2025-08-30 00:10:06,140][07568] Worker 2 uses CPU cores [0]
+[2025-08-30 00:10:06,157][07555] ConvEncoder: input_channels=3
+[2025-08-30 00:10:06,257][07573] Worker 6 uses CPU cores [0]
+[2025-08-30 00:10:06,432][07570] Worker 5 uses CPU cores [1]
+[2025-08-30 00:10:06,480][07569] Worker 4 uses CPU cores [0]
+[2025-08-30 00:10:06,489][07574] Worker 0 uses CPU cores [0]
+[2025-08-30 00:10:06,602][07572] Worker 7 uses CPU cores [1]
+[2025-08-30 00:10:06,626][07555] Conv encoder output size: 512
+[2025-08-30 00:10:06,627][07555] Policy head output size: 512
+[2025-08-30 00:10:06,648][07555] Created Actor Critic model with architecture:
+[2025-08-30 00:10:06,648][07555] ActorCriticSharedWeights(
+  (obs_normalizer): ObservationNormalizer(
+    (running_mean_std): RunningMeanStdDictInPlace(
+      (running_mean_std): ModuleDict(
+        (obs): RunningMeanStdInPlace()
+      )
+    )
+  )
+  (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+  (encoder): VizdoomEncoder(
+    (basic_encoder): ConvEncoder(
+      (enc): RecursiveScriptModule(
+        original_name=ConvEncoderImpl
+        (conv_head): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Conv2d)
+          (1): RecursiveScriptModule(original_name=ELU)
+          (2): RecursiveScriptModule(original_name=Conv2d)
+          (3): RecursiveScriptModule(original_name=ELU)
+          (4): RecursiveScriptModule(original_name=Conv2d)
+          (5): RecursiveScriptModule(original_name=ELU)
+        )
+        (mlp_layers): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Linear)
+          (1): RecursiveScriptModule(original_name=ELU)
+        )
+      )
+    )
+  )
+  (core): ModelCoreRNN(
+    (core): GRU(512, 512)
+  )
+  (decoder): MlpDecoder(
+    (mlp): Identity()
+  )
+  (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+  (action_parameterization): ActionParameterizationDefault(
+    (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+  )
+)
+[2025-08-30 00:10:06,785][07571] Worker 1 uses CPU cores [1]
+[2025-08-30 00:10:06,800][07575] Worker 3 uses CPU cores [1]
+[2025-08-30 00:10:06,871][07576] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2025-08-30 00:10:06,872][07576] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+[2025-08-30 00:10:06,890][07576] Num visible devices: 1
+[2025-08-30 00:10:06,899][07555] Using optimizer <class 'torch.optim.adam.Adam'>
+[2025-08-30 00:10:07,835][07555] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000026_106496.pth...
+[2025-08-30 00:10:07,838][07555] Could not load from checkpoint, attempt 0
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.12/dist-packages/sample_factory/algo/learning/learner.py", line 281, in load_checkpoint
+    checkpoint_dict = torch.load(latest_checkpoint, map_location=device)
+                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/usr/local/lib/python3.12/dist-packages/torch/serialization.py", line 1529, in load
+    raise pickle.UnpicklingError(_get_wo_message(str(e))) from None
+_pickle.UnpicklingError: Weights only load failed. This file can still be loaded, to do so you have two options, do those steps only if you trust the source of the checkpoint.
+ (1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.
+ (2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.
+ WeightsUnpickler error: Unsupported global: GLOBAL numpy.core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([numpy.core.multiarray.scalar])` or the `torch.serialization.safe_globals([numpy.core.multiarray.scalar])` context manager to allowlist this global if you trust this class/function.
+
+Check the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html.
+[2025-08-30 00:10:07,839][07555] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000026_106496.pth...
+[2025-08-30 00:10:07,840][07555] Could not load from checkpoint, attempt 1
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.12/dist-packages/sample_factory/algo/learning/learner.py", line 281, in load_checkpoint
+    checkpoint_dict = torch.load(latest_checkpoint, map_location=device)
+                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/usr/local/lib/python3.12/dist-packages/torch/serialization.py", line 1529, in load
+    raise pickle.UnpicklingError(_get_wo_message(str(e))) from None
+_pickle.UnpicklingError: Weights only load failed. This file can still be loaded, to do so you have two options, do those steps only if you trust the source of the checkpoint.
+ (1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.
+ (2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.
+ WeightsUnpickler error: Unsupported global: GLOBAL numpy.core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([numpy.core.multiarray.scalar])` or the `torch.serialization.safe_globals([numpy.core.multiarray.scalar])` context manager to allowlist this global if you trust this class/function.
+
+Check the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html.
+[2025-08-30 00:10:07,841][07555] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000026_106496.pth...
+[2025-08-30 00:10:07,841][07555] Could not load from checkpoint, attempt 2
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.12/dist-packages/sample_factory/algo/learning/learner.py", line 281, in load_checkpoint
+    checkpoint_dict = torch.load(latest_checkpoint, map_location=device)
+                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/usr/local/lib/python3.12/dist-packages/torch/serialization.py", line 1529, in load
+    raise pickle.UnpicklingError(_get_wo_message(str(e))) from None
+_pickle.UnpicklingError: Weights only load failed. This file can still be loaded, to do so you have two options, do those steps only if you trust the source of the checkpoint.
+ (1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.
+ (2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.
+ WeightsUnpickler error: Unsupported global: GLOBAL numpy.core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([numpy.core.multiarray.scalar])` or the `torch.serialization.safe_globals([numpy.core.multiarray.scalar])` context manager to allowlist this global if you trust this class/function.
+
+Check the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html.
+[2025-08-30 00:10:07,842][07555] Did not load from checkpoint, starting from scratch!
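For reference, the two remedies named in the UnpicklingError above, shown as a hedged sketch rather than a patch to Sample Factory; both assume the checkpoint comes from a trusted source:

```python
# Sketch of the two options quoted in the error message above. Only use
# either one for checkpoints you trust: both re-enable unpickling paths
# that PyTorch >= 2.6 restricts by default.
import torch
from numpy.core.multiarray import scalar  # the global named in the error

ckpt_path = "/content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000026_106496.pth"

# Option 1: opt out of the new weights_only=True default entirely.
ckpt = torch.load(ckpt_path, map_location="cpu", weights_only=False)

# Option 2: keep weights_only=True but allowlist the offending global.
with torch.serialization.safe_globals([scalar]):
    ckpt = torch.load(ckpt_path, map_location="cpu")
```

In this run neither workaround was applied, so all three attempts failed and training restarted from scratch, as the next line shows.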
+[2025-08-30 00:10:07,842][07555] Initialized policy 0 weights for model version 0
+[2025-08-30 00:10:07,845][07555] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2025-08-30 00:10:07,851][07555] LearnerWorker_p0 finished initialization!
+[2025-08-30 00:10:08,049][07576] RunningMeanStd input shape: (3, 72, 128)
+[2025-08-30 00:10:08,050][07576] RunningMeanStd input shape: (1,)
+[2025-08-30 00:10:08,060][07576] ConvEncoder: input_channels=3
+[2025-08-30 00:10:08,151][07576] Conv encoder output size: 512
+[2025-08-30 00:10:08,151][07576] Policy head output size: 512
+[2025-08-30 00:10:08,187][02133] Inference worker 0-0 is ready!
+[2025-08-30 00:10:08,188][02133] All inference workers are ready! Signal rollout workers to start!
+[2025-08-30 00:10:08,368][07570] Doom resolution: 160x120, resize resolution: (128, 72)
+[2025-08-30 00:10:08,368][07569] Doom resolution: 160x120, resize resolution: (128, 72)
+[2025-08-30 00:10:08,370][07571] Doom resolution: 160x120, resize resolution: (128, 72)
+[2025-08-30 00:10:08,366][07568] Doom resolution: 160x120, resize resolution: (128, 72)
+[2025-08-30 00:10:08,372][07575] Doom resolution: 160x120, resize resolution: (128, 72)
+[2025-08-30 00:10:08,373][07572] Doom resolution: 160x120, resize resolution: (128, 72)
+[2025-08-30 00:10:08,369][07574] Doom resolution: 160x120, resize resolution: (128, 72)
+[2025-08-30 00:10:08,374][07573] Doom resolution: 160x120, resize resolution: (128, 72)
+[2025-08-30 00:10:08,459][02133] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 0. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2025-08-30 00:10:09,706][07572] Decorrelating experience for 0 frames...
+[2025-08-30 00:10:09,708][07571] Decorrelating experience for 0 frames...
+[2025-08-30 00:10:09,711][07575] Decorrelating experience for 0 frames...
+[2025-08-30 00:10:09,995][07568] Decorrelating experience for 0 frames...
+[2025-08-30 00:10:10,006][07573] Decorrelating experience for 0 frames...
+[2025-08-30 00:10:10,004][07569] Decorrelating experience for 0 frames...
+[2025-08-30 00:10:10,007][07574] Decorrelating experience for 0 frames...
+[2025-08-30 00:10:10,846][02133] Heartbeat connected on Batcher_0
+[2025-08-30 00:10:10,850][02133] Heartbeat connected on LearnerWorker_p0
+[2025-08-30 00:10:10,857][07572] Decorrelating experience for 32 frames...
+[2025-08-30 00:10:10,858][07571] Decorrelating experience for 32 frames...
+[2025-08-30 00:10:10,904][02133] Heartbeat connected on InferenceWorker_p0-w0
+[2025-08-30 00:10:11,182][07574] Decorrelating experience for 32 frames...
+[2025-08-30 00:10:11,195][07573] Decorrelating experience for 32 frames...
+[2025-08-30 00:10:11,200][07568] Decorrelating experience for 32 frames...
+[2025-08-30 00:10:11,445][07570] Decorrelating experience for 0 frames...
+[2025-08-30 00:10:11,448][07575] Decorrelating experience for 32 frames...
+[2025-08-30 00:10:12,475][07569] Decorrelating experience for 32 frames...
+[2025-08-30 00:10:12,544][07572] Decorrelating experience for 64 frames...
+[2025-08-30 00:10:12,849][07570] Decorrelating experience for 32 frames...
+[2025-08-30 00:10:13,262][07575] Decorrelating experience for 64 frames...
+[2025-08-30 00:10:13,459][02133] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2025-08-30 00:10:13,483][07574] Decorrelating experience for 64 frames...
+[2025-08-30 00:10:13,494][07568] Decorrelating experience for 64 frames...
+[2025-08-30 00:10:13,501][07573] Decorrelating experience for 64 frames...
+[2025-08-30 00:10:14,544][07571] Decorrelating experience for 64 frames...
+[2025-08-30 00:10:14,673][07569] Decorrelating experience for 64 frames...
+[2025-08-30 00:10:15,469][07573] Decorrelating experience for 96 frames...
+[2025-08-30 00:10:15,839][07575] Decorrelating experience for 96 frames...
+[2025-08-30 00:10:15,883][02133] Heartbeat connected on RolloutWorker_w6
+[2025-08-30 00:10:16,073][07572] Decorrelating experience for 96 frames...
+[2025-08-30 00:10:16,086][07570] Decorrelating experience for 64 frames...
+[2025-08-30 00:10:16,533][02133] Heartbeat connected on RolloutWorker_w3
+[2025-08-30 00:10:16,763][02133] Heartbeat connected on RolloutWorker_w7
+[2025-08-30 00:10:17,156][07571] Decorrelating experience for 96 frames...
+[2025-08-30 00:10:17,742][02133] Heartbeat connected on RolloutWorker_w1
+[2025-08-30 00:10:17,978][07568] Decorrelating experience for 96 frames...
+[2025-08-30 00:10:18,159][07574] Decorrelating experience for 96 frames...
+[2025-08-30 00:10:18,459][02133] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 2.8. Samples: 28. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2025-08-30 00:10:18,461][02133] Avg episode reward: [(0, '1.067')]
+[2025-08-30 00:10:18,473][07569] Decorrelating experience for 96 frames...
+[2025-08-30 00:10:18,495][02133] Heartbeat connected on RolloutWorker_w2
+[2025-08-30 00:10:18,768][02133] Heartbeat connected on RolloutWorker_w0
+[2025-08-30 00:10:18,780][07570] Decorrelating experience for 96 frames...
+[2025-08-30 00:10:19,058][02133] Heartbeat connected on RolloutWorker_w4
+[2025-08-30 00:10:19,079][02133] Heartbeat connected on RolloutWorker_w5
+[2025-08-30 00:10:20,881][07555] Signal inference workers to stop experience collection...
+[2025-08-30 00:10:20,894][07576] InferenceWorker_p0-w0: stopping experience collection
+[2025-08-30 00:10:22,382][07555] Signal inference workers to resume experience collection...
+[2025-08-30 00:10:22,384][07576] InferenceWorker_p0-w0: resuming experience collection
+[2025-08-30 00:10:23,459][02133] Fps is (10 sec: 1228.8, 60 sec: 819.2, 300 sec: 819.2). Total num frames: 12288. Throughput: 0: 220.3. Samples: 3304. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
+[2025-08-30 00:10:23,462][02133] Avg episode reward: [(0, '2.552')]
+[2025-08-30 00:10:28,459][02133] Fps is (10 sec: 3276.8, 60 sec: 1638.4, 300 sec: 1638.4). Total num frames: 32768. Throughput: 0: 345.2. Samples: 6904. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2025-08-30 00:10:28,460][02133] Avg episode reward: [(0, '3.840')]
+[2025-08-30 00:10:30,042][07576] Updated weights for policy 0, policy_version 10 (0.0095)
+[2025-08-30 00:10:33,459][02133] Fps is (10 sec: 3686.4, 60 sec: 1966.1, 300 sec: 1966.1). Total num frames: 49152. Throughput: 0: 502.7. Samples: 12568. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:10:33,460][02133] Avg episode reward: [(0, '4.421')]
+[2025-08-30 00:10:38,459][02133] Fps is (10 sec: 4096.0, 60 sec: 2457.6, 300 sec: 2457.6). Total num frames: 73728. Throughput: 0: 641.7. Samples: 19250. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2025-08-30 00:10:38,463][02133] Avg episode reward: [(0, '4.386')]
+[2025-08-30 00:10:39,670][07576] Updated weights for policy 0, policy_version 20 (0.0023)
+[2025-08-30 00:10:43,459][02133] Fps is (10 sec: 4915.2, 60 sec: 2808.7, 300 sec: 2808.7). Total num frames: 98304. Throughput: 0: 655.6. Samples: 22946. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2025-08-30 00:10:43,463][02133] Avg episode reward: [(0, '4.373')]
+[2025-08-30 00:10:48,459][02133] Fps is (10 sec: 3686.4, 60 sec: 2764.8, 300 sec: 2764.8). Total num frames: 110592. Throughput: 0: 704.9. Samples: 28196. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:10:48,461][02133] Avg episode reward: [(0, '4.323')]
+[2025-08-30 00:10:48,466][07555] Saving new best policy, reward=4.323!
+[2025-08-30 00:10:50,391][07576] Updated weights for policy 0, policy_version 30 (0.0031)
+[2025-08-30 00:10:53,459][02133] Fps is (10 sec: 3686.4, 60 sec: 3003.7, 300 sec: 3003.7). Total num frames: 135168. Throughput: 0: 774.2. Samples: 34838. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:10:53,463][02133] Avg episode reward: [(0, '4.347')]
+[2025-08-30 00:10:53,467][07555] Saving new best policy, reward=4.347!
+[2025-08-30 00:10:58,459][02133] Fps is (10 sec: 4915.2, 60 sec: 3194.9, 300 sec: 3194.9). Total num frames: 159744. Throughput: 0: 854.3. Samples: 38442. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:10:58,460][02133] Avg episode reward: [(0, '4.387')]
+[2025-08-30 00:10:58,466][07555] Saving new best policy, reward=4.387!
+[2025-08-30 00:10:59,068][07576] Updated weights for policy 0, policy_version 40 (0.0016)
+[2025-08-30 00:11:03,459][02133] Fps is (10 sec: 4096.0, 60 sec: 3202.3, 300 sec: 3202.3). Total num frames: 176128. Throughput: 0: 973.0. Samples: 43814. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:11:03,463][02133] Avg episode reward: [(0, '4.307')]
+[2025-08-30 00:11:08,459][02133] Fps is (10 sec: 4096.0, 60 sec: 3345.1, 300 sec: 3345.1). Total num frames: 200704. Throughput: 0: 1054.3. Samples: 50746. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2025-08-30 00:11:08,460][02133] Avg episode reward: [(0, '4.405')]
+[2025-08-30 00:11:08,464][07555] Saving new best policy, reward=4.405!
+[2025-08-30 00:11:09,147][07576] Updated weights for policy 0, policy_version 50 (0.0028)
+[2025-08-30 00:11:13,459][02133] Fps is (10 sec: 4505.6, 60 sec: 3686.4, 300 sec: 3402.8). Total num frames: 221184. Throughput: 0: 1053.6. Samples: 54316. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:11:13,463][02133] Avg episode reward: [(0, '4.508')]
+[2025-08-30 00:11:13,476][07555] Saving new best policy, reward=4.508!
+[2025-08-30 00:11:18,459][02133] Fps is (10 sec: 3686.4, 60 sec: 3959.5, 300 sec: 3393.8). Total num frames: 237568. Throughput: 0: 1045.8. Samples: 59628. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2025-08-30 00:11:18,466][02133] Avg episode reward: [(0, '4.578')]
+[2025-08-30 00:11:18,475][07555] Saving new best policy, reward=4.578!
+[2025-08-30 00:11:19,490][07576] Updated weights for policy 0, policy_version 60 (0.0016)
+[2025-08-30 00:11:23,459][02133] Fps is (10 sec: 4096.0, 60 sec: 4164.3, 300 sec: 3495.3). Total num frames: 262144. Throughput: 0: 1048.6. Samples: 66438. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2025-08-30 00:11:23,463][02133] Avg episode reward: [(0, '4.495')]
+[2025-08-30 00:11:28,113][07576] Updated weights for policy 0, policy_version 70 (0.0022)
+[2025-08-30 00:11:28,459][02133] Fps is (10 sec: 4915.1, 60 sec: 4232.5, 300 sec: 3584.0). Total num frames: 286720. Throughput: 0: 1049.0. Samples: 70150. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:11:28,465][02133] Avg episode reward: [(0, '4.637')]
+[2025-08-30 00:11:28,471][07555] Saving new best policy, reward=4.637!
+[2025-08-30 00:11:33,459][02133] Fps is (10 sec: 4096.0, 60 sec: 4232.5, 300 sec: 3565.9). Total num frames: 303104. Throughput: 0: 1045.6. Samples: 75246. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:11:33,464][02133] Avg episode reward: [(0, '4.447')]
+[2025-08-30 00:11:38,353][07576] Updated weights for policy 0, policy_version 80 (0.0011)
+[2025-08-30 00:11:38,459][02133] Fps is (10 sec: 4096.1, 60 sec: 4232.5, 300 sec: 3640.9). Total num frames: 327680. Throughput: 0: 1056.9. Samples: 82400. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:11:38,461][02133] Avg episode reward: [(0, '4.378')]
+[2025-08-30 00:11:43,461][02133] Fps is (10 sec: 4505.4, 60 sec: 4164.2, 300 sec: 3664.8). Total num frames: 348160. Throughput: 0: 1056.3. Samples: 85974. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:11:43,464][02133] Avg episode reward: [(0, '4.573')]
+[2025-08-30 00:11:48,459][02133] Fps is (10 sec: 3686.4, 60 sec: 4232.5, 300 sec: 3645.4). Total num frames: 364544. Throughput: 0: 1048.1. Samples: 90980. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2025-08-30 00:11:48,462][02133] Avg episode reward: [(0, '4.632')]
+[2025-08-30 00:11:48,467][07555] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000089_364544.pth...
+[2025-08-30 00:11:48,832][07576] Updated weights for policy 0, policy_version 90 (0.0028)
+[2025-08-30 00:11:53,459][02133] Fps is (10 sec: 4096.2, 60 sec: 4232.5, 300 sec: 3705.9). Total num frames: 389120. Throughput: 0: 1050.3. Samples: 98010. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2025-08-30 00:11:53,463][02133] Avg episode reward: [(0, '4.556')]
+[2025-08-30 00:11:57,374][07576] Updated weights for policy 0, policy_version 100 (0.0014)
+[2025-08-30 00:11:58,461][02133] Fps is (10 sec: 4504.8, 60 sec: 4164.1, 300 sec: 3723.6). Total num frames: 409600. Throughput: 0: 1052.8. Samples: 101694. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2025-08-30 00:11:58,462][02133] Avg episode reward: [(0, '4.577')]
+[2025-08-30 00:12:03,460][02133] Fps is (10 sec: 4095.7, 60 sec: 4232.5, 300 sec: 3739.8). Total num frames: 430080. Throughput: 0: 1045.8. Samples: 106690. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2025-08-30 00:12:03,464][02133] Avg episode reward: [(0, '4.485')]
+[2025-08-30 00:12:07,741][07576] Updated weights for policy 0, policy_version 110 (0.0016)
+[2025-08-30 00:12:08,459][02133] Fps is (10 sec: 4096.7, 60 sec: 4164.3, 300 sec: 3754.7). Total num frames: 450560. Throughput: 0: 1054.0. Samples: 113866. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2025-08-30 00:12:08,466][02133] Avg episode reward: [(0, '4.489')]
+[2025-08-30 00:12:13,461][02133] Fps is (10 sec: 4504.9, 60 sec: 4232.4, 300 sec: 3801.0). Total num frames: 475136. Throughput: 0: 1052.2. Samples: 117502. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2025-08-30 00:12:13,463][02133] Avg episode reward: [(0, '4.714')]
+[2025-08-30 00:12:13,471][07555] Saving new best policy, reward=4.714!
+[2025-08-30 00:12:18,053][07576] Updated weights for policy 0, policy_version 120 (0.0024)
+[2025-08-30 00:12:18,459][02133] Fps is (10 sec: 4096.0, 60 sec: 4232.5, 300 sec: 3780.9). Total num frames: 491520. Throughput: 0: 1048.9. Samples: 122446. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:12:18,460][02133] Avg episode reward: [(0, '4.611')]
+[2025-08-30 00:12:23,459][02133] Fps is (10 sec: 4097.0, 60 sec: 4232.5, 300 sec: 3822.9). Total num frames: 516096. Throughput: 0: 1049.8. Samples: 129640. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:12:23,463][02133] Avg episode reward: [(0, '4.557')]
+[2025-08-30 00:12:26,671][07576] Updated weights for policy 0, policy_version 130 (0.0021)
+[2025-08-30 00:12:28,459][02133] Fps is (10 sec: 4505.6, 60 sec: 4164.3, 300 sec: 3832.7). Total num frames: 536576. Throughput: 0: 1047.5. Samples: 133110. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2025-08-30 00:12:28,468][02133] Avg episode reward: [(0, '4.613')]
+[2025-08-30 00:12:33,459][02133] Fps is (10 sec: 3686.4, 60 sec: 4164.3, 300 sec: 3813.5). Total num frames: 552960. Throughput: 0: 1046.0. Samples: 138048. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2025-08-30 00:12:33,463][02133] Avg episode reward: [(0, '4.702')]
+[2025-08-30 00:12:37,311][07576] Updated weights for policy 0, policy_version 140 (0.0018)
+[2025-08-30 00:12:38,459][02133] Fps is (10 sec: 4096.0, 60 sec: 4164.3, 300 sec: 3850.2). Total num frames: 577536. Throughput: 0: 1042.1. Samples: 144904. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2025-08-30 00:12:38,465][02133] Avg episode reward: [(0, '5.077')]
+[2025-08-30 00:12:38,474][07555] Saving new best policy, reward=5.077!
+[2025-08-30 00:12:43,459][02133] Fps is (10 sec: 4505.7, 60 sec: 4164.3, 300 sec: 3858.2). Total num frames: 598016. Throughput: 0: 1033.3. Samples: 148190. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2025-08-30 00:12:43,462][02133] Avg episode reward: [(0, '4.964')]
+[2025-08-30 00:12:48,460][02133] Fps is (10 sec: 3276.5, 60 sec: 4095.9, 300 sec: 3814.4). Total num frames: 610304. Throughput: 0: 1021.6. Samples: 152660. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2025-08-30 00:12:48,465][02133] Avg episode reward: [(0, '4.715')]
+[2025-08-30 00:12:48,488][07576] Updated weights for policy 0, policy_version 150 (0.0020)
+[2025-08-30 00:12:53,459][02133] Fps is (10 sec: 3686.4, 60 sec: 4096.0, 300 sec: 3847.8). Total num frames: 634880. Throughput: 0: 1013.3. Samples: 159466. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2025-08-30 00:12:53,464][02133] Avg episode reward: [(0, '4.849')]
+[2025-08-30 00:12:57,336][07576] Updated weights for policy 0, policy_version 160 (0.0018)
+[2025-08-30 00:12:58,459][02133] Fps is (10 sec: 4505.9, 60 sec: 4096.1, 300 sec: 3855.1). Total num frames: 655360. Throughput: 0: 1012.1. Samples: 163044. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2025-08-30 00:12:58,463][02133] Avg episode reward: [(0, '4.868')]
+[2025-08-30 00:13:03,459][02133] Fps is (10 sec: 3686.4, 60 sec: 4027.8, 300 sec: 3838.5). Total num frames: 671744. Throughput: 0: 1009.6. Samples: 167876. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2025-08-30 00:13:03,463][02133] Avg episode reward: [(0, '4.680')]
+[2025-08-30 00:13:07,855][07576] Updated weights for policy 0, policy_version 170 (0.0013)
+[2025-08-30 00:13:08,459][02133] Fps is (10 sec: 4096.1, 60 sec: 4096.0, 300 sec: 3868.4). Total num frames: 696320. Throughput: 0: 1009.0. Samples: 175046. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2025-08-30 00:13:08,463][02133] Avg episode reward: [(0, '4.664')]
+[2025-08-30 00:13:13,459][02133] Fps is (10 sec: 4915.2, 60 sec: 4096.2, 300 sec: 3896.7). Total num frames: 720896. Throughput: 0: 1012.9. Samples: 178690. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2025-08-30 00:13:13,460][02133] Avg episode reward: [(0, '4.963')]
+[2025-08-30 00:13:18,315][07576] Updated weights for policy 0, policy_version 180 (0.0022)
+[2025-08-30 00:13:18,459][02133] Fps is (10 sec: 4096.0, 60 sec: 4096.0, 300 sec: 3880.4). Total num frames: 737280. Throughput: 0: 1012.6. Samples: 183614. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2025-08-30 00:13:18,460][02133] Avg episode reward: [(0, '5.224')]
+[2025-08-30 00:13:18,467][07555] Saving new best policy, reward=5.224!
+[2025-08-30 00:13:23,459][02133] Fps is (10 sec: 3686.4, 60 sec: 4027.7, 300 sec: 3886.0). Total num frames: 757760. Throughput: 0: 1016.9. Samples: 190666. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:13:23,463][02133] Avg episode reward: [(0, '5.501')]
+[2025-08-30 00:13:23,546][07555] Saving new best policy, reward=5.501!
+[2025-08-30 00:13:27,376][07576] Updated weights for policy 0, policy_version 190 (0.0018)
+[2025-08-30 00:13:28,460][02133] Fps is (10 sec: 4095.7, 60 sec: 4027.7, 300 sec: 3891.2). Total num frames: 778240. Throughput: 0: 1021.8. Samples: 194170. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:13:28,464][02133] Avg episode reward: [(0, '5.684')]
+[2025-08-30 00:13:28,476][07555] Saving new best policy, reward=5.684!
+[2025-08-30 00:13:33,459][02133] Fps is (10 sec: 4096.0, 60 sec: 4096.0, 300 sec: 3896.2). Total num frames: 798720. Throughput: 0: 1032.9. Samples: 199140. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2025-08-30 00:13:33,464][02133] Avg episode reward: [(0, '5.653')]
+[2025-08-30 00:13:37,372][07576] Updated weights for policy 0, policy_version 200 (0.0023)
+[2025-08-30 00:13:38,459][02133] Fps is (10 sec: 4505.9, 60 sec: 4096.0, 300 sec: 3920.5). Total num frames: 823296. Throughput: 0: 1042.9. Samples: 206398. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:13:38,463][02133] Avg episode reward: [(0, '5.665')]
+[2025-08-30 00:13:43,460][02133] Fps is (10 sec: 4505.2, 60 sec: 4095.9, 300 sec: 3924.5). Total num frames: 843776. Throughput: 0: 1044.0. Samples: 210024. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2025-08-30 00:13:43,464][02133] Avg episode reward: [(0, '5.705')]
+[2025-08-30 00:13:43,465][07555] Saving new best policy, reward=5.705!
+[2025-08-30 00:13:47,677][07576] Updated weights for policy 0, policy_version 210 (0.0012)
+[2025-08-30 00:13:48,459][02133] Fps is (10 sec: 3686.5, 60 sec: 4164.3, 300 sec: 3909.8). Total num frames: 860160. Throughput: 0: 1045.1. Samples: 214906. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2025-08-30 00:13:48,462][02133] Avg episode reward: [(0, '5.644')]
+[2025-08-30 00:13:48,468][07555] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000210_860160.pth...
+[2025-08-30 00:13:48,592][07555] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000026_106496.pth
+[2025-08-30 00:13:53,459][02133] Fps is (10 sec: 4096.4, 60 sec: 4164.3, 300 sec: 3932.2). Total num frames: 884736. Throughput: 0: 1042.7. Samples: 221966. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:13:53,463][02133] Avg episode reward: [(0, '5.383')]
+[2025-08-30 00:13:56,361][07576] Updated weights for policy 0, policy_version 220 (0.0016)
+[2025-08-30 00:13:58,459][02133] Fps is (10 sec: 4505.6, 60 sec: 4164.3, 300 sec: 3935.7). Total num frames: 905216. Throughput: 0: 1041.5. Samples: 225558. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2025-08-30 00:13:58,465][02133] Avg episode reward: [(0, '6.183')]
+[2025-08-30 00:13:58,471][07555] Saving new best policy, reward=6.183!
+[2025-08-30 00:14:03,459][02133] Fps is (10 sec: 4096.0, 60 sec: 4232.5, 300 sec: 3939.1). Total num frames: 925696. Throughput: 0: 1042.1. Samples: 230510. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2025-08-30 00:14:03,463][02133] Avg episode reward: [(0, '6.369')]
+[2025-08-30 00:14:03,467][07555] Saving new best policy, reward=6.369!
+[2025-08-30 00:14:06,935][07576] Updated weights for policy 0, policy_version 230 (0.0015)
+[2025-08-30 00:14:08,459][02133] Fps is (10 sec: 4096.0, 60 sec: 4164.3, 300 sec: 3942.4). Total num frames: 946176. Throughput: 0: 1041.7. Samples: 237544. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2025-08-30 00:14:08,463][02133] Avg episode reward: [(0, '6.839')]
+[2025-08-30 00:14:08,470][07555] Saving new best policy, reward=6.839!
+[2025-08-30 00:14:13,463][02133] Fps is (10 sec: 4095.0, 60 sec: 4095.8, 300 sec: 3945.5). Total num frames: 966656. Throughput: 0: 1041.3. Samples: 241032. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:14:13,468][02133] Avg episode reward: [(0, '7.071')]
+[2025-08-30 00:14:13,470][07555] Saving new best policy, reward=7.071!
+[2025-08-30 00:14:17,488][07576] Updated weights for policy 0, policy_version 240 (0.0027)
+[2025-08-30 00:14:18,459][02133] Fps is (10 sec: 4096.0, 60 sec: 4164.3, 300 sec: 3948.5). Total num frames: 987136. Throughput: 0: 1043.1. Samples: 246078. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2025-08-30 00:14:18,463][02133] Avg episode reward: [(0, '8.004')]
+[2025-08-30 00:14:18,469][07555] Saving new best policy, reward=8.004!
+[2025-08-30 00:14:22,764][07555] Stopping Batcher_0...
+[2025-08-30 00:14:22,764][07555] Loop batcher_evt_loop terminating...
+[2025-08-30 00:14:22,765][02133] Component Batcher_0 stopped!
+[2025-08-30 00:14:22,768][07555] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000246_1007616.pth...
+[2025-08-30 00:14:22,897][07555] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000089_364544.pth
+[2025-08-30 00:14:22,909][07555] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000246_1007616.pth...
+[2025-08-30 00:14:22,913][07576] Weights refcount: 2 0
+[2025-08-30 00:14:22,930][02133] Component InferenceWorker_p0-w0 stopped!
+[2025-08-30 00:14:22,931][07576] Stopping InferenceWorker_p0-w0...
+[2025-08-30 00:14:22,934][07576] Loop inference_proc0-0_evt_loop terminating...
+[2025-08-30 00:14:23,115][02133] Component LearnerWorker_p0 stopped!
+[2025-08-30 00:14:23,119][07555] Stopping LearnerWorker_p0...
+[2025-08-30 00:14:23,119][07555] Loop learner_proc0_evt_loop terminating...
+[2025-08-30 00:14:23,565][02133] Component RolloutWorker_w6 stopped!
+[2025-08-30 00:14:23,568][07573] Stopping RolloutWorker_w6...
+[2025-08-30 00:14:23,575][07573] Loop rollout_proc6_evt_loop terminating...
+[2025-08-30 00:14:23,593][07571] Stopping RolloutWorker_w1...
+[2025-08-30 00:14:23,594][07571] Loop rollout_proc1_evt_loop terminating...
+[2025-08-30 00:14:23,593][02133] Component RolloutWorker_w1 stopped!
+[2025-08-30 00:14:23,601][07572] Stopping RolloutWorker_w7...
+[2025-08-30 00:14:23,601][02133] Component RolloutWorker_w7 stopped!
+[2025-08-30 00:14:23,603][07572] Loop rollout_proc7_evt_loop terminating...
+[2025-08-30 00:14:23,622][07575] Stopping RolloutWorker_w3...
+[2025-08-30 00:14:23,622][02133] Component RolloutWorker_w3 stopped!
+[2025-08-30 00:14:23,633][07570] Stopping RolloutWorker_w5...
+[2025-08-30 00:14:23,633][02133] Component RolloutWorker_w5 stopped!
+[2025-08-30 00:14:23,623][07575] Loop rollout_proc3_evt_loop terminating...
+[2025-08-30 00:14:23,634][07570] Loop rollout_proc5_evt_loop terminating...
+[2025-08-30 00:14:23,648][02133] Component RolloutWorker_w4 stopped!
+[2025-08-30 00:14:23,649][07569] Stopping RolloutWorker_w4...
+[2025-08-30 00:14:23,650][07569] Loop rollout_proc4_evt_loop terminating...
+[2025-08-30 00:14:23,663][02133] Component RolloutWorker_w0 stopped!
+[2025-08-30 00:14:23,664][07574] Stopping RolloutWorker_w0...
+[2025-08-30 00:14:23,665][07574] Loop rollout_proc0_evt_loop terminating...
+[2025-08-30 00:14:23,724][02133] Component RolloutWorker_w2 stopped!
+[2025-08-30 00:14:23,727][02133] Waiting for process learner_proc0 to stop...
+[2025-08-30 00:14:23,730][07568] Stopping RolloutWorker_w2...
+[2025-08-30 00:14:23,741][07568] Loop rollout_proc2_evt_loop terminating...
+[2025-08-30 00:14:25,085][02133] Waiting for process inference_proc0-0 to join...
+[2025-08-30 00:14:25,090][02133] Waiting for process rollout_proc0 to join...
+[2025-08-30 00:14:27,614][02133] Waiting for process rollout_proc1 to join...
+[2025-08-30 00:14:27,918][02133] Waiting for process rollout_proc2 to join...
+[2025-08-30 00:14:27,920][02133] Waiting for process rollout_proc3 to join...
+[2025-08-30 00:14:27,921][02133] Waiting for process rollout_proc4 to join...
+[2025-08-30 00:14:27,922][02133] Waiting for process rollout_proc5 to join...
+[2025-08-30 00:14:27,923][02133] Waiting for process rollout_proc6 to join...
+[2025-08-30 00:14:27,924][02133] Waiting for process rollout_proc7 to join...
+[2025-08-30 00:14:27,925][02133] Batcher 0 profile tree view:
+batching: 6.2551, releasing_batches: 0.0077
+[2025-08-30 00:14:27,927][02133] InferenceWorker_p0-w0 profile tree view:
+wait_policy: 0.0000
+wait_policy_total: 107.8726
+update_model: 1.9901
+weight_update: 0.0026
+one_step: 0.0192
+handle_policy_step: 134.6821
+deserialize: 3.4461, stack: 0.7702, obs_to_device_normalize: 28.2248, forward: 69.2912, send_messages: 6.8835
+prepare_outputs: 20.1651
+to_cpu: 12.5958
+[2025-08-30 00:14:27,928][02133] Learner 0 profile tree view:
+misc: 0.0009, prepare_batch: 4.1257
+train: 19.1130
+epoch_init: 0.0010, minibatch_init: 0.0016, losses_postprocess: 0.1870, kl_divergence: 0.1665, after_optimizer: 8.3136
+calculate_losses: 6.6074
+losses_init: 0.0009, forward_head: 0.6567, bptt_initial: 4.1140, tail: 0.2923, advantages_returns: 0.0716, losses: 0.9097
+bptt: 0.4920
+bptt_forward_core: 0.4440
+update: 3.7163
+clip: 0.2445
+[2025-08-30 00:14:27,930][02133] RolloutWorker_w0 profile tree view:
+wait_for_trajectories: 0.0546, enqueue_policy_requests: 26.2367, env_step: 191.6451, overhead: 2.9250, complete_rollouts: 1.6959
+save_policy_outputs: 4.3974
+split_output_tensors: 1.7725
+[2025-08-30 00:14:27,931][02133] RolloutWorker_w7 profile tree view:
+wait_for_trajectories: 0.0872, enqueue_policy_requests: 26.4995, env_step: 191.5837, overhead: 3.0973, complete_rollouts: 1.5555
+save_policy_outputs: 4.6519
+split_output_tensors: 1.9002
+[2025-08-30 00:14:27,932][02133] Loop Runner_EvtLoop terminating...
+[2025-08-30 00:14:27,935][02133] Runner profile tree view:
+main_loop: 277.0532
+[2025-08-30 00:14:27,936][02133] Collected {0: 1007616}, FPS: 3636.9
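The reported FPS is consistent with total env frames divided by main-loop wall time, both of which appear just above; a quick sanity-check sketch:

```python
# Sanity check on the runner summary above: overall FPS equals total env
# frames divided by main-loop wall time.
frames = 1_007_616            # "Collected {0: 1007616}"
main_loop_seconds = 277.0532  # "main_loop: 277.0532"
print(round(frames / main_loop_seconds, 1))  # 3636.9, matching the log
```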
+[2025-08-30 00:14:46,367][02133] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2025-08-30 00:14:46,368][02133] Overriding arg 'num_workers' with value 1 passed from command line
+[2025-08-30 00:14:46,369][02133] Adding new argument 'no_render'=True that is not in the saved config file!
+[2025-08-30 00:14:46,370][02133] Adding new argument 'save_video'=True that is not in the saved config file!
+[2025-08-30 00:14:46,371][02133] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+[2025-08-30 00:14:46,372][02133] Adding new argument 'video_name'=None that is not in the saved config file!
+[2025-08-30 00:14:46,373][02133] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
+[2025-08-30 00:14:46,374][02133] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+[2025-08-30 00:14:46,375][02133] Adding new argument 'push_to_hub'=False that is not in the saved config file!
+[2025-08-30 00:14:46,376][02133] Adding new argument 'hf_repository'=None that is not in the saved config file!
+[2025-08-30 00:14:46,376][02133] Adding new argument 'policy_index'=0 that is not in the saved config file!
+[2025-08-30 00:14:46,377][02133] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+[2025-08-30 00:14:46,378][02133] Adding new argument 'train_script'=None that is not in the saved config file!
+[2025-08-30 00:14:46,379][02133] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+[2025-08-30 00:14:46,380][02133] Using frameskip 1 and render_action_repeat=4 for evaluation
+[2025-08-30 00:14:46,415][02133] RunningMeanStd input shape: (3, 72, 128)
+[2025-08-30 00:14:46,416][02133] RunningMeanStd input shape: (1,)
+[2025-08-30 00:14:46,427][02133] ConvEncoder: input_channels=3
+[2025-08-30 00:14:46,458][02133] Conv encoder output size: 512
+[2025-08-30 00:14:46,459][02133] Policy head output size: 512
+[2025-08-30 00:14:46,477][02133] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000246_1007616.pth...
+[2025-08-30 00:14:46,904][02133] Num frames 100...
+[2025-08-30 00:14:47,024][02133] Num frames 200...
+[2025-08-30 00:14:47,145][02133] Num frames 300...
+[2025-08-30 00:14:47,301][02133] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
+[2025-08-30 00:14:47,302][02133] Avg episode reward: 3.840, avg true_objective: 3.840
+[2025-08-30 00:14:47,326][02133] Num frames 400...
+[2025-08-30 00:14:47,451][02133] Num frames 500...
+[2025-08-30 00:14:47,575][02133] Num frames 600...
+[2025-08-30 00:14:47,698][02133] Num frames 700...
+[2025-08-30 00:14:47,823][02133] Num frames 800...
+[2025-08-30 00:14:47,958][02133] Num frames 900...
+[2025-08-30 00:14:48,089][02133] Num frames 1000...
+[2025-08-30 00:14:48,247][02133] Avg episode rewards: #0: 7.440, true rewards: #0: 5.440
+[2025-08-30 00:14:48,248][02133] Avg episode reward: 7.440, avg true_objective: 5.440
+[2025-08-30 00:14:48,267][02133] Num frames 1100...
+[2025-08-30 00:14:48,388][02133] Num frames 1200...
+[2025-08-30 00:14:48,512][02133] Num frames 1300...
+[2025-08-30 00:14:48,642][02133] Num frames 1400...
+[2025-08-30 00:14:48,771][02133] Num frames 1500...
+[2025-08-30 00:14:48,904][02133] Num frames 1600...
+[2025-08-30 00:14:49,030][02133] Num frames 1700...
+[2025-08-30 00:14:49,154][02133] Num frames 1800...
+[2025-08-30 00:14:49,312][02133] Avg episode rewards: #0: 10.280, true rewards: #0: 6.280
+[2025-08-30 00:14:49,312][02133] Avg episode reward: 10.280, avg true_objective: 6.280
+[2025-08-30 00:14:49,336][02133] Num frames 1900...
+[2025-08-30 00:14:49,457][02133] Num frames 2000...
+[2025-08-30 00:14:49,577][02133] Num frames 2100...
+[2025-08-30 00:14:49,698][02133] Num frames 2200...
+[2025-08-30 00:14:49,832][02133] Num frames 2300...
+[2025-08-30 00:14:49,884][02133] Avg episode rewards: #0: 9.250, true rewards: #0: 5.750
+[2025-08-30 00:14:49,885][02133] Avg episode reward: 9.250, avg true_objective: 5.750
+[2025-08-30 00:14:50,079][02133] Num frames 2400...
+[2025-08-30 00:14:50,254][02133] Num frames 2500...
+[2025-08-30 00:14:50,433][02133] Num frames 2600...
+[2025-08-30 00:14:50,609][02133] Num frames 2700...
+[2025-08-30 00:14:50,747][02133] Avg episode rewards: #0: 8.496, true rewards: #0: 5.496
+[2025-08-30 00:14:50,750][02133] Avg episode reward: 8.496, avg true_objective: 5.496
+[2025-08-30 00:14:50,845][02133] Num frames 2800...
+[2025-08-30 00:14:51,020][02133] Num frames 2900...
+[2025-08-30 00:14:51,191][02133] Num frames 3000...
+[2025-08-30 00:14:51,367][02133] Num frames 3100...
[2025-08-30 00:14:51,367][02133] Num frames 3100...
|
1042 |
+
[2025-08-30 00:14:51,547][02133] Num frames 3200...
|
1043 |
+
[2025-08-30 00:14:51,718][02133] Num frames 3300...
|
1044 |
+
[2025-08-30 00:14:51,818][02133] Avg episode rewards: #0: 8.707, true rewards: #0: 5.540
|
1045 |
+
[2025-08-30 00:14:51,820][02133] Avg episode reward: 8.707, avg true_objective: 5.540
|
1046 |
+
[2025-08-30 00:14:51,956][02133] Num frames 3400...
|
1047 |
+
[2025-08-30 00:14:52,144][02133] Num frames 3500...
|
1048 |
+
[2025-08-30 00:14:52,281][02133] Num frames 3600...
|
1049 |
+
[2025-08-30 00:14:52,405][02133] Num frames 3700...
|
1050 |
+
[2025-08-30 00:14:52,549][02133] Avg episode rewards: #0: 8.246, true rewards: #0: 5.389
|
1051 |
+
[2025-08-30 00:14:52,550][02133] Avg episode reward: 8.246, avg true_objective: 5.389
|
1052 |
+
[2025-08-30 00:14:52,587][02133] Num frames 3800...
|
1053 |
+
[2025-08-30 00:14:52,710][02133] Num frames 3900...
|
1054 |
+
[2025-08-30 00:14:52,837][02133] Num frames 4000...
|
1055 |
+
[2025-08-30 00:14:52,960][02133] Num frames 4100...
|
1056 |
+
[2025-08-30 00:14:53,089][02133] Num frames 4200...
|
1057 |
+
[2025-08-30 00:14:53,222][02133] Num frames 4300...
|
1058 |
+
[2025-08-30 00:14:53,342][02133] Avg episode rewards: #0: 8.435, true rewards: #0: 5.435
|
1059 |
+
[2025-08-30 00:14:53,343][02133] Avg episode reward: 8.435, avg true_objective: 5.435
|
1060 |
+
[2025-08-30 00:14:53,408][02133] Num frames 4400...
|
1061 |
+
[2025-08-30 00:14:53,532][02133] Num frames 4500...
|
1062 |
+
[2025-08-30 00:14:53,656][02133] Num frames 4600...
|
1063 |
+
[2025-08-30 00:14:53,784][02133] Num frames 4700...
|
1064 |
+
[2025-08-30 00:14:53,835][02133] Avg episode rewards: #0: 8.111, true rewards: #0: 5.222
|
1065 |
+
[2025-08-30 00:14:53,836][02133] Avg episode reward: 8.111, avg true_objective: 5.222
|
1066 |
+
[2025-08-30 00:14:53,965][02133] Num frames 4800...
|
1067 |
+
[2025-08-30 00:14:54,092][02133] Num frames 4900...
|
1068 |
+
[2025-08-30 00:14:54,226][02133] Num frames 5000...
|
1069 |
+
[2025-08-30 00:14:54,346][02133] Num frames 5100...
|
1070 |
+
[2025-08-30 00:14:54,469][02133] Num frames 5200...
|
1071 |
+
[2025-08-30 00:14:54,589][02133] Num frames 5300...
|
1072 |
+
[2025-08-30 00:14:54,710][02133] Num frames 5400...
|
1073 |
+
[2025-08-30 00:14:54,849][02133] Avg episode rewards: #0: 8.568, true rewards: #0: 5.468
|
1074 |
+
[2025-08-30 00:14:54,850][02133] Avg episode reward: 8.568, avg true_objective: 5.468
|
1075 |
+
[2025-08-30 00:15:24,708][02133] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
|
[2025-08-30 00:15:57,391][02133] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
[2025-08-30 00:15:57,392][02133] Overriding arg 'num_workers' with value 1 passed from command line
[2025-08-30 00:15:57,393][02133] Adding new argument 'no_render'=True that is not in the saved config file!
[2025-08-30 00:15:57,394][02133] Adding new argument 'save_video'=True that is not in the saved config file!
[2025-08-30 00:15:57,395][02133] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
[2025-08-30 00:15:57,396][02133] Adding new argument 'video_name'=None that is not in the saved config file!
[2025-08-30 00:15:57,397][02133] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
[2025-08-30 00:15:57,397][02133] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
[2025-08-30 00:15:57,398][02133] Adding new argument 'push_to_hub'=True that is not in the saved config file!
[2025-08-30 00:15:57,399][02133] Adding new argument 'hf_repository'='Priyam05/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
[2025-08-30 00:15:57,400][02133] Adding new argument 'policy_index'=0 that is not in the saved config file!
[2025-08-30 00:15:57,401][02133] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
[2025-08-30 00:15:57,402][02133] Adding new argument 'train_script'=None that is not in the saved config file!
[2025-08-30 00:15:57,403][02133] Adding new argument 'enjoy_script'=None that is not in the saved config file!
[2025-08-30 00:15:57,404][02133] Using frameskip 1 and render_action_repeat=4 for evaluation
[2025-08-30 00:15:57,428][02133] RunningMeanStd input shape: (3, 72, 128)
[2025-08-30 00:15:57,563][02133] RunningMeanStd input shape: (1,)
[2025-08-30 00:15:57,572][02133] ConvEncoder: input_channels=3
[2025-08-30 00:15:57,603][02133] Conv encoder output size: 512
[2025-08-30 00:15:57,603][02133] Policy head output size: 512
[2025-08-30 00:15:57,619][02133] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000246_1007616.pth...
[2025-08-30 00:15:58,009][02133] Num frames 100...
[2025-08-30 00:15:58,130][02133] Num frames 200...
[2025-08-30 00:15:58,262][02133] Num frames 300...
[2025-08-30 00:15:58,423][02133] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
[2025-08-30 00:15:58,424][02133] Avg episode reward: 3.840, avg true_objective: 3.840
[2025-08-30 00:15:58,446][02133] Num frames 400...
[2025-08-30 00:15:58,564][02133] Num frames 500...
[2025-08-30 00:15:58,681][02133] Num frames 600...
[2025-08-30 00:15:58,782][02133] Avg episode rewards: #0: 3.200, true rewards: #0: 3.200
[2025-08-30 00:15:58,783][02133] Avg episode reward: 3.200, avg true_objective: 3.200
[2025-08-30 00:15:58,855][02133] Num frames 700...
[2025-08-30 00:15:58,975][02133] Num frames 800...
[2025-08-30 00:15:59,096][02133] Num frames 900...
[2025-08-30 00:15:59,229][02133] Num frames 1000...
[2025-08-30 00:15:59,316][02133] Avg episode rewards: #0: 3.413, true rewards: #0: 3.413
[2025-08-30 00:15:59,316][02133] Avg episode reward: 3.413, avg true_objective: 3.413
[2025-08-30 00:15:59,408][02133] Num frames 1100...
[2025-08-30 00:15:59,528][02133] Num frames 1200...
[2025-08-30 00:15:59,648][02133] Num frames 1300...
[2025-08-30 00:15:59,815][02133] Avg episode rewards: #0: 3.740, true rewards: #0: 3.490
[2025-08-30 00:15:59,816][02133] Avg episode reward: 3.740, avg true_objective: 3.490
[2025-08-30 00:15:59,824][02133] Num frames 1400...
[2025-08-30 00:15:59,943][02133] Num frames 1500...
[2025-08-30 00:16:00,060][02133] Num frames 1600...
[2025-08-30 00:16:00,202][02133] Num frames 1700...
[2025-08-30 00:16:00,363][02133] Avg episode rewards: #0: 3.760, true rewards: #0: 3.560
[2025-08-30 00:16:00,364][02133] Avg episode reward: 3.760, avg true_objective: 3.560
[2025-08-30 00:16:00,390][02133] Num frames 1800...
[2025-08-30 00:16:00,510][02133] Num frames 1900...
[2025-08-30 00:16:00,631][02133] Num frames 2000...
[2025-08-30 00:16:00,758][02133] Num frames 2100...
[2025-08-30 00:16:00,878][02133] Num frames 2200...
[2025-08-30 00:16:00,998][02133] Num frames 2300...
[2025-08-30 00:16:01,121][02133] Num frames 2400...
[2025-08-30 00:16:01,245][02133] Num frames 2500...
[2025-08-30 00:16:01,375][02133] Num frames 2600...
[2025-08-30 00:16:01,494][02133] Num frames 2700...
[2025-08-30 00:16:01,559][02133] Avg episode rewards: #0: 5.847, true rewards: #0: 4.513
[2025-08-30 00:16:01,559][02133] Avg episode reward: 5.847, avg true_objective: 4.513
[2025-08-30 00:16:01,671][02133] Num frames 2800...
[2025-08-30 00:16:01,790][02133] Num frames 2900...
[2025-08-30 00:16:01,925][02133] Num frames 3000...
[2025-08-30 00:16:02,032][02133] Avg episode rewards: #0: 5.611, true rewards: #0: 4.326
[2025-08-30 00:16:02,033][02133] Avg episode reward: 5.611, avg true_objective: 4.326
[2025-08-30 00:16:02,156][02133] Num frames 3100...
[2025-08-30 00:16:02,336][02133] Num frames 3200...
[2025-08-30 00:16:02,534][02133] Avg episode rewards: #0: 5.230, true rewards: #0: 4.105
[2025-08-30 00:16:02,535][02133] Avg episode reward: 5.230, avg true_objective: 4.105
[2025-08-30 00:16:02,565][02133] Num frames 3300...
[2025-08-30 00:16:02,735][02133] Num frames 3400...
[2025-08-30 00:16:02,903][02133] Num frames 3500...
[2025-08-30 00:16:03,069][02133] Num frames 3600...
[2025-08-30 00:16:03,239][02133] Num frames 3700...
[2025-08-30 00:16:03,422][02133] Num frames 3800...
[2025-08-30 00:16:03,638][02133] Avg episode rewards: #0: 5.658, true rewards: #0: 4.324
[2025-08-30 00:16:03,639][02133] Avg episode reward: 5.658, avg true_objective: 4.324
[2025-08-30 00:16:03,655][02133] Num frames 3900...
[2025-08-30 00:16:03,831][02133] Num frames 4000...
[2025-08-30 00:16:04,011][02133] Num frames 4100...
[2025-08-30 00:16:04,160][02133] Num frames 4200...
[2025-08-30 00:16:04,286][02133] Num frames 4300...
[2025-08-30 00:16:04,420][02133] Num frames 4400...
[2025-08-30 00:16:04,521][02133] Avg episode rewards: #0: 5.836, true rewards: #0: 4.436
[2025-08-30 00:16:04,521][02133] Avg episode reward: 5.836, avg true_objective: 4.436
[2025-08-30 00:16:27,557][02133] Replay video saved to /content/train_dir/default_experiment/replay.mp4!