# ppo-SnowballTarget — configuration.yaml
# Author: patonw · commit 48051ae ("Baseline")
---
# Unity ML-Agents trainer configuration (RunOptions dump) for the
# SnowballTarget environment, trained with PPO.
default_settings: null

behaviors:
  SnowballTarget:
    trainer_type: ppo

    # PPO optimizer hyperparameters.
    hyperparameters:
      batch_size: 1024
      buffer_size: 10240
      learning_rate: 0.0003
      beta: 0.005            # entropy regularization strength
      epsilon: 0.2           # PPO clip range
      lambd: 0.95            # GAE lambda
      num_epoch: 3
      shared_critic: false
      learning_rate_schedule: linear
      beta_schedule: linear
      epsilon_schedule: linear
    checkpoint_interval: 500000

    # Policy network architecture.
    network_settings:
      normalize: false
      hidden_units: 128
      num_layers: 2
      vis_encode_type: simple
      memory: null
      goal_conditioning_type: hyper
      deterministic: false

    reward_signals:
      extrinsic:
        gamma: 0.99
        strength: 1.0
        # Value-network architecture for this reward signal.
        network_settings:
          normalize: false
          hidden_units: 128
          num_layers: 2
          vis_encode_type: simple
          memory: null
          goal_conditioning_type: hyper
          deterministic: false

    init_path: null
    keep_checkpoints: 5
    even_checkpoints: false
    max_steps: 500000
    time_horizon: 64
    summary_freq: 50000
    threaded: false
    self_play: null
    behavioral_cloning: null

# Environment executable and launch settings.
env_settings:
  env_path: envs/SnowballTarget/SnowballTarget
  env_args: null
  base_port: 5005
  num_envs: 1
  num_areas: 1
  seed: -1
  max_lifetime_restarts: 10
  restarts_rate_limit_n: 1
  restarts_rate_limit_period_s: 60

# Unity engine/rendering settings used during training.
engine_settings:
  width: 84
  height: 84
  quality_level: 5
  time_scale: 20
  target_frame_rate: -1
  capture_frame_rate: 60
  no_graphics: true

environment_parameters: null

checkpoint_settings:
  run_id: SnowballTarget-00
  initialize_from: null
  load_model: false
  resume: false
  force: false
  train_model: false
  inference: false
  results_dir: results

torch_settings:
  device: null               # null lets ML-Agents pick CPU/GPU automatically
  debug: false